-rw-r--r--.mailmap2
-rw-r--r--Documentation/admin-guide/README.rst30
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst5
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst18
-rw-r--r--Documentation/block/index.rst1
-rw-r--r--Documentation/block/ublk.rst253
-rw-r--r--Documentation/conf.py1
-rw-r--r--Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml2
-rw-r--r--Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/renesas,riic.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml2
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/goodix.yaml1
-rw-r--r--Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml2
-rw-r--r--Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml2
-rw-r--r--Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml2
-rw-r--r--Documentation/devicetree/bindings/mfd/khadas,mcu.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml2
-rw-r--r--Documentation/devicetree/bindings/power/qcom,rpmpd.yaml2
-rw-r--r--Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml3
-rw-r--r--Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml2
-rw-r--r--Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml79
-rw-r--r--Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml1
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml6
-rw-r--r--Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml2
-rw-r--r--Documentation/filesystems/proc.rst4
-rw-r--r--Documentation/i2c/busses/i2c-piix4.rst13
-rw-r--r--Documentation/i2c/i2c-topology.rst214
-rw-r--r--Documentation/input/joydev/joystick.rst1
-rw-r--r--Documentation/networking/devlink/netdevsim.rst2
-rw-r--r--Documentation/networking/driver.rst2
-rw-r--r--Documentation/networking/ip-sysctl.rst5
-rw-r--r--Documentation/networking/ipvlan.rst2
-rw-r--r--Documentation/networking/l2tp.rst2
-rw-r--r--Documentation/networking/rxrpc.rst11
-rw-r--r--Documentation/networking/switchdev.rst2
-rw-r--r--Documentation/sphinx/kerneldoc-preamble.sty22
-rw-r--r--Documentation/translations/ja_JP/SubmittingPatches3
-rw-r--r--Documentation/virt/kvm/api.rst113
-rw-r--r--Documentation/virt/kvm/vcpu-requests.rst28
-rw-r--r--MAINTAINERS50
-rw-r--r--Makefile5
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/arm/boot/dts/arm-realview-eb.dtsi2
-rw-r--r--arch/arm/boot/dts/arm-realview-pb1176.dts2
-rw-r--r--arch/arm/boot/dts/arm-realview-pb11mp.dts2
-rw-r--r--arch/arm/boot/dts/arm-realview-pbx.dtsi2
-rw-r--r--arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi21
-rw-r--r--arch/arm/boot/dts/at91-sama5d2_icp.dts21
-rw-r--r--arch/arm/boot/dts/at91-sama7g5ek.dts18
-rw-r--r--arch/arm/boot/dts/bcm63178.dtsi20
-rw-r--r--arch/arm/boot/dts/bcm6846.dtsi18
-rw-r--r--arch/arm/boot/dts/bcm6878.dtsi9
-rw-r--r--arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi12
-rw-r--r--arch/arm/boot/dts/imx6qdl-vicut1.dtsi2
-rw-r--r--arch/arm/boot/dts/integratorap-im-pd1.dts4
-rw-r--r--arch/arm/boot/dts/versatile-ab.dts2
-rw-r--r--arch/arm/configs/at91_dt_defconfig1
-rw-r--r--arch/arm/configs/sama7_defconfig1
-rw-r--r--arch/arm/kernel/irq.c2
-rw-r--r--arch/arm/mach-at91/pm.c36
-rw-r--r--arch/arm/mach-at91/pm_suspend.S24
-rw-r--r--arch/arm/mach-ixp4xx/ixp4xx-of.c2
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/boot/dts/arm/juno-base.dtsi3
-rw-r--r--arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi2
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts1
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi11
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi14
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a779g0.dtsi2
-rw-r--r--arch/arm64/include/asm/kernel-pgtable.h26
-rw-r--r--arch/arm64/kernel/head.S2
-rw-r--r--arch/arm64/kernel/machine_kexec_file.c2
-rw-r--r--arch/arm64/kernel/ptrace.c2
-rw-r--r--arch/arm64/kernel/sleep.S3
-rw-r--r--arch/arm64/kvm/arm.c3
-rw-r--r--arch/arm64/kvm/mmu.c36
-rw-r--r--arch/loongarch/Kconfig2
-rw-r--r--arch/loongarch/include/asm/acpi.h2
-rw-r--r--arch/loongarch/kernel/acpi.c2
-rw-r--r--arch/loongarch/kernel/signal.c4
-rw-r--r--arch/loongarch/kernel/vmlinux.lds.S2
-rw-r--r--arch/loongarch/lib/dump_tlb.c26
-rw-r--r--arch/loongarch/mm/init.c19
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c10
-rw-r--r--arch/mips/cavium-octeon/setup.c27
-rw-r--r--arch/mips/kvm/emulate.c6
-rw-r--r--arch/mips/loongson32/ls1c/board.c1
-rw-r--r--arch/parisc/Kconfig12
-rw-r--r--arch/parisc/kernel/irq.c2
-rw-r--r--arch/powerpc/include/asm/firmware.h8
-rw-r--r--arch/powerpc/include/asm/hw_irq.h46
-rw-r--r--arch/powerpc/kernel/irq.c4
-rw-r--r--arch/powerpc/kernel/pci_32.c9
-rw-r--r--arch/powerpc/kernel/rtas_entry.S4
-rw-r--r--arch/powerpc/kernel/systbl.S1
-rw-r--r--arch/powerpc/kvm/book3s_pr.c1
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c1
-rw-r--r--arch/powerpc/kvm/booke.c1
-rw-r--r--arch/powerpc/kvm/powerpc.c1
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c90
-rw-r--r--arch/powerpc/platforms/pseries/plpks.c3
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs.dtsi2
-rw-r--r--arch/riscv/include/asm/kvm_vcpu_sbi.h12
-rw-r--r--arch/riscv/kvm/vcpu_insn.c1
-rw-r--r--arch/riscv/kvm/vcpu_sbi.c12
-rw-r--r--arch/riscv/kvm/vcpu_timer.c4
-rw-r--r--arch/riscv/mm/pageattr.c4
-rw-r--r--arch/s390/configs/debug_defconfig53
-rw-r--r--arch/s390/configs/defconfig49
-rw-r--r--arch/s390/configs/zfcpdump_defconfig6
-rw-r--r--arch/s390/include/asm/hugetlb.h6
-rw-r--r--arch/s390/include/asm/kvm_host.h17
-rw-r--r--arch/s390/include/asm/softirq_stack.h2
-rw-r--r--arch/s390/kernel/nmi.c2
-rw-r--r--arch/s390/kernel/setup.c3
-rw-r--r--arch/s390/kernel/vmlinux.lds.S1
-rw-r--r--arch/s390/kvm/gaccess.c16
-rw-r--r--arch/s390/kvm/interrupt.c2
-rw-r--r--arch/s390/kvm/kvm-s390.c6
-rw-r--r--arch/s390/kvm/pci.c26
-rw-r--r--arch/s390/kvm/pci.h6
-rw-r--r--arch/s390/mm/fault.c2
-rw-r--r--arch/s390/pci/Makefile2
-rw-r--r--arch/s390/pci/pci_kvm_hook.c11
-rw-r--r--arch/sh/kernel/irq.c2
-rw-r--r--arch/sparc/kernel/irq_64.c2
-rw-r--r--arch/x86/events/intel/core.c3
-rw-r--r--arch/x86/include/asm/hyperv-tlfs.h22
-rw-r--r--arch/x86/include/asm/irq_stack.h2
-rw-r--r--arch/x86/include/asm/kvm-x86-ops.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h52
-rw-r--r--arch/x86/include/asm/vmx.h2
-rw-r--r--arch/x86/kernel/irq_32.c2
-rw-r--r--arch/x86/kvm/cpuid.c31
-rw-r--r--arch/x86/kvm/emulate.c34
-rw-r--r--arch/x86/kvm/hyperv.c70
-rw-r--r--arch/x86/kvm/hyperv.h6
-rw-r--r--arch/x86/kvm/lapic.c38
-rw-r--r--arch/x86/kvm/lapic.h9
-rw-r--r--arch/x86/kvm/mmu/mmu.c84
-rw-r--r--arch/x86/kvm/mmu/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/mmu/spte.h14
-rw-r--r--arch/x86/kvm/mmu/tdp_mmu.c12
-rw-r--r--arch/x86/kvm/pmu.c20
-rw-r--r--arch/x86/kvm/svm/nested.c124
-rw-r--r--arch/x86/kvm/svm/pmu.c117
-rw-r--r--arch/x86/kvm/svm/svm.c35
-rw-r--r--arch/x86/kvm/trace.h53
-rw-r--r--arch/x86/kvm/vmx/capabilities.h14
-rw-r--r--arch/x86/kvm/vmx/evmcs.c192
-rw-r--r--arch/x86/kvm/vmx/evmcs.h10
-rw-r--r--arch/x86/kvm/vmx/nested.c468
-rw-r--r--arch/x86/kvm/vmx/nested.h2
-rw-r--r--arch/x86/kvm/vmx/pmu_intel.c29
-rw-r--r--arch/x86/kvm/vmx/sgx.c2
-rw-r--r--arch/x86/kvm/vmx/vmenter.S24
-rw-r--r--arch/x86/kvm/vmx/vmx.c324
-rw-r--r--arch/x86/kvm/vmx/vmx.h172
-rw-r--r--arch/x86/kvm/vmx/vmx_ops.h2
-rw-r--r--arch/x86/kvm/x86.c678
-rw-r--r--arch/x86/kvm/x86.h16
-rw-r--r--arch/x86/kvm/xen.c1
-rw-r--r--block/blk-core.c4
-rw-r--r--block/blk-lib.c11
-rw-r--r--block/blk-mq-debugfs.c2
-rw-r--r--block/partitions/core.c3
-rw-r--r--drivers/amba/bus.c8
-rw-r--r--drivers/android/binder.c12
-rw-r--r--drivers/android/binder_alloc.c4
-rw-r--r--drivers/base/arch_topology.c4
-rw-r--r--drivers/base/dd.c40
-rw-r--r--drivers/base/driver.c6
-rw-r--r--drivers/base/firmware_loader/sysfs.c7
-rw-r--r--drivers/base/firmware_loader/sysfs.h5
-rw-r--r--drivers/base/firmware_loader/sysfs_upload.c12
-rw-r--r--drivers/base/node.c2
-rw-r--r--drivers/base/power/domain.c2
-rw-r--r--drivers/base/regmap/regmap-spi.c8
-rw-r--r--drivers/block/xen-blkback/common.h3
-rw-r--r--drivers/block/xen-blkback/xenbus.c6
-rw-r--r--drivers/block/xen-blkfront.c20
-rw-r--r--drivers/bus/mhi/host/main.c19
-rw-r--r--drivers/char/mem.c6
-rw-r--r--drivers/clk/bcm/clk-raspberrypi.c16
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/ti/clk.c1
-rw-r--r--drivers/dma-buf/dma-resv.c3
-rw-r--r--drivers/firmware/efi/capsule-loader.c31
-rw-r--r--drivers/firmware/efi/libstub/Makefile7
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c1
-rw-r--r--drivers/gpio/gpio-104-dio-48e.c10
-rw-r--r--drivers/gpio/gpio-104-idi-48.c10
-rw-r--r--drivers/gpio/gpio-104-idio-16.c18
-rw-r--r--drivers/gpio/gpio-ixp4xx.c17
-rw-r--r--drivers/gpio/gpio-mockup.c9
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c1
-rw-r--r--drivers/gpio/gpio-mt7621.c21
-rw-r--r--drivers/gpio/gpio-pca953x.c8
-rw-r--r--drivers/gpio/gpio-pxa.c11
-rw-r--r--drivers/gpio/gpio-realtek-otto.c166
-rw-r--r--drivers/gpio/gpio-rockchip.c4
-rw-r--r--drivers/gpio/gpio-ws16c48.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc21.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega20_ih.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c420
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c260
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c323
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h51
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c4
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c4
-rw-r--r--drivers/gpu/drm/amd/include/mes_v11_api_def.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c11
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c23
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c72
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c36
-rw-r--r--drivers/gpu/drm/drm_debugfs.c4
-rw-r--r--drivers/gpu/drm/drm_edid.c24
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c4
-rw-r--r--drivers/gpu/drm/gma500/gem.c4
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c11
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c5
-rw-r--r--drivers/gpu/drm/gma500/power.c8
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h5
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c15
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h2
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_drv.c10
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_quirks.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c1
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c44
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c50
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c14
-rw-r--r--drivers/gpu/drm/i915/gvt/aperture_gm.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h16
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c3
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c8
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_ctrl.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c2
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c3
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c11
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop2.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c13
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c18
-rw-r--r--drivers/hid/hid-asus.c7
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-nintendo.c6
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-steam.c10
-rw-r--r--drivers/hid/hid-thrustmaster.c3
-rw-r--r--drivers/hid/hidraw.c2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c1
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.h2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c68
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/vmbus_drv.c56
-rw-r--r--drivers/hwmon/asus-ec-sensors.c408
-rw-r--r--drivers/hwmon/gpio-fan.c3
-rw-r--r--drivers/hwmon/mr75203.c72
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c19
-rw-r--r--drivers/hwmon/tps23861.c10
-rw-r--r--drivers/iio/adc/ad7292.c4
-rw-r--r--drivers/iio/adc/mcp3911.c28
-rw-r--r--drivers/iio/light/cm32181.c2
-rw-r--r--drivers/iio/light/cm3605.c6
-rw-r--r--drivers/infiniband/core/cma.c4
-rw-r--r--drivers/infiniband/core/umem_odp.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c7
-rw-r--r--drivers/infiniband/hw/irdma/uk.c7
-rw-r--r--drivers/infiniband/hw/irdma/utils.c15
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c13
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c6
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_tx.c18
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c9
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c14
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c3
-rw-r--r--drivers/input/joystick/iforce/iforce-main.c1
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c6
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c8
-rw-r--r--drivers/input/joystick/iforce/iforce.h6
-rw-r--r--drivers/input/misc/rk805-pwrkey.c1
-rw-r--r--drivers/input/touchscreen/goodix.c2
-rw-r--r--drivers/iommu/amd/iommu.c3
-rw-r--r--drivers/iommu/amd/iommu_v2.c2
-rw-r--r--drivers/iommu/intel/dmar.c7
-rw-r--r--drivers/iommu/intel/iommu.c241
-rw-r--r--drivers/iommu/intel/iommu.h9
-rw-r--r--drivers/iommu/iommu.c21
-rw-r--r--drivers/iommu/of_iommu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c11
-rw-r--r--drivers/media/rc/mceusb.c35
-rw-r--r--drivers/misc/fastrpc.c14
-rw-r--r--drivers/mmc/core/sd.c46
-rw-r--r--drivers/net/bonding/bond_main.c20
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c30
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c161
-rw-r--r--drivers/net/dsa/qca/qca8k-8xxx.c2
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/cortina/gemini.c24
-rw-r--r--drivers/net/ethernet/freescale/fec.h6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c26
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c28
-rw-r--r--drivers/net/ethernet/fungible/funeth/funeth_txrx.h4
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c16
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c12
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_client.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c3
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c10
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c63
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h8
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c4
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c122
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c3
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c5
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/qos_conf.c5
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c1
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c2
-rw-r--r--drivers/net/ieee802154/adf7242.c3
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ieee802154/cc2520.c1
-rw-r--r--drivers/net/mdio/fwnode_mdio.c4
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/phy/meson-gxl.c8
-rw-r--r--drivers/net/phy/micrel.c8
-rw-r--r--drivers/net/phy/microchip_t1.c58
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c5
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c39
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c15
-rw-r--r--drivers/net/xen-netback/xenbus.c2
-rw-r--r--drivers/nvme/host/core.c14
-rw-r--r--drivers/nvme/host/pci.c2
-rw-r--r--drivers/nvme/host/tcp.c7
-rw-r--r--drivers/nvme/target/auth.c1
-rw-r--r--drivers/nvme/target/core.c6
-rw-r--r--drivers/nvme/target/tcp.c3
-rw-r--r--drivers/nvme/target/zns.c17
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/parisc/ccio-dma.c1
-rw-r--r--drivers/parisc/iosapic.c11
-rw-r--r--drivers/peci/controller/peci-aspeed.c2
-rw-r--r--drivers/peci/cpu.c3
-rw-r--r--drivers/perf/arm_pmu_platform.c2
-rw-r--r--drivers/perf/riscv_pmu_sbi.c2
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c111
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c10
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c53
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c47
-rw-r--r--drivers/platform/x86/acer-wmi.c9
-rw-r--r--drivers/platform/x86/asus-wmi.c11
-rw-r--r--drivers/platform/x86/p2sb.c18
-rw-r--r--drivers/platform/x86/pmc_atom.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c7
-rw-r--r--drivers/platform/x86/x86-android-tablets.c14
-rw-r--r--drivers/regulator/core.c9
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/scsi/hosts.c28
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c2
-rw-r--r--drivers/scsi/scsi.c9
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c10
-rw-r--r--drivers/scsi/scsi_sysfs.c30
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c50
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/imx/gpcv2.c5
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c1
-rw-r--r--drivers/soundwire/qcom.c10
-rw-r--r--drivers/spi/spi-bitbang-txrx.h6
-rw-r--r--drivers/spi/spi-cadence-quadspi.c38
-rw-r--r--drivers/spi/spi-mux.c1
-rw-r--r--drivers/spi/spi.c5
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c1
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c1
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c36
-rw-r--r--drivers/tee/tee_shm.c1
-rw-r--r--drivers/thunderbolt/Kconfig3
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/switch.c6
-rw-r--r--drivers/tty/n_gsm.c85
-rw-r--r--drivers/tty/serial/atmel_serial.c4
-rw-r--r--drivers/tty/serial/fsl_lpuart.c5
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/tty/vt/vt.c12
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c4
-rw-r--r--drivers/usb/class/cdc-acm.c3
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/dwc2/platform.c8
-rw-r--r--drivers/usb/dwc3/core.c24
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c96
-rw-r--r--drivers/usb/dwc3/gadget.c8
-rw-r--r--drivers/usb/dwc3/host.c11
-rw-r--r--drivers/usb/gadget/function/f_uac2.c16
-rw-r--r--drivers/usb/gadget/function/storage_common.c6
-rw-r--r--drivers/usb/gadget/udc/core.c26
-rw-r--r--drivers/usb/host/xhci-hub.c13
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c15
-rw-r--r--drivers/usb/host/xhci-plat.c11
-rw-r--r--drivers/usb/host/xhci.c19
-rw-r--r--drivers/usb/host/xhci.h4
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c5
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/serial/ch341.c16
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/option.c15
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/altmodes/displayport.c4
-rw-r--r--drivers/usb/typec/class.c1
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c9
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c53
-rw-r--r--drivers/vfio/pci/vfio_pci_zdev.c8
-rw-r--r--drivers/vfio/vfio_iommu_type1.c12
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig2
-rw-r--r--drivers/xen/grant-table.c3
-rw-r--r--fs/afs/flock.c2
-rw-r--r--fs/afs/fsclient.c2
-rw-r--r--fs/afs/internal.h3
-rw-r--r--fs/afs/misc.c1
-rw-r--r--fs/afs/rxrpc.c7
-rw-r--r--fs/afs/yfsclient.c3
-rw-r--r--fs/btrfs/ctree.h2
-rw-r--r--fs/btrfs/disk-io.c1
-rw-r--r--fs/btrfs/inode.c7
-rw-r--r--fs/btrfs/space-info.c2
-rw-r--r--fs/btrfs/volumes.c3
-rw-r--r--fs/btrfs/zoned.c99
-rw-r--r--fs/cachefiles/internal.h1
-rw-r--r--fs/cachefiles/ondemand.c22
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsfs.h4
-rw-r--r--fs/cifs/connect.c14
-rw-r--r--fs/cifs/file.c3
-rw-r--r--fs/cifs/smb2ops.c69
-rw-r--r--fs/cifs/smb2pdu.c12
-rw-r--r--fs/cifs/transport.c6
-rw-r--r--fs/debugfs/inode.c22
-rw-r--r--fs/erofs/fscache.c8
-rw-r--r--fs/erofs/internal.h29
-rw-r--r--fs/erofs/zmap.c16
-rw-r--r--fs/nfs/internal.h25
-rw-r--r--fs/nfs/nfs42proc.c9
-rw-r--r--fs/nfs/super.c27
-rw-r--r--fs/nfs/write.c25
-rw-r--r--fs/nfsd/vfs.c31
-rw-r--r--fs/proc/meminfo.c2
-rw-r--r--fs/tracefs/inode.c31
-rw-r--r--include/asm-generic/softirq_stack.h2
-rw-r--r--include/drm/drm_connector.h4
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/kunit/test.h6
-rw-r--r--include/linux/amba/bus.h1
-rw-r--r--include/linux/buffer_head.h11
-rw-r--r--include/linux/compiler.h6
-rw-r--r--include/linux/debugfs.h6
-rw-r--r--include/linux/device/driver.h1
-rw-r--r--include/linux/dma-mapping.h5
-rw-r--r--include/linux/dmar.h4
-rw-r--r--include/linux/fscache.h4
-rw-r--r--include/linux/hp_sdc.h2
-rw-r--r--include/linux/ieee80211.h8
-rw-r--r--include/linux/kvm_host.h16
-rw-r--r--include/linux/lsm_hook_defs.h1
-rw-r--r--include/linux/lsm_hooks.h3
-rw-r--r--include/linux/mlx5/driver.h19
-rw-r--r--include/linux/mmzone.h1
-rw-r--r--include/linux/of_device.h5
-rw-r--r--include/linux/overflow.h1
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/platform_data/x86/pmc_atom.h6
-rw-r--r--include/linux/rmap.h7
-rw-r--r--include/linux/security.h5
-rw-r--r--include/linux/serial_core.h8
-rw-r--r--include/linux/skbuff.h21
-rw-r--r--include/linux/spi/spi.h2
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/udp.h1
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/typec_dp.h5
-rw-r--r--include/net/af_rxrpc.h2
-rw-r--r--include/net/dropreason.h67
-rw-r--r--include/net/ip_tunnels.h4
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netns/conntrack.h1
-rw-r--r--include/net/netns/ipv4.h2
-rw-r--r--include/net/udp_tunnel.h4
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_host.h5
-rw-r--r--include/soc/at91/sama7-ddr.h8
-rw-r--r--include/trace/events/skb.h15
-rw-r--r--include/uapi/linux/bpf.h2
-rw-r--r--include/uapi/linux/io_uring.h28
-rw-r--r--include/uapi/linux/virtio_net.h14
-rw-r--r--io_uring/io_uring.c15
-rw-r--r--io_uring/kbuf.h8
-rw-r--r--io_uring/msg_ring.c3
-rw-r--r--io_uring/net.c68
-rw-r--r--io_uring/net.h1
-rw-r--r--io_uring/notif.c91
-rw-r--r--io_uring/notif.h54
-rw-r--r--io_uring/opdef.c14
-rw-r--r--io_uring/rsrc.c55
-rw-r--r--io_uring/rsrc.h4
-rw-r--r--io_uring/rw.c30
-rw-r--r--io_uring/uring_cmd.c5
-rw-r--r--kernel/bpf/cgroup.c4
-rw-r--r--kernel/bpf/core.c2
-rw-r--r--kernel/bpf/syscall.c2
-rw-r--r--kernel/bpf/verifier.c13
-rw-r--r--kernel/dma/debug.c6
-rw-r--r--kernel/dma/mapping.c3
-rw-r--r--kernel/dma/swiotlb.c13
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/kprobes.c1
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/trace/rv/monitors/wip/wip.h2
-rw-r--r--kernel/trace/rv/monitors/wwnr/wwnr.h2
-rw-r--r--kernel/trace/rv/reactor_panic.c4
-rw-r--r--kernel/trace/rv/reactor_printk.c4
-rw-r--r--kernel/trace/trace_events_trigger.c3
-rw-r--r--kernel/trace/trace_preemptirq.c4
-rw-r--r--kernel/tracepoint.c5
-rw-r--r--lib/crypto/Kconfig1
-rw-r--r--mm/memcontrol.c1
-rw-r--r--mm/page_alloc.c6
-rw-r--r--mm/pagewalk.c21
-rw-r--r--mm/ptdump.c4
-rw-r--r--mm/rmap.c29
-rw-r--r--mm/slab_common.c45
-rw-r--r--mm/vmstat.c1
-rw-r--r--net/bluetooth/hci_event.c13
-rw-r--r--net/bluetooth/hci_sync.c42
-rw-r--r--net/bluetooth/hidp/core.c6
-rw-r--r--net/bluetooth/iso.c35
-rw-r--r--net/bluetooth/l2cap_core.c10
-rw-r--r--net/bluetooth/mgmt.c72
-rw-r--r--net/bridge/br_netfilter_hooks.c2
-rw-r--r--net/bridge/br_netfilter_ipv6.c1
-rw-r--r--net/core/.gitignore1
-rw-r--r--net/core/Makefile22
-rw-r--r--net/core/datagram.c2
-rw-r--r--net/core/skbuff.c6
-rw-r--r--net/core/skmsg.c4
-rw-r--r--net/dsa/tag_hellcreek.c2
-rw-r--r--net/ipv4/fib_frontend.c4
-rw-r--r--net/ipv4/ip_gre.c2
-rw-r--r--net/ipv4/ip_tunnel.c7
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_input.c46
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/udp_tunnel_core.c1
-rw-r--r--net/ipv6/addrconf.c8
-rw-r--r--net/ipv6/seg6.c5
-rw-r--r--net/ipv6/udp.c5
-rw-r--r--net/kcm/kcmsock.c15
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/mlme.c12
-rw-r--r--net/mac80211/rx.c4
-rw-r--r--net/mac80211/scan.c11
-rw-r--r--net/mac80211/sta_info.c10
-rw-r--r--net/mac80211/tx.c3
-rw-r--r--net/mac80211/wpa.c4
-rw-r--r--net/mac802154/rx.c2
-rw-r--r--net/mpls/af_mpls.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c7
-rw-r--r--net/netfilter/nf_conntrack_helper.c80
-rw-r--r--net/netfilter/nf_conntrack_irc.c5
-rw-r--r--net/netfilter/nf_conntrack_netlink.c5
-rw-r--r--net/netfilter/nf_conntrack_standalone.c10
-rw-r--r--net/netfilter/nf_tables_api.c4
-rw-r--r--net/netfilter/nft_ct.c3
-rw-r--r--net/openvswitch/datapath.c4
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/call_event.c2
-rw-r--r--net/rxrpc/local_object.c4
-rw-r--r--net/rxrpc/peer_event.c293
-rw-r--r--net/rxrpc/recvmsg.c43
-rw-r--r--net/rxrpc/rxkad.c2
-rw-r--r--net/sched/sch_generic.c31
-rw-r--r--net/sched/sch_sfb.c13
-rw-r--r--net/sched/sch_tbf.c4
-rw-r--r--net/smc/af_smc.c1
-rw-r--r--net/smc/smc_core.c1
-rw-r--r--net/smc/smc_core.h2
-rw-r--r--net/smc/smc_wr.c5
-rw-r--r--net/smc/smc_wr.h5
-rw-r--r--net/sunrpc/clnt.c3
-rw-r--r--net/sunrpc/xprt.c8
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/wireless/debugfs.c3
-rw-r--r--net/wireless/lib80211_crypt_ccmp.c2
-rw-r--r--net/xdp/xsk_buff_pool.c16
-rw-r--r--scripts/Makefile.extrawarn12
-rwxr-xr-xscripts/extract-ikconfig1
-rwxr-xr-xscripts/gcc-ld30
-rwxr-xr-xscripts/mksysmap2
-rw-r--r--security/landlock/fs.c48
-rw-r--r--security/security.c4
-rw-r--r--security/selinux/hooks.c24
-rw-r--r--security/selinux/include/classmap.h2
-rw-r--r--security/smack/smack_lsm.c32
-rw-r--r--sound/core/control.c6
-rw-r--r--sound/core/memalloc.c96
-rw-r--r--sound/core/oss/pcm_oss.c6
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/seq_clientmgr.c12
-rw-r--r--sound/drivers/aloop.c7
-rw-r--r--sound/hda/intel-nhlt.c8
-rw-r--r--sound/pci/emu10k1/emupcm.c2
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/hda_tegra.c3
-rw-r--r--sound/pci/hda/patch_realtek.c63
-rw-r--r--sound/pci/hda/patch_sigmatel.c24
-rw-r--r--sound/soc/atmel/mchp-spdiftx.c2
-rw-r--r--sound/soc/codecs/cs42l42.c13
-rw-r--r--sound/soc/codecs/nau8540.c40
-rw-r--r--sound/soc/codecs/nau8821.c66
-rw-r--r--sound/soc/codecs/nau8824.c80
-rw-r--r--sound/soc/codecs/nau8825.c83
-rw-r--r--sound/soc/fsl/fsl_aud2htx.c16
-rw-r--r--sound/soc/fsl/fsl_mqs.c2
-rw-r--r--sound/soc/fsl/fsl_sai.c2
-rw-r--r--sound/soc/mediatek/mt8186/mt8186-dai-adda.c3
-rw-r--r--sound/soc/qcom/sm8250.c1
-rw-r--r--sound/soc/sof/Kconfig2
-rw-r--r--sound/soc/sof/ipc4-topology.c4
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/endpoint.c25
-rw-r--r--sound/usb/endpoint.h6
-rw-r--r--sound/usb/pcm.c14
-rw-r--r--sound/usb/quirks.c4
-rw-r--r--sound/usb/stream.c9
-rwxr-xr-xtools/debugging/kernel-chktaint9
-rw-r--r--tools/hv/hv_kvp_daemon.c6
-rw-r--r--tools/include/uapi/asm/errno.h4
-rw-r--r--tools/lib/perf/evlist.c50
-rw-r--r--tools/perf/Makefile.perf24
-rw-r--r--tools/perf/builtin-c2c.c12
-rw-r--r--tools/perf/builtin-lock.c3
-rw-r--r--tools/perf/builtin-record.c34
-rw-r--r--tools/perf/builtin-script.c5
-rw-r--r--tools/perf/builtin-stat.c5
-rw-r--r--tools/perf/dlfilters/dlfilter-show-cycles.c4
-rw-r--r--tools/perf/util/affinity.c8
-rw-r--r--tools/perf/util/genelf.c20
-rw-r--r--tools/perf/util/metricgroup.c3
-rw-r--r--tools/testing/selftests/bpf/DENYLIST.s390x1
-rw-r--r--tools/testing/selftests/bpf/verifier/precise.c25
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile12
-rw-r--r--tools/testing/selftests/kvm/access_tracking_perf_test.c25
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_base.h4
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h6
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/evmcs.h45
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h8
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/svm_util.h7
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h54
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c20
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c39
-rw-r--r--tools/testing/selftests/kvm/lib/string_override.c39
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c40
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/svm.c14
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c20
-rw-r--r--tools/testing/selftests/kvm/rseq_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c134
-rw-r--r--tools/testing/selftests/kvm/x86_64/hyperv_features.c13
-rw-r--r--tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c295
-rw-r--r--tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c43
-rw-r--r--tools/testing/selftests/landlock/fs_test.c155
-rw-r--r--tools/testing/selftests/net/.gitignore50
-rw-r--r--tools/testing/selftests/net/io_uring_zerocopy_tx.c101
-rwxr-xr-xtools/testing/selftests/net/io_uring_zerocopy_tx.sh10
-rwxr-xr-xtools/testing/selftests/netfilter/nft_conntrack_helper.sh36
-rw-r--r--virt/kvm/kvm_main.c9
810 files changed, 9127 insertions, 5889 deletions
diff --git a/.mailmap b/.mailmap
index 8ded2e7c2906..952c89546c64 100644
--- a/.mailmap
+++ b/.mailmap
@@ -315,6 +315,7 @@ Morten Welinder <welinder@troll.com>
Mythri P K <mythripk@ti.com>
Nadia Yvette Chambers <nyc@holomorphy.com> William Lee Irwin III <wli@holomorphy.com>
Nathan Chancellor <nathan@kernel.org> <natechancellor@gmail.com>
+Neil Armstrong <neil.armstrong@linaro.org> <narmstrong@baylibre.com>
Nguyen Anh Quynh <aquynh@gmail.com>
Nicholas Piggin <npiggin@gmail.com> <npiggen@suse.de>
Nicholas Piggin <npiggin@gmail.com> <npiggin@kernel.dk>
@@ -332,6 +333,7 @@ Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
Oleksij Rempel <linux@rempel-privat.de> <fixed-term.Oleksij.Rempel@de.bosch.com>
Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
+Oliver Upton <oliver.upton@linux.dev> <oupton@google.com>
Pali Rohár <pali@kernel.org> <pali.rohar@gmail.com>
Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Patrick Mochel <mochel@digitalimplant.org>
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst
index caa3c09a5c3f..9eb6b9042f75 100644
--- a/Documentation/admin-guide/README.rst
+++ b/Documentation/admin-guide/README.rst
@@ -1,9 +1,9 @@
.. _readme:
-Linux kernel release 5.x <http://kernel.org/>
+Linux kernel release 6.x <http://kernel.org/>
=============================================
-These are the release notes for Linux version 5. Read them carefully,
+These are the release notes for Linux version 6. Read them carefully,
as they tell you what this is all about, explain how to install the
kernel, and what to do if something goes wrong.
@@ -63,7 +63,7 @@ Installing the kernel source
directory where you have permissions (e.g. your home directory) and
unpack it::
- xz -cd linux-5.x.tar.xz | tar xvf -
+ xz -cd linux-6.x.tar.xz | tar xvf -
Replace "X" with the version number of the latest kernel.
@@ -72,12 +72,12 @@ Installing the kernel source
files. They should match the library, and not get messed up by
whatever the kernel-du-jour happens to be.
- - You can also upgrade between 5.x releases by patching. Patches are
+ - You can also upgrade between 6.x releases by patching. Patches are
distributed in the xz format. To install by patching, get all the
newer patch files, enter the top level directory of the kernel source
- (linux-5.x) and execute::
+ (linux-6.x) and execute::
- xz -cd ../patch-5.x.xz | patch -p1
+ xz -cd ../patch-6.x.xz | patch -p1
Replace "x" for all versions bigger than the version "x" of your current
source tree, **in order**, and you should be ok. You may want to remove
@@ -85,13 +85,13 @@ Installing the kernel source
that there are no failed patches (some-file-name# or some-file-name.rej).
If there are, either you or I have made a mistake.
- Unlike patches for the 5.x kernels, patches for the 5.x.y kernels
+ Unlike patches for the 6.x kernels, patches for the 6.x.y kernels
(also known as the -stable kernels) are not incremental but instead apply
- directly to the base 5.x kernel. For example, if your base kernel is 5.0
- and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1
- and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and
- want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is,
- patch -R) **before** applying the 5.0.3 patch. You can read more on this in
+ directly to the base 6.x kernel. For example, if your base kernel is 6.0
+ and you want to apply the 6.0.3 patch, you must not first apply the 6.0.1
+ and 6.0.2 patches. Similarly, if you are running kernel version 6.0.2 and
+ want to jump to 6.0.3, you must first reverse the 6.0.2 patch (that is,
+ patch -R) **before** applying the 6.0.3 patch. You can read more on this in
:ref:`Documentation/process/applying-patches.rst <applying_patches>`.
Alternatively, the script patch-kernel can be used to automate this
@@ -114,7 +114,7 @@ Installing the kernel source
Software requirements
---------------------
- Compiling and running the 5.x kernels requires up-to-date
+ Compiling and running the 6.x kernels requires up-to-date
versions of various software packages. Consult
:ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers
required and how to get updates for these packages. Beware that using
@@ -132,12 +132,12 @@ Build directory for the kernel
place for the output files (including .config).
Example::
- kernel source code: /usr/src/linux-5.x
+ kernel source code: /usr/src/linux-6.x
build directory: /home/name/build/kernel
To configure and build the kernel, use::
- cd /usr/src/linux-5.x
+ cd /usr/src/linux-6.x
make O=/home/name/build/kernel menuconfig
make O=/home/name/build/kernel
sudo make O=/home/name/build/kernel modules_install install
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index be4a77baf784..7ce8130a8924 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1355,6 +1355,11 @@ PAGE_SIZE multiple when read back.
pagetables
Amount of memory allocated for page tables.
+ sec_pagetables
+ Amount of memory allocated for secondary page tables;
+ this currently includes KVM mmu allocations on x86
+ and arm64.
+
percpu (npn)
Amount of memory used for storing per-cpu kernel
data structures.
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index d52f572a9029..ca91ecc29078 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -50,10 +50,10 @@ For a short example, users can monitor the virtual address space of a given
workload as below. ::
# cd /sys/kernel/mm/damon/admin/
- # echo 1 > kdamonds/nr && echo 1 > kdamonds/0/contexts/nr
+ # echo 1 > kdamonds/nr_kdamonds && echo 1 > kdamonds/0/contexts/nr_contexts
# echo vaddr > kdamonds/0/contexts/0/operations
- # echo 1 > kdamonds/0/contexts/0/targets/nr
- # echo $(pidof <workload>) > kdamonds/0/contexts/0/targets/0/pid
+ # echo 1 > kdamonds/0/contexts/0/targets/nr_targets
+ # echo $(pidof <workload>) > kdamonds/0/contexts/0/targets/0/pid_target
# echo on > kdamonds/0/state
Files Hierarchy
@@ -366,12 +366,12 @@ memory rate becomes larger than 60%, or lower than 30%". ::
# echo 1 > kdamonds/0/contexts/0/schemes/nr_schemes
# cd kdamonds/0/contexts/0/schemes/0
# # set the basic access pattern and the action
- # echo 4096 > access_patterns/sz/min
- # echo 8192 > access_patterns/sz/max
- # echo 0 > access_patterns/nr_accesses/min
- # echo 5 > access_patterns/nr_accesses/max
- # echo 10 > access_patterns/age/min
- # echo 20 > access_patterns/age/max
+ # echo 4096 > access_pattern/sz/min
+ # echo 8192 > access_pattern/sz/max
+ # echo 0 > access_pattern/nr_accesses/min
+ # echo 5 > access_pattern/nr_accesses/max
+ # echo 10 > access_pattern/age/min
+ # echo 20 > access_pattern/age/max
# echo pageout > action
# # set quotas
# echo 10 > quotas/ms
diff --git a/Documentation/block/index.rst b/Documentation/block/index.rst
index 68f115f2b1c6..c4c73db748a8 100644
--- a/Documentation/block/index.rst
+++ b/Documentation/block/index.rst
@@ -23,3 +23,4 @@ Block
stat
switching-sched
writeback_cache_control
+ ublk
diff --git a/Documentation/block/ublk.rst b/Documentation/block/ublk.rst
new file mode 100644
index 000000000000..2122d1a4a541
--- /dev/null
+++ b/Documentation/block/ublk.rst
@@ -0,0 +1,253 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================
+Userspace block device driver (ublk driver)
+===========================================
+
+Overview
+========
+
+ublk is a generic framework for implementing block device logic from userspace.
+The motivation behind it is that moving virtual block drivers such as loop
+and nbd into userspace can be very helpful. It also makes it easier to
+implement new virtual block devices such as ublk-qcow2 (there have been
+several attempts at implementing a qcow2 driver in the kernel).
+
+Userspace block devices are attractive because:
+
+- They can be written in many programming languages.
+- They can use libraries that are not available in the kernel.
+- They can be debugged with tools familiar to application developers.
+- Crashes do not cause a kernel panic on the machine.
+- Bugs are likely to have a lower security impact than bugs in kernel
+ code.
+- They can be installed and updated independently of the kernel.
+- They can be used to easily simulate block devices with user-specified
+  parameters/settings for test/debug purposes.
+
+A ublk block device (``/dev/ublkb*``) is added by the ublk driver. Any IO
+request on the device is forwarded to the ublk userspace program. For
+convenience, this document uses ``ublk server`` to refer to a generic ublk
+userspace program. ``ublksrv`` [#userspace]_ is one such implementation. It
+provides the ``libublksrv`` [#userspace_lib]_ library for conveniently
+developing specific user block devices, and also includes generic block
+device types such as loop and null. Richard W.M. Jones wrote the userspace
+nbd device ``nbdublk`` [#userspace_nbdublk]_ based on ``libublksrv``
+[#userspace_lib]_.
+
+After the IO is handled by userspace, the result is committed back to the
+driver, thus completing the request cycle. This way, any specific IO handling
+logic is totally done by userspace, such as loop's IO handling, NBD's IO
+communication, or qcow2's IO mapping.
+
+``/dev/ublkb*`` is driven by a blk-mq request-based driver. Each request is
+assigned a queue-wide unique tag. The ublk server assigns a unique tag to
+each IO too, which is 1:1 mapped with the IO of ``/dev/ublkb*``.
+
+Both forwarding the IO request and committing the IO handling result are
+done via ``io_uring`` passthrough commands; that is why ublk is also an
+io_uring based block driver. It has been observed that io_uring passthrough
+commands can give better IOPS than block IO, which is one reason ublk is a
+high-performance implementation of a userspace block device: not only is IO
+request communication done via io_uring, but the preferred IO handling
+approach inside the ublk server is io_uring based too.
+
+ublk provides a control interface to set/get ublk block device parameters.
+The interface is extendable and kabi compatible: basically any parameter of
+the ublk request queue or any generic ublk feature parameter can be set/get
+via the interface. Thus, ublk is a generic userspace block device framework.
+For example, it is easy to set up a ublk device with specified block
+parameters from userspace.
+
+Using ublk
+==========
+
+ublk requires a userspace ublk server to handle the real block device logic.
+
+Below is an example of using ``ublksrv`` to provide a ublk-based loop device.
+
+- add a device::
+
+ ublk add -t loop -f ublk-loop.img
+
+- format with xfs, then use it::
+
+ mkfs.xfs /dev/ublkb0
+ mount /dev/ublkb0 /mnt
+ # do anything. all IOs are handled by io_uring
+ ...
+ umount /mnt
+
+- list the devices with their info::
+
+ ublk list
+
+- delete the device::
+
+ ublk del -a
+ ublk del -n $ublk_dev_id
+
+See the README of ``ublksrv`` [#userspace_readme]_ for usage details.
+
+Design
+======
+
+Control plane
+-------------
+
+The ublk driver provides a global misc device node (``/dev/ublk-control``)
+for managing and controlling ublk devices with the help of several control
+commands:
+
+- ``UBLK_CMD_ADD_DEV``
+
+ Add a ublk char device (``/dev/ublkc*``) that the ublk server talks to
+ for IO command communication. Basic device info is sent together with this
+ command. It sets the UAPI structure ``ublksrv_ctrl_dev_info``, including
+ ``nr_hw_queues``, ``queue_depth``, and the max IO request buffer size; the
+ info is negotiated with the driver and sent back to the server. Once this
+ command completes, the basic device info is immutable.
+
+- ``UBLK_CMD_SET_PARAMS`` / ``UBLK_CMD_GET_PARAMS``
+
+ Set or get parameters of the device, which can be either generic feature
+ related, or request queue limit related, but can't be IO logic specific,
+ because the driver does not handle any IO logic. This command has to be
+ sent before sending ``UBLK_CMD_START_DEV``.
+
+- ``UBLK_CMD_START_DEV``
+
+ After the server prepares userspace resources (such as creating per-queue
+ pthread & io_uring for handling ublk IO), this command is sent to the
+ driver for allocating & exposing ``/dev/ublkb*``. Parameters set via
+ ``UBLK_CMD_SET_PARAMS`` are applied for creating the device.
+
+- ``UBLK_CMD_STOP_DEV``
+
+ Halt IO on ``/dev/ublkb*`` and remove the device. When this command
+ returns, the ublk server will release resources (such as destroying the
+ per-queue pthread & io_uring).
+
+- ``UBLK_CMD_DEL_DEV``
+
+ Remove ``/dev/ublkc*``. When this command returns, the allocated ublk device
+ number can be reused.
+
+- ``UBLK_CMD_GET_QUEUE_AFFINITY``
+
+ When ``/dev/ublkc*`` is added, the driver creates the block layer tagset,
+ so that each queue's affinity info is available. The server sends
+ ``UBLK_CMD_GET_QUEUE_AFFINITY`` to retrieve the queue affinity info, with
+ which it can set up the per-queue context efficiently, such as binding
+ affine CPUs to the IO pthread and trying to allocate buffers in the IO
+ thread context.
+
+- ``UBLK_CMD_GET_DEV_INFO``
+
+ For retrieving device info via ``ublksrv_ctrl_dev_info``. It is the server's
+ responsibility to save IO target specific info in userspace.
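+
+As an illustration of how the control plane is driven, the sketch below
+issues a single control command on ``/dev/ublk-control`` through an
+``io_uring`` passthrough SQE. It is a minimal, hypothetical example: the
+helper name is made up, error handling is omitted, and the ring is assumed
+to be created with ``IORING_SETUP_SQE128``. See ``ublksrv`` for a complete
+implementation::
+
+    #include <liburing.h>
+    #include <linux/ublk_cmd.h>
+    #include <string.h>
+
+    /* Send one control command; return the driver's result (cqe->res). */
+    static int ublk_ctrl_cmd(struct io_uring *ring, int ctrl_fd,
+                             unsigned int cmd_op,
+                             const struct ublksrv_ctrl_cmd *cmd)
+    {
+            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
+            struct io_uring_cqe *cqe;
+            int ret;
+
+            memset(sqe, 0, 2 * sizeof(*sqe));   /* SQE128: clear it all */
+            sqe->opcode = IORING_OP_URING_CMD;
+            sqe->fd = ctrl_fd;                  /* open("/dev/ublk-control") */
+            sqe->cmd_op = cmd_op;               /* e.g. UBLK_CMD_GET_DEV_INFO */
+            memcpy(sqe->cmd, cmd, sizeof(*cmd)); /* ublksrv_ctrl_cmd payload */
+
+            io_uring_submit(ring);
+            ret = io_uring_wait_cqe(ring, &cqe);
+            if (ret < 0)
+                    return ret;
+            ret = cqe->res;                     /* driver's command result */
+            io_uring_cqe_seen(ring, cqe);
+            return ret;
+    }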
+
+Data plane
+----------
+
+The ublk server needs to create a per-queue IO pthread & io_uring for
+handling IO commands via io_uring passthrough. The per-queue IO pthread
+focuses on IO handling and shouldn't handle any control & management
+tasks.
+
+Each IO is assigned a unique tag, which is 1:1 mapped with an IO
+request of ``/dev/ublkb*``.
+
+The UAPI structure ``ublksrv_io_desc`` is defined to describe each IO from
+the driver. A fixed mmapped area (array) on ``/dev/ublkc*`` is provided for
+exporting IO info to the server, such as IO offset, length, OP/flags and
+buffer address. Each ``ublksrv_io_desc`` instance can be indexed directly
+via queue id and IO tag.
+
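+For instance, a server might look up the descriptor for one IO along these
+lines (an illustrative sketch; it assumes the per-queue descriptor array
+has already been mmapped from ``/dev/ublkc*``, and the helper name is made
+up)::
+
+    #include <linux/ublk_cmd.h>
+
+    /* descs points at this queue's mmapped ublksrv_io_desc array */
+    static const struct ublksrv_io_desc *
+    ublk_get_iod(const struct ublksrv_io_desc *descs, unsigned int tag)
+    {
+            /* one descriptor per tag, so the tag is the array index */
+            return &descs[tag];
+    }
+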
+The following IO commands are communicated via the io_uring passthrough
+command, and each command is only for forwarding the IO and committing the
+result for the IO tag specified in the command data:
+
+- ``UBLK_IO_FETCH_REQ``
+
+ Sent from the server IO pthread for fetching future incoming IO requests
+ destined to ``/dev/ublkb*``. This command is sent only once per IO tag
+ from the server IO pthread, and lets the ublk driver set up the IO
+ forwarding environment.
+
+- ``UBLK_IO_COMMIT_AND_FETCH_REQ``
+
+ When an IO request is destined to ``/dev/ublkb*``, the driver stores
+ the IO's ``ublksrv_io_desc`` in the specified mapped area; then the
+ previously received IO command for this IO tag (either ``UBLK_IO_FETCH_REQ``
+ or ``UBLK_IO_COMMIT_AND_FETCH_REQ``) is completed, so the server gets
+ the IO notification via io_uring.
+
+ After the server handles the IO, its result is committed back to the
+ driver by sending ``UBLK_IO_COMMIT_AND_FETCH_REQ`` back. Once the ublk
+ driver receives this command, it parses the result and completes the
+ request on ``/dev/ublkb*``, and in the meantime sets up the environment
+ for fetching future requests with the same IO tag. That is,
+ ``UBLK_IO_COMMIT_AND_FETCH_REQ`` is reused both for fetching a request and
+ for committing back the IO result (see the sketch after this list).
+
+- ``UBLK_IO_NEED_GET_DATA``
+
+ With ``UBLK_F_NEED_GET_DATA`` enabled, a WRITE request is first issued to
+ the ublk server without a data copy. The IO backend of the ublk server
+ then receives the request, allocates a data buffer and embeds its address
+ inside this new IO command. After the kernel driver gets the command, the
+ data is copied from the request pages to this backend buffer. Finally, the
+ backend receives the request again, now with the data to be written, and
+ can truly handle the request.
+
+ ``UBLK_IO_NEED_GET_DATA`` adds one additional round-trip and one
+ io_uring_enter() syscall. Any user who thinks it may lower performance
+ should not enable ``UBLK_F_NEED_GET_DATA``. The ublk server pre-allocates
+ an IO buffer for each IO by default. Any new project should try to use
+ this buffer to communicate with the ublk driver. However, existing projects
+ may break or be unable to consume the new buffer interface; that is why
+ this command was added for backwards compatibility, so that existing
+ projects can still consume their existing buffers.
+
+- data copy between ublk server IO buffer and ublk block IO request
+
+ For a WRITE request, the driver needs to copy the block IO request pages
+ into the server buffer (pages) first, before notifying the server of the
+ coming IO, so that the server can handle the WRITE request.
+
+ When the server handles a READ request and sends
+ ``UBLK_IO_COMMIT_AND_FETCH_REQ`` to the driver, the ublk driver needs to
+ copy the server buffer (pages) that was read into the IO request pages.
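+
+The following sketch shows how a server might queue these IO commands (a
+simplified, hypothetical helper: field names follow the UAPI
+``ublksrv_io_cmd`` in ``<linux/ublk_cmd.h>``, the ring is assumed to be
+created with ``IORING_SETUP_SQE128``, and submission/reaping is left out)::
+
+    #include <liburing.h>
+    #include <linux/ublk_cmd.h>
+    #include <string.h>
+
+    /* Queue UBLK_IO_FETCH_REQ once per tag at startup; afterwards queue
+     * UBLK_IO_COMMIT_AND_FETCH_REQ with the IO result, which both commits
+     * the result and re-arms the tag for the next request. */
+    static void ublk_queue_io_cmd(struct io_uring *ring, int ublkc_fd,
+                                  unsigned int cmd_op, __u16 q_id, __u16 tag,
+                                  __s32 result, __u64 buf_addr)
+    {
+            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
+            struct ublksrv_io_cmd *cmd;
+
+            memset(sqe, 0, 2 * sizeof(*sqe));   /* SQE128: clear it all */
+            sqe->opcode = IORING_OP_URING_CMD;
+            sqe->fd = ublkc_fd;                 /* open("/dev/ublkcN") */
+            sqe->cmd_op = cmd_op;
+            sqe->user_data = tag;               /* find the IO on completion */
+
+            cmd = (struct ublksrv_io_cmd *)sqe->cmd;
+            cmd->q_id = q_id;
+            cmd->tag = tag;
+            cmd->result = result;               /* ignored for FETCH_REQ */
+            cmd->addr = buf_addr;               /* server IO buffer for tag */
+    }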
+
+Future development
+==================
+
+Container-aware ublk device
+---------------------------
+
+The ublk driver doesn't handle any IO logic. Its function is well defined
+for now, and only very limited userspace interfaces are needed, which are
+also well defined. It is possible to make ublk devices container-aware
+block devices in the future, as Stefan Hajnoczi suggested [#stefan]_, by
+removing the ADMIN privilege requirement.
+
+Zero copy
+---------
+
+Zero copy is a generic requirement for nbd, fuse and similar drivers. A
+problem [#xiaoguang]_ Xiaoguang mentioned is that pages mapped to userspace
+can't be remapped any more in the kernel with existing mm interfaces. This
+can occur when direct IO is issued to ``/dev/ublkb*``. Also, he reported
+that big requests (IO size >= 256 KB) may benefit a lot from zero copy.
+
+
+References
+==========
+
+.. [#userspace] https://github.com/ming1/ubdsrv
+
+.. [#userspace_lib] https://github.com/ming1/ubdsrv/tree/master/lib
+
+.. [#userspace_nbdublk] https://gitlab.com/rwmjones/libnbd/-/tree/nbdublk
+
+.. [#userspace_readme] https://github.com/ming1/ubdsrv/blob/master/README
+
+.. [#stefan] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/
+
+.. [#xiaoguang] https://lore.kernel.org/linux-block/YoOr6jBfgVm8GvWg@stefanha-x1.localdomain/
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 934727e23e0e..255384d094bf 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -86,6 +86,7 @@ if major >= 3:
"__used",
"__weak",
"noinline",
+ "__fix_address",
# include/linux/memblock.h:
"__init_memblock",
diff --git a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
index 6cc74523ebfd..1748f1605cc7 100644
--- a/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
+++ b/Documentation/devicetree/bindings/arm/amlogic/amlogic,meson-gx-ao-secure.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson Firmware registers Interface
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
The Meson SoCs have a register bank with status and data shared with the
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index 2e208d2fc98f..7cdffdb131ac 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
allOf:
- $ref: /schemas/sound/name-prefix.yaml#
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index 047fd69e0377..6655a93b1874 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson Display Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
The Amlogic Meson Display controller is composed of several components
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
index bce96b5b0db0..4a5e5d9d6f90 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
@@ -8,7 +8,7 @@ title: Analogix ANX7814 SlimPort (Full-HD Transmitter)
maintainers:
- Andrzej Hajda <andrzej.hajda@intel.com>
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
- Robert Foss <robert.foss@linaro.org>
properties:
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
index c6e81f532215..1b2185be92cd 100644
--- a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
@@ -8,7 +8,7 @@ title: ITE it66121 HDMI bridge Device Tree Bindings
maintainers:
- Phong LE <ple@baylibre.com>
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
The IT66121 is a high-performance and low-power single channel HDMI
diff --git a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
index 44e02decdf3a..2e75e3738ff0 100644
--- a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Solomon Goldentek Display GKTW70SDAE4SE 7" WVGA LVDS Display Panel
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
- Thierry Reding <thierry.reding@gmail.com>
allOf:
diff --git a/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml b/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml
index b79f069a04c2..8ea97e774364 100644
--- a/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml
+++ b/Documentation/devicetree/bindings/hwmon/moortec,mr75203.yaml
@@ -48,7 +48,6 @@ required:
- compatible
- reg
- reg-names
- - intel,vm-map
- clocks
- resets
- "#thermal-sensor-cells"
diff --git a/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml b/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
index 6ecb0270d88d..199a354ccb97 100644
--- a/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/amlogic,meson6-i2c.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson I2C Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
- Beniamino Galvani <b.galvani@gmail.com>
allOf:
diff --git a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
index 2f315489aaae..d3c0d5c427ac 100644
--- a/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
+++ b/Documentation/devicetree/bindings/i2c/renesas,riic.yaml
@@ -60,6 +60,9 @@ properties:
power-domains:
maxItems: 1
+ resets:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
index b6bbc312a7cf..1414ba9977c1 100644
--- a/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
+++ b/Documentation/devicetree/bindings/iio/gyroscope/bosch,bmg160.yaml
@@ -24,8 +24,10 @@ properties:
interrupts:
minItems: 1
+ maxItems: 2
description:
Should be configured with type IRQ_TYPE_EDGE_RISING.
+    If two interrupts are provided, the expected order is INT1 and INT2.
required:
- compatible
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
index 93f2ce3130ae..19ac9da421df 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -16,6 +16,7 @@ properties:
compatible:
enum:
- goodix,gt1151
+ - goodix,gt1158
- goodix,gt5663
- goodix,gt5688
- goodix,gt911
diff --git a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
index 09c8948b5e25..fa4f7685ab2b 100644
--- a/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
+++ b/Documentation/devicetree/bindings/interconnect/fsl,imx8m-noc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Generic i.MX bus frequency device
maintainers:
- - Leonard Crestez <leonard.crestez@nxp.com>
+ - Peng Fan <peng.fan@nxp.com>
description: |
The i.MX SoC family has multiple buses for which clock frequency (and
diff --git a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
index 85c85b694217..e18107eafe7c 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
@@ -96,7 +96,7 @@ properties:
Documentation/devicetree/bindings/arm/cpus.yaml).
required:
- - fiq-index
+ - apple,fiq-index
- cpus
required:
diff --git a/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml b/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
index ea06976fbbc7..dfd26b998189 100644
--- a/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
+++ b/Documentation/devicetree/bindings/mailbox/amlogic,meson-gxbb-mhu.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson Message-Handling-Unit Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
The Amlogic's Meson SoCs Message-Handling-Unit (MHU) is a mailbox controller
diff --git a/Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml b/Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
index bee93bd84771..e551be5e680e 100644
--- a/Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,axg-ge2d.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic GE2D Acceleration Unit
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
index 5044c4bb94e0..b827edabcafa 100644
--- a/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Video Decoder
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
- Maxime Jourdan <mjourdan@baylibre.com>
description: |
diff --git a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
index d93aea6a0258..8d844f4312d1 100644
--- a/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
+++ b/Documentation/devicetree/bindings/media/amlogic,meson-gx-ao-cec.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson AO-CEC Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
The Amlogic Meson AO-CEC module is present is Amlogic SoCs and its purpose is
diff --git a/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml b/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
index a3b976f101e8..5750cc06e923 100644
--- a/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
+++ b/Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Khadas on-board Microcontroller Device Tree Bindings
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
Khadas embeds a microcontroller on their VIM and Edge boards adding some
diff --git a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
index 608e1d62bed5..ddd5a073c3a8 100644
--- a/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/amlogic,meson-dwmac.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson DWMAC Ethernet controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
- Martin Blumenstingl <martin.blumenstingl@googlemail.com>
# We need a select here so we don't match all nodes with 'snps,dwmac'
diff --git a/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
index be485f500887..5eddaed3d853 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,axg-mipi-dphy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic AXG MIPI D-PHY
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
index 399ebde45409..f3a5fbabbbb5 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic G12A USB2 PHY
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
index 453c083cf44c..868b4e6fde71 100644
--- a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic G12A USB3 + PCIE Combo PHY
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
index 33d1d37fdf6d..624e14f00790 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-lpass-lpi-pinctrl.yaml
@@ -8,7 +8,6 @@ title: Qualcomm Technologies, Inc. Low Power Audio SubSystem (LPASS)
Low Power Island (LPI) TLMM block
maintainers:
- - Srinivasa Rao Mandadapu <srivasam@codeaurora.org>
- Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
description: |
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
index 2d228164357c..2bd60c49a442 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Technologies, Inc. SC7280 TLMM block
maintainers:
- - Rajendra Nayak <rnayak@codeaurora.org>
+ - Bjorn Andersson <andersson@kernel.org>
description: |
This binding describes the Top Level Mode Multiplexer block found in the
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
index 5390e988a934..43a932237a92 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Amlogic Meson Everything-Else Power Domains
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |+
The Everything-Else Power Domains node should be the child of a syscon
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
index 0ccca493251a..3934a2b44894 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm RPM/RPMh Power domains
maintainers:
- - Rajendra Nayak <rnayak@codeaurora.org>
+ - Bjorn Andersson <andersson@kernel.org>
description:
For RPM/RPMh Power domains, we communicate a performance state to RPM/RPMh
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml
index 8b7c4af4b551..faa4af9fd035 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.yaml
@@ -35,6 +35,7 @@ patternProperties:
description: List of regulators and its properties
type: object
$ref: regulator.yaml#
+ unevaluatedProperties: false
properties:
qcom,ocp-max-retries:
@@ -100,8 +101,6 @@ patternProperties:
SAW controlled gang leader. Will be configured as SAW regulator.
type: boolean
- unevaluatedProperties: false
-
required:
- compatible
diff --git a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
index 494a454928ce..98db2aa74dc8 100644
--- a/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
+++ b/Documentation/devicetree/bindings/reset/amlogic,meson-reset.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson SoC Reset Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
index 69cdab18d629..ca3b9be58058 100644
--- a/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
+++ b/Documentation/devicetree/bindings/riscv/sifive-l2-cache.yaml
@@ -17,9 +17,6 @@ description:
acts as directory-based coherency manager.
All the properties in ePAPR/DeviceTree specification applies for this platform.
-allOf:
- - $ref: /schemas/cache-controller.yaml#
-
select:
properties:
compatible:
@@ -33,11 +30,16 @@ select:
properties:
compatible:
- items:
- - enum:
- - sifive,fu540-c000-ccache
- - sifive,fu740-c000-ccache
- - const: cache
+ oneOf:
+ - items:
+ - enum:
+ - sifive,fu540-c000-ccache
+ - sifive,fu740-c000-ccache
+ - const: cache
+ - items:
+ - const: microchip,mpfs-ccache
+ - const: sifive,fu540-c000-ccache
+ - const: cache
cache-block-size:
const: 64
@@ -72,29 +74,46 @@ properties:
The reference to the reserved-memory for the L2 Loosely Integrated Memory region.
The reserved memory node should be defined as per the bindings in reserved-memory.txt.
-if:
- properties:
- compatible:
- contains:
- const: sifive,fu540-c000-ccache
+allOf:
+ - $ref: /schemas/cache-controller.yaml#
-then:
- properties:
- interrupts:
- description: |
- Must contain entries for DirError, DataError and DataFail signals.
- maxItems: 3
- cache-sets:
- const: 1024
-
-else:
- properties:
- interrupts:
- description: |
- Must contain entries for DirError, DataError, DataFail, DirFail signals.
- minItems: 4
- cache-sets:
- const: 2048
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - sifive,fu740-c000-ccache
+ - microchip,mpfs-ccache
+
+ then:
+ properties:
+ interrupts:
+ description: |
+ Must contain entries for DirError, DataError, DataFail, DirFail signals.
+ minItems: 4
+
+ else:
+ properties:
+ interrupts:
+ description: |
+ Must contain entries for DirError, DataError and DataFail signals.
+ maxItems: 3
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: sifive,fu740-c000-ccache
+
+ then:
+ properties:
+ cache-sets:
+ const: 2048
+
+ else:
+ properties:
+ cache-sets:
+ const: 1024
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
index 444be32a8a29..09c6c906b1f9 100644
--- a/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
+++ b/Documentation/devicetree/bindings/rng/amlogic,meson-rng.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson Random number generator
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
index 72e8868db3e0..7822705ad16c 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson SoC UART Serial Interface
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
The Amlogic Meson SoC UART Serial Interface is present on a large range
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
index 17db87cb9dab..c3c599096353 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Canvas Video Lookup Table
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
- Maxime Jourdan <mjourdan@baylibre.com>
description: |
diff --git a/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml b/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
index 50de0da42c13..0c10f7678178 100644
--- a/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
+++ b/Documentation/devicetree/bindings/spi/amlogic,meson-gx-spicc.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson SPI Communication Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
allOf:
- $ref: "spi-controller.yaml#"
diff --git a/Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml b/Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml
index 8a9d526d06eb..ac3b2ec300ac 100644
--- a/Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml
+++ b/Documentation/devicetree/bindings/spi/amlogic,meson6-spifc.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson SPI Flash Controller
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
allOf:
- $ref: "spi-controller.yaml#"
diff --git a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
index e349fa5de606..daf2a859418d 100644
--- a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
+++ b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Amlogic Meson G12A DWC3 USB SoC Controller Glue
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
description: |
The Amlogic G12A embeds a DWC3 USB IP Core configured for USB2 and USB3
diff --git a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
index e63b66545317..b019d490170d 100644
--- a/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
+++ b/Documentation/devicetree/bindings/usb/mediatek,mtu3.yaml
@@ -24,6 +24,7 @@ properties:
- mediatek,mt2712-mtu3
- mediatek,mt8173-mtu3
- mediatek,mt8183-mtu3
+ - mediatek,mt8188-mtu3
- mediatek,mt8192-mtu3
- mediatek,mt8195-mtu3
- const: mediatek,mtu3
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index fea3e7092ace..cd2f7cb6745a 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -33,6 +33,7 @@ properties:
- qcom,sm6115-dwc3
- qcom,sm6125-dwc3
- qcom,sm6350-dwc3
+ - qcom,sm6375-dwc3
- qcom,sm8150-dwc3
- qcom,sm8250-dwc3
- qcom,sm8350-dwc3
@@ -108,12 +109,17 @@ properties:
HS/FS/LS modes are supported.
type: boolean
+ wakeup-source: true
+
# Required child node:
patternProperties:
"^usb@[0-9a-f]+$":
$ref: snps,dwc3.yaml#
+ properties:
+ wakeup-source: false
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
index c7459cf70e30..497d60408ea0 100644
--- a/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
@@ -8,7 +8,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Meson GXBB SoCs Watchdog timer
maintainers:
- - Neil Armstrong <narmstrong@baylibre.com>
+ - Neil Armstrong <neil.armstrong@linaro.org>
allOf:
- $ref: watchdog.yaml#
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index e7aafc82be99..898c99eae8e4 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -982,6 +982,7 @@ Example output. You may not have all of these fields.
SUnreclaim: 142336 kB
KernelStack: 11168 kB
PageTables: 20540 kB
+ SecPageTables: 0 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
@@ -1090,6 +1091,9 @@ KernelStack
Memory consumed by the kernel stacks of all tasks
PageTables
Memory consumed by userspace page tables
+SecPageTables
+              Memory consumed by secondary page tables; this currently
+              includes KVM MMU allocations on x86 and arm64.
NFS_Unstable
Always zero. Previous counted pages which had been written to
the server, but has not been committed to stable storage.
diff --git a/Documentation/i2c/busses/i2c-piix4.rst b/Documentation/i2c/busses/i2c-piix4.rst
index cc9000259223..07fe6f6f4b18 100644
--- a/Documentation/i2c/busses/i2c-piix4.rst
+++ b/Documentation/i2c/busses/i2c-piix4.rst
@@ -64,7 +64,7 @@ correct address for this module, you could get in big trouble (read:
crashes, data corruption, etc.). Try this only as a last resort (try BIOS
updates first, for example), and backup first! An even more dangerous
option is 'force_addr=<IOPORT>'. This will not only enable the PIIX4 like
-'force' foes, but it will also set a new base I/O port address. The SMBus
+'force' does, but it will also set a new base I/O port address. The SMBus
parts of the PIIX4 needs a range of 8 of these addresses to function
correctly. If these addresses are already reserved by some other device,
you will get into big trouble! DON'T USE THIS IF YOU ARE NOT VERY SURE
@@ -86,15 +86,15 @@ If you own Force CPCI735 motherboard or other OSB4 based systems you may need
to change the SMBus Interrupt Select register so the SMBus controller uses
the SMI mode.
-1) Use lspci command and locate the PCI device with the SMBus controller:
+1) Use the ``lspci`` command and locate the PCI device with the SMBus controller:
00:0f.0 ISA bridge: ServerWorks OSB4 South Bridge (rev 4f)
The line may vary for different chipsets. Please consult the driver source
- for all possible PCI ids (and lspci -n to match them). Lets assume the
+ for all possible PCI ids (and ``lspci -n`` to match them). Let's assume the
device is located at 00:0f.0.
2) Now you just need to change the value in 0xD2 register. Get it first with
- command: lspci -xxx -s 00:0f.0
+ command: ``lspci -xxx -s 00:0f.0``
If the value is 0x3 then you need to change it to 0x1:
- setpci -s 00:0f.0 d2.b=1
+ ``setpci -s 00:0f.0 d2.b=1``
Please note that you don't need to do that in all cases, just when the SMBus is
not working properly.
@@ -109,6 +109,3 @@ which can easily get corrupted due to a state machine bug. These are mostly
Thinkpad laptops, but desktop systems may also be affected. We have no list
of all affected systems, so the only safe solution was to prevent access to
the SMBus on all IBM systems (detected using DMI data.)
-
-For additional information, read:
-http://www.lm-sensors.org/browser/lm-sensors/trunk/README
diff --git a/Documentation/i2c/i2c-topology.rst b/Documentation/i2c/i2c-topology.rst
index 7cb53819778e..48fce0f7491b 100644
--- a/Documentation/i2c/i2c-topology.rst
+++ b/Documentation/i2c/i2c-topology.rst
@@ -5,6 +5,8 @@ I2C muxes and complex topologies
There are a couple of reasons for building more complex I2C topologies
than a straight-forward I2C bus with one adapter and one or more devices.
+Some example use cases are:
+
1. A mux may be needed on the bus to prevent address collisions.
2. The bus may be accessible from some external bus master, and arbitration
@@ -14,10 +16,10 @@ than a straight-forward I2C bus with one adapter and one or more devices.
from the I2C bus, at least most of the time, and sits behind a gate
that has to be operated before the device can be accessed.
-Etc
-===
+Several types of hardware components, such as I2C muxes, I2C gates and I2C
+arbitrators, can be used to handle such needs.
-These constructs are represented as I2C adapter trees by Linux, where
+These components are represented as I2C adapter trees by Linux, where
each adapter has a parent adapter (except the root adapter) and zero or
more child adapters. The root adapter is the actual adapter that issues
I2C transfers, and all adapters with a parent are part of an "i2c-mux"
@@ -35,46 +37,7 @@ Locking
=======
There are two variants of locking available to I2C muxes, they can be
-mux-locked or parent-locked muxes. As is evident from below, it can be
-useful to know if a mux is mux-locked or if it is parent-locked. The
-following list was correct at the time of writing:
-
-In drivers/i2c/muxes/:
-
-====================== =============================================
-i2c-arb-gpio-challenge Parent-locked
-i2c-mux-gpio Normally parent-locked, mux-locked iff
- all involved gpio pins are controlled by the
- same I2C root adapter that they mux.
-i2c-mux-gpmux Normally parent-locked, mux-locked iff
- specified in device-tree.
-i2c-mux-ltc4306 Mux-locked
-i2c-mux-mlxcpld Parent-locked
-i2c-mux-pca9541 Parent-locked
-i2c-mux-pca954x Parent-locked
-i2c-mux-pinctrl Normally parent-locked, mux-locked iff
- all involved pinctrl devices are controlled
- by the same I2C root adapter that they mux.
-i2c-mux-reg Parent-locked
-====================== =============================================
-
-In drivers/iio/:
-
-====================== =============================================
-gyro/mpu3050 Mux-locked
-imu/inv_mpu6050/ Mux-locked
-====================== =============================================
-
-In drivers/media/:
-
-======================= =============================================
-dvb-frontends/lgdt3306a Mux-locked
-dvb-frontends/m88ds3103 Parent-locked
-dvb-frontends/rtl2830 Parent-locked
-dvb-frontends/rtl2832 Mux-locked
-dvb-frontends/si2168 Mux-locked
-usb/cx231xx/ Parent-locked
-======================= =============================================
+mux-locked or parent-locked muxes.
Mux-locked muxes
@@ -89,40 +52,8 @@ full transaction, unrelated I2C transfers may interleave the different
stages of the transaction. This has the benefit that the mux driver
may be easier and cleaner to implement, but it has some caveats.
-==== =====================================================================
-ML1. If you build a topology with a mux-locked mux being the parent
- of a parent-locked mux, this might break the expectation from the
- parent-locked mux that the root adapter is locked during the
- transaction.
-
-ML2. It is not safe to build arbitrary topologies with two (or more)
- mux-locked muxes that are not siblings, when there are address
- collisions between the devices on the child adapters of these
- non-sibling muxes.
-
- I.e. the select-transfer-deselect transaction targeting e.g. device
- address 0x42 behind mux-one may be interleaved with a similar
- operation targeting device address 0x42 behind mux-two. The
- intension with such a topology would in this hypothetical example
- be that mux-one and mux-two should not be selected simultaneously,
- but mux-locked muxes do not guarantee that in all topologies.
-
-ML3. A mux-locked mux cannot be used by a driver for auto-closing
- gates/muxes, i.e. something that closes automatically after a given
- number (one, in most cases) of I2C transfers. Unrelated I2C transfers
- may creep in and close prematurely.
-
-ML4. If any non-I2C operation in the mux driver changes the I2C mux state,
- the driver has to lock the root adapter during that operation.
- Otherwise garbage may appear on the bus as seen from devices
- behind the mux, when an unrelated I2C transfer is in flight during
- the non-I2C mux-changing operation.
-==== =====================================================================
-
-
Mux-locked Example
-------------------
-
+~~~~~~~~~~~~~~~~~~
::
@@ -153,6 +84,43 @@ This means that accesses to D2 are locked out for the full duration
of the entire operation. But accesses to D3 are possibly interleaved
at any point.
+Mux-locked Caveats
+~~~~~~~~~~~~~~~~~~
+
+When using a mux-locked mux, be aware of the following restrictions:
+
+[ML1]
+ If you build a topology with a mux-locked mux being the parent
+ of a parent-locked mux, this might break the expectation from the
+ parent-locked mux that the root adapter is locked during the
+ transaction.
+
+[ML2]
+ It is not safe to build arbitrary topologies with two (or more)
+ mux-locked muxes that are not siblings, when there are address
+ collisions between the devices on the child adapters of these
+ non-sibling muxes.
+
+  I.e. the select-transfer-deselect transaction targeting e.g. device
+  address 0x42 behind mux-one may be interleaved with a similar
+  operation targeting device address 0x42 behind mux-two. In this
+  hypothetical example, the intent of such a topology would be that
+  mux-one and mux-two are never selected simultaneously, but
+  mux-locked muxes do not guarantee that in all topologies.
+
+[ML3]
+ A mux-locked mux cannot be used by a driver for auto-closing
+ gates/muxes, i.e. something that closes automatically after a given
+ number (one, in most cases) of I2C transfers. Unrelated I2C transfers
+ may creep in and close prematurely.
+
+[ML4]
+ If any non-I2C operation in the mux driver changes the I2C mux state,
+ the driver has to lock the root adapter during that operation.
+ Otherwise garbage may appear on the bus as seen from devices
+ behind the mux, when an unrelated I2C transfer is in flight during
+  the non-I2C mux-changing operation; see the sketch below.
+
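+As an illustration of [ML4], here is a minimal sketch (not taken from
+any real driver; the ``example_*`` names are hypothetical) of a mux
+driver locking the root adapter around a non-I2C operation that changes
+the mux state::
+
+  /* Assumes <linux/i2c.h> and <linux/i2c-mux.h>. */
+  static void example_set_mux_out_of_band(struct i2c_mux_core *muxc, u32 chan)
+  {
+          struct i2c_adapter *root = i2c_root_adapter(&muxc->parent->dev);
+
+          /* Keep unrelated I2C transfers off the bus while the mux flips. */
+          i2c_lock_bus(root, I2C_LOCK_ROOT_ADAPTER);
+          example_write_mux_register(muxc, chan); /* hypothetical non-I2C write */
+          i2c_unlock_bus(root, I2C_LOCK_ROOT_ADAPTER);
+  }
+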
Parent-locked muxes
-------------------
@@ -161,28 +129,10 @@ Parent-locked muxes lock the parent adapter during the full select-
transfer-deselect transaction. The implication is that the mux driver
has to ensure that any and all I2C transfers through that parent
adapter during the transaction are unlocked I2C transfers (using e.g.
-__i2c_transfer), or a deadlock will follow. There are a couple of
-caveats.
-
-==== ====================================================================
-PL1. If you build a topology with a parent-locked mux being the child
- of another mux, this might break a possible assumption from the
- child mux that the root adapter is unused between its select op
- and the actual transfer (e.g. if the child mux is auto-closing
- and the parent mux issues I2C transfers as part of its select).
- This is especially the case if the parent mux is mux-locked, but
- it may also happen if the parent mux is parent-locked.
-
-PL2. If select/deselect calls out to other subsystems such as gpio,
- pinctrl, regmap or iio, it is essential that any I2C transfers
- caused by these subsystems are unlocked. This can be convoluted to
- accomplish, maybe even impossible if an acceptably clean solution
- is sought.
-==== ====================================================================
-
+__i2c_transfer), or a deadlock will follow.
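+
+For instance, a parent-locked select callback has to issue its
+channel-select write with ``__i2c_transfer()``, because the I2C core
+already holds the adapter lock at that point. A minimal sketch, assuming
+a register-based mux (``EXAMPLE_CHAN_REG`` is a hypothetical register
+address)::
+
+  static int example_mux_select(struct i2c_mux_core *muxc, u32 chan)
+  {
+          struct i2c_client *client = i2c_mux_priv(muxc);
+          u8 buf[2] = { EXAMPLE_CHAN_REG, chan };
+          struct i2c_msg msg = {
+                  .addr = client->addr,
+                  .len = sizeof(buf),
+                  .buf = buf,
+          };
+
+          /* i2c_transfer() would deadlock here: the lock is already held. */
+          return __i2c_transfer(muxc->parent, &msg, 1) == 1 ? 0 : -EIO;
+  }
+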
Parent-locked Example
----------------------
+~~~~~~~~~~~~~~~~~~~~~
::
@@ -212,10 +162,30 @@ When there is an access to D1, this happens:
9. M1 unlocks its parent adapter.
10. M1 unlocks muxes on its parent.
-
This means that accesses to both D2 and D3 are locked out for the full
duration of the entire operation.
+Parent-locked Caveats
+~~~~~~~~~~~~~~~~~~~~~
+
+When using a parent-locked mux, be aware of the following restrictions:
+
+[PL1]
+ If you build a topology with a parent-locked mux being the child
+ of another mux, this might break a possible assumption from the
+ child mux that the root adapter is unused between its select op
+ and the actual transfer (e.g. if the child mux is auto-closing
+ and the parent mux issues I2C transfers as part of its select).
+ This is especially the case if the parent mux is mux-locked, but
+ it may also happen if the parent mux is parent-locked.
+
+[PL2]
+ If select/deselect calls out to other subsystems such as gpio,
+ pinctrl, regmap or iio, it is essential that any I2C transfers
+ caused by these subsystems are unlocked. This can be convoluted to
+ accomplish, maybe even impossible if an acceptably clean solution
+ is sought.
+
Complex Examples
================
@@ -261,8 +231,10 @@ This is a good topology::
When device D1 is accessed, accesses to D2 are locked out for the
full duration of the operation (muxes on the top child adapter of M1
are locked). But accesses to D3 and D4 are possibly interleaved at
-any point. Accesses to D3 locks out D1 and D2, but accesses to D4
-are still possibly interleaved.
+any point.
+
+Accesses to D3 lock out D1 and D2, but accesses to D4 are still possibly
+interleaved.
Mux-locked mux as parent of parent-locked mux
@@ -394,3 +366,47 @@ This is a good topology::
When D1 or D2 are accessed, accesses to D3 and D4 are locked out while
accesses to D5 may interleave. When D3 or D4 are accessed, accesses to
all other devices are locked out.
+
+
+Mux type of existing device drivers
+===================================
+
+Whether a device is mux-locked or parent-locked depends on its
+implementation. The following list was correct at the time of writing:
+
+In drivers/i2c/muxes/:
+
+====================== =============================================
+i2c-arb-gpio-challenge Parent-locked
+i2c-mux-gpio Normally parent-locked, mux-locked iff
+ all involved gpio pins are controlled by the
+ same I2C root adapter that they mux.
+i2c-mux-gpmux Normally parent-locked, mux-locked iff
+ specified in device-tree.
+i2c-mux-ltc4306 Mux-locked
+i2c-mux-mlxcpld Parent-locked
+i2c-mux-pca9541 Parent-locked
+i2c-mux-pca954x Parent-locked
+i2c-mux-pinctrl Normally parent-locked, mux-locked iff
+ all involved pinctrl devices are controlled
+ by the same I2C root adapter that they mux.
+i2c-mux-reg Parent-locked
+====================== =============================================
+
+In drivers/iio/:
+
+====================== =============================================
+gyro/mpu3050 Mux-locked
+imu/inv_mpu6050/ Mux-locked
+====================== =============================================
+
+In drivers/media/:
+
+======================= =============================================
+dvb-frontends/lgdt3306a Mux-locked
+dvb-frontends/m88ds3103 Parent-locked
+dvb-frontends/rtl2830 Parent-locked
+dvb-frontends/rtl2832 Mux-locked
+dvb-frontends/si2168 Mux-locked
+usb/cx231xx/ Parent-locked
+======================= =============================================
diff --git a/Documentation/input/joydev/joystick.rst b/Documentation/input/joydev/joystick.rst
index f615906a0821..6d721396717a 100644
--- a/Documentation/input/joydev/joystick.rst
+++ b/Documentation/input/joydev/joystick.rst
@@ -517,6 +517,7 @@ All I-Force devices are supported by the iforce module. This includes:
* AVB Mag Turbo Force
* AVB Top Shot Pegasus
* AVB Top Shot Force Feedback Racing Wheel
+* Boeder Force Feedback Wheel
* Logitech WingMan Force
* Logitech WingMan Force Wheel
* Guillemot Race Leader Force Feedback
diff --git a/Documentation/networking/devlink/netdevsim.rst b/Documentation/networking/devlink/netdevsim.rst
index 8a292fb5aaea..ec5e6d79b2e2 100644
--- a/Documentation/networking/devlink/netdevsim.rst
+++ b/Documentation/networking/devlink/netdevsim.rst
@@ -67,7 +67,7 @@ The ``netdevsim`` driver supports rate objects management, which includes:
- setting tx_share and tx_max rate values for any rate object type;
- setting parent node for any rate object type.
-Rate nodes and it's parameters are exposed in ``netdevsim`` debugfs in RO mode.
+Rate nodes and their parameters are exposed in ``netdevsim`` debugfs in RO mode.
For example created rate node with name ``some_group``:
.. code:: shell
diff --git a/Documentation/networking/driver.rst b/Documentation/networking/driver.rst
index c8f59dbda46f..64f7236ff10b 100644
--- a/Documentation/networking/driver.rst
+++ b/Documentation/networking/driver.rst
@@ -8,7 +8,7 @@ Transmit path guidelines:
1) The ndo_start_xmit method must not return NETDEV_TX_BUSY under
any normal circumstances. It is considered a hard error unless
- there is no way your device can tell ahead of time when it's
+ there is no way your device can tell ahead of time when its
transmit function will become busy.
Instead it must maintain the queue properly. For example,
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index 56cd4ea059b2..a759872a2883 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -1035,7 +1035,10 @@ tcp_limit_output_bytes - INTEGER
tcp_challenge_ack_limit - INTEGER
Limits number of Challenge ACK sent per second, as recommended
in RFC 5961 (Improving TCP's Robustness to Blind In-Window Attacks)
- Default: 1000
+	Note that this per-netns rate limit can allow some side channel
+	attacks and probably should not be enabled.
+	The TCP stack implements per-socket limits anyway.
+ Default: INT_MAX (unlimited)
UDP variables
=============
diff --git a/Documentation/networking/ipvlan.rst b/Documentation/networking/ipvlan.rst
index 694adcba36b0..0000c1d383bc 100644
--- a/Documentation/networking/ipvlan.rst
+++ b/Documentation/networking/ipvlan.rst
@@ -11,7 +11,7 @@ Initial Release:
================
This is conceptually very similar to the macvlan driver with one major
exception of using L3 for mux-ing /demux-ing among slaves. This property makes
-the master device share the L2 with it's slave devices. I have developed this
+the master device share the L2 with its slave devices. I have developed this
driver in conjunction with network namespaces and not sure if there is use case
outside of it.
diff --git a/Documentation/networking/l2tp.rst b/Documentation/networking/l2tp.rst
index 498b382d25a0..7f383e99dbad 100644
--- a/Documentation/networking/l2tp.rst
+++ b/Documentation/networking/l2tp.rst
@@ -530,7 +530,7 @@ its tunnel close actions. For L2TPIP sockets, the socket's close
handler initiates the same tunnel close actions. All sessions are
first closed. Each session drops its tunnel ref. When the tunnel ref
reaches zero, the tunnel puts its socket ref. When the socket is
-eventually destroyed, it's sk_destruct finally frees the L2TP tunnel
+eventually destroyed, its sk_destruct finally frees the L2TP tunnel
context.
Sessions
diff --git a/Documentation/networking/rxrpc.rst b/Documentation/networking/rxrpc.rst
index 39c2249c7aa7..39494a6ea739 100644
--- a/Documentation/networking/rxrpc.rst
+++ b/Documentation/networking/rxrpc.rst
@@ -1055,17 +1055,6 @@ The kernel interface functions are as follows:
first function to change. Note that this must be called in TASK_RUNNING
state.
- (#) Get reply timestamp::
-
- bool rxrpc_kernel_get_reply_time(struct socket *sock,
- struct rxrpc_call *call,
- ktime_t *_ts)
-
- This allows the timestamp on the first DATA packet of the reply of a
- client call to be queried, provided that it is still in the Rx ring. If
- successful, the timestamp will be stored into ``*_ts`` and true will be
- returned; false will be returned otherwise.
-
(#) Get remote client epoch::
u32 rxrpc_kernel_get_epoch(struct socket *sock,
diff --git a/Documentation/networking/switchdev.rst b/Documentation/networking/switchdev.rst
index f1f4e6a85a29..bbf272e9d607 100644
--- a/Documentation/networking/switchdev.rst
+++ b/Documentation/networking/switchdev.rst
@@ -159,7 +159,7 @@ tools such as iproute2.
The switchdev driver can know a particular port's position in the topology by
monitoring NETDEV_CHANGEUPPER notifications. For example, a port moved into a
-bond will see it's upper master change. If that bond is moved into a bridge,
+bond will see its upper master change. If that bond is moved into a bridge,
the bond's upper master will change. And so on. The driver will track such
movements to know what position a port is in in the overall topology by
registering for netdevice events and acting on NETDEV_CHANGEUPPER.
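+
+A minimal sketch of such an event handler follows (hedged: the
+``example_port_update_topology()`` helper is hypothetical, and error
+handling is omitted)::
+
+  #include <linux/netdevice.h>
+
+  static int example_netdev_event(struct notifier_block *nb,
+                                  unsigned long event, void *ptr)
+  {
+          struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+          struct netdev_notifier_changeupper_info *info = ptr;
+
+          if (event != NETDEV_CHANGEUPPER)
+                  return NOTIFY_DONE;
+
+          /* info->upper_dev is the master being linked or unlinked. */
+          example_port_update_topology(dev, info->upper_dev, info->linking);
+
+          return NOTIFY_DONE;
+  }
+
+The block would be registered with ``register_netdevice_notifier()``.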
diff --git a/Documentation/sphinx/kerneldoc-preamble.sty b/Documentation/sphinx/kerneldoc-preamble.sty
index 2a29cbe51396..9707e033c8c4 100644
--- a/Documentation/sphinx/kerneldoc-preamble.sty
+++ b/Documentation/sphinx/kerneldoc-preamble.sty
@@ -70,8 +70,16 @@
% Translations have Asian (CJK) characters which are only displayed if
% xeCJK is used
+\usepackage{ifthen}
+\newboolean{enablecjk}
+\setboolean{enablecjk}{false}
\IfFontExistsTF{Noto Sans CJK SC}{
- % Load xeCJK when CJK font is available
+ \IfFileExists{xeCJK.sty}{
+ \setboolean{enablecjk}{true}
+ }{}
+}{}
+\ifthenelse{\boolean{enablecjk}}{
+ % Load xeCJK when both the Noto Sans CJK font and xeCJK.sty are available.
\usepackage{xeCJK}
% Noto CJK fonts don't provide slant shape. [AutoFakeSlant] permits
% its emulation.
@@ -196,7 +204,7 @@
% Inactivate CJK after tableofcontents
\apptocmd{\sphinxtableofcontents}{\kerneldocCJKoff}{}{}
\xeCJKsetup{CJKspace = true}% For inter-phrase space of Korean TOC
-}{ % No CJK font found
+}{ % Don't enable CJK
% Custom macros to on/off CJK and switch CJK fonts (Dummy)
\newcommand{\kerneldocCJKon}{}
\newcommand{\kerneldocCJKoff}{}
@@ -204,14 +212,16 @@
%% and ignore the argument (#1) in their definitions, whole contents of
%% CJK chapters can be ignored.
\newcommand{\kerneldocBeginSC}[1]{%
- %% Put a note on missing CJK fonts in place of zh_CN translation.
- \begin{sphinxadmonition}{note}{Note on missing fonts:}
+ %% Put a note on missing CJK fonts or the xecjk package in place of
+ %% zh_CN translation.
+ \begin{sphinxadmonition}{note}{Note on missing fonts and a package:}
Translations of Simplified Chinese (zh\_CN), Traditional Chinese
(zh\_TW), Korean (ko\_KR), and Japanese (ja\_JP) were skipped
- due to the lack of suitable font families.
+ due to the lack of suitable font families and/or the texlive-xecjk
+ package.
If you want them, please install ``Noto Sans CJK'' font families
- by following instructions from
+ along with the texlive-xecjk package by following instructions from
\sphinxcode{./scripts/sphinx-pre-install}.
Having optional ``Noto Serif CJK'' font families will improve
the looks of those translations.
diff --git a/Documentation/translations/ja_JP/SubmittingPatches b/Documentation/translations/ja_JP/SubmittingPatches
index 66ce0d8b0526..04deb77b20c6 100644
--- a/Documentation/translations/ja_JP/SubmittingPatches
+++ b/Documentation/translations/ja_JP/SubmittingPatches
@@ -35,8 +35,7 @@ Linux カーネルに変更を加えたいと思っている個人又は会社
てもらえやすくする提案を集めたものです。
コードを投稿する前に、Documentation/process/submit-checklist.rst の項目リストに目
-を通してチェックしてください。もしあなたがドライバーを投稿しようとし
-ているなら、Documentation/process/submitting-drivers.rst にも目を通してください。
+を通してチェックしてください。
--------------------------------------------
セクション1 パッチの作り方と送り方
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 32427ea160df..eee9f857a986 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -4074,7 +4074,7 @@ Queues an SMI on the thread's vcpu.
4.97 KVM_X86_SET_MSR_FILTER
----------------------------
-:Capability: KVM_X86_SET_MSR_FILTER
+:Capability: KVM_CAP_X86_MSR_FILTER
:Architectures: x86
:Type: vm ioctl
:Parameters: struct kvm_msr_filter
@@ -4173,8 +4173,10 @@ If an MSR access is not permitted through the filtering, it generates a
allows user space to deflect and potentially handle various MSR accesses
into user space.
-If a vCPU is in running state while this ioctl is invoked, the vCPU may
-experience inconsistent filtering behavior on MSR accesses.
+Note, invoking this ioctl while a vCPU is running is inherently racy. However,
+KVM does guarantee that vCPUs will see either the previous filter or the new
+filter, e.g. MSRs with identical settings in both the old and new filter will
+have deterministic behavior.
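+
+For example, a filter that blocks writes to eight MSRs while leaving
+everything else allowed could be installed as below (a hedged userspace
+sketch; ``vm_fd`` is assumed to be an open VM file descriptor and the
+filtered range is arbitrary)::
+
+  #include <linux/kvm.h>
+  #include <sys/ioctl.h>
+
+  static int example_deny_writes(int vm_fd)
+  {
+          __u8 bitmap = 0;  /* all bits 0: writes in this range fail */
+          struct kvm_msr_filter filter = {
+                  .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+                  .ranges[0] = {
+                          .flags = KVM_MSR_FILTER_WRITE,
+                          .nmsrs = 8,
+                          .base = 0x10,  /* hypothetical MSR index */
+                          .bitmap = &bitmap,
+                  },
+          };
+
+          return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
+  }
+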
4.98 KVM_CREATE_SPAPR_TCE_64
----------------------------
@@ -5287,110 +5289,7 @@ KVM_PV_DUMP
authentication tag all of which are needed to decrypt the dump at a
later time.
-
-4.126 KVM_X86_SET_MSR_FILTER
-----------------------------
-
-:Capability: KVM_CAP_X86_MSR_FILTER
-:Architectures: x86
-:Type: vm ioctl
-:Parameters: struct kvm_msr_filter
-:Returns: 0 on success, < 0 on error
-
-::
-
- struct kvm_msr_filter_range {
- #define KVM_MSR_FILTER_READ (1 << 0)
- #define KVM_MSR_FILTER_WRITE (1 << 1)
- __u32 flags;
- __u32 nmsrs; /* number of msrs in bitmap */
- __u32 base; /* MSR index the bitmap starts at */
- __u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */
- };
-
- #define KVM_MSR_FILTER_MAX_RANGES 16
- struct kvm_msr_filter {
- #define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
- #define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0)
- __u32 flags;
- struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
- };
-
-flags values for ``struct kvm_msr_filter_range``:
-
-``KVM_MSR_FILTER_READ``
-
- Filter read accesses to MSRs using the given bitmap. A 0 in the bitmap
- indicates that a read should immediately fail, while a 1 indicates that
- a read for a particular MSR should be handled regardless of the default
- filter action.
-
-``KVM_MSR_FILTER_WRITE``
-
- Filter write accesses to MSRs using the given bitmap. A 0 in the bitmap
- indicates that a write should immediately fail, while a 1 indicates that
- a write for a particular MSR should be handled regardless of the default
- filter action.
-
-``KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE``
-
- Filter both read and write accesses to MSRs using the given bitmap. A 0
- in the bitmap indicates that both reads and writes should immediately fail,
- while a 1 indicates that reads and writes for a particular MSR are not
- filtered by this range.
-
-flags values for ``struct kvm_msr_filter``:
-
-``KVM_MSR_FILTER_DEFAULT_ALLOW``
-
- If no filter range matches an MSR index that is getting accessed, KVM will
- fall back to allowing access to the MSR.
-
-``KVM_MSR_FILTER_DEFAULT_DENY``
-
- If no filter range matches an MSR index that is getting accessed, KVM will
- fall back to rejecting access to the MSR. In this mode, all MSRs that should
- be processed by KVM need to explicitly be marked as allowed in the bitmaps.
-
-This ioctl allows user space to define up to 16 bitmaps of MSR ranges to
-specify whether a certain MSR access should be explicitly filtered for or not.
-
-If this ioctl has never been invoked, MSR accesses are not guarded and the
-default KVM in-kernel emulation behavior is fully preserved.
-
-Calling this ioctl with an empty set of ranges (all nmsrs == 0) disables MSR
-filtering. In that mode, ``KVM_MSR_FILTER_DEFAULT_DENY`` is invalid and causes
-an error.
-
-As soon as the filtering is in place, every MSR access is processed through
-the filtering except for accesses to the x2APIC MSRs (from 0x800 to 0x8ff);
-x2APIC MSRs are always allowed, independent of the ``default_allow`` setting,
-and their behavior depends on the ``X2APIC_ENABLE`` bit of the APIC base
-register.
-
-If a bit is within one of the defined ranges, read and write accesses are
-guarded by the bitmap's value for the MSR index if the kind of access
-is included in the ``struct kvm_msr_filter_range`` flags. If no range
-cover this particular access, the behavior is determined by the flags
-field in the kvm_msr_filter struct: ``KVM_MSR_FILTER_DEFAULT_ALLOW``
-and ``KVM_MSR_FILTER_DEFAULT_DENY``.
-
-Each bitmap range specifies a range of MSRs to potentially allow access on.
-The range goes from MSR index [base .. base+nmsrs]. The flags field
-indicates whether reads, writes or both reads and writes are filtered
-by setting a 1 bit in the bitmap for the corresponding MSR index.
-
-If an MSR access is not permitted through the filtering, it generates a
-#GP inside the guest. When combined with KVM_CAP_X86_USER_SPACE_MSR, that
-allows user space to deflect and potentially handle various MSR accesses
-into user space.
-
-Note, invoking this ioctl with a vCPU is running is inherently racy. However,
-KVM does guarantee that vCPUs will see either the previous filter or the new
-filter, e.g. MSRs with identical settings in both the old and new filter will
-have deterministic behavior.
-
-4.127 KVM_XEN_HVM_SET_ATTR
+4.126 KVM_XEN_HVM_SET_ATTR
--------------------------
:Capability: KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO
diff --git a/Documentation/virt/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index 31f62b64e07b..87f04c1fa53d 100644
--- a/Documentation/virt/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
@@ -97,7 +97,7 @@ VCPU requests are simply bit indices of the ``vcpu->requests`` bitmap.
This means general bitops, like those documented in [atomic-ops]_ could
also be used, e.g. ::
- clear_bit(KVM_REQ_UNHALT & KVM_REQUEST_MASK, &vcpu->requests);
+ clear_bit(KVM_REQ_UNBLOCK & KVM_REQUEST_MASK, &vcpu->requests);
However, VCPU request users should refrain from doing so, as it would
break the abstraction. The first 8 bits are reserved for architecture
@@ -126,17 +126,6 @@ KVM_REQ_UNBLOCK
or in order to update the interrupt routing and ensure that assigned
devices will wake up the vCPU.
-KVM_REQ_UNHALT
-
- This request may be made from the KVM common function kvm_vcpu_block(),
- which is used to emulate an instruction that causes a CPU to halt until
- one of an architectural specific set of events and/or interrupts is
- received (determined by checking kvm_arch_vcpu_runnable()). When that
- event or interrupt arrives kvm_vcpu_block() makes the request. This is
- in contrast to when kvm_vcpu_block() returns due to any other reason,
- such as a pending signal, which does not indicate the VCPU's halt
- emulation should stop, and therefore does not make the request.
-
KVM_REQ_OUTSIDE_GUEST_MODE
This "request" ensures the target vCPU has exited guest mode prior to the
@@ -297,21 +286,6 @@ architecture dependent. kvm_vcpu_block() calls kvm_arch_vcpu_runnable()
to check if it should awaken. One reason to do so is to provide
architectures a function where requests may be checked if necessary.
-Clearing Requests
------------------
-
-Generally it only makes sense for the receiving VCPU thread to clear a
-request. However, in some circumstances, such as when the requesting
-thread and the receiving VCPU thread are executed serially, such as when
-they are the same thread, or when they are using some form of concurrency
-control to temporarily execute synchronously, then it's possible to know
-that the request may be cleared immediately, rather than waiting for the
-receiving VCPU thread to handle the request in VCPU RUN. The only current
-examples of this are kvm_vcpu_block() calls made by VCPUs to block
-themselves. A possible side-effect of that call is to make the
-KVM_REQ_UNHALT request, which may then be cleared immediately when the
-VCPU returns from the call.
-
References
==========
diff --git a/MAINTAINERS b/MAINTAINERS
index f29f27717de4..7f3720e443cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1803,7 +1803,7 @@ N: sun[x456789]i
N: sun50i
ARM/Amlogic Meson SoC CLOCK FRAMEWORK
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
M: Jerome Brunet <jbrunet@baylibre.com>
L: linux-amlogic@lists.infradead.org
S: Maintained
@@ -1828,7 +1828,7 @@ F: Documentation/devicetree/bindings/sound/amlogic*
F: sound/soc/meson/
ARM/Amlogic Meson SoC support
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
M: Kevin Hilman <khilman@baylibre.com>
R: Jerome Brunet <jbrunet@baylibre.com>
R: Martin Blumenstingl <martin.blumenstingl@googlemail.com>
@@ -2531,7 +2531,7 @@ W: http://www.digriz.org.uk/ts78xx/kernel
F: arch/arm/mach-orion5x/ts78xx-*
ARM/OXNAS platform support
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-oxnas@groups.io (moderated for non-subscribers)
S: Maintained
@@ -6792,7 +6792,7 @@ F: Documentation/devicetree/bindings/display/allwinner*
F: drivers/gpu/drm/sun4i/
DRM DRIVERS FOR AMLOGIC SOCS
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
L: dri-devel@lists.freedesktop.org
L: linux-amlogic@lists.infradead.org
S: Supported
@@ -6814,7 +6814,7 @@ F: drivers/gpu/drm/atmel-hlcdc/
DRM DRIVERS FOR BRIDGE CHIPS
M: Andrzej Hajda <andrzej.hajda@intel.com>
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
M: Robert Foss <robert.foss@linaro.org>
R: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
R: Jonas Karlman <jonas@kwiboo.se>
@@ -9122,7 +9122,7 @@ S: Maintained
F: drivers/dma/hisi_dma.c
HISILICON GPIO DRIVER
-M: Luo Jiaxing <luojiaxing@huawei.com>
+M: Jay Fang <f.fangjian@huawei.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-hisi.c
@@ -9208,8 +9208,8 @@ F: Documentation/ABI/testing/debugfs-hisi-zip
F: drivers/crypto/hisilicon/zip/
HISILICON ROCE DRIVER
+M: Haoyue Xu <xuhaoyue1@hisilicon.com>
M: Wenpeng Liang <liangwenpeng@huawei.com>
-M: Weihang Li <liweihang@huawei.com>
L: linux-rdma@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
@@ -10032,6 +10032,7 @@ F: Documentation/devicetree/bindings/input/
F: Documentation/devicetree/bindings/serio/
F: Documentation/input/
F: drivers/input/
+F: include/dt-bindings/input/
F: include/linux/input.h
F: include/linux/input/
F: include/uapi/linux/input-event-codes.h
@@ -10827,7 +10828,7 @@ F: drivers/media/tuners/it913x*
ITE IT66121 HDMI BRIDGE DRIVER
M: Phong LE <ple@baylibre.com>
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
@@ -11347,7 +11348,7 @@ F: kernel/debug/
F: kernel/module/kdb.c
KHADAS MCU MFD DRIVER
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
L: linux-amlogic@lists.infradead.org
S: Maintained
F: Documentation/devicetree/bindings/mfd/khadas,mcu.yaml
@@ -13218,7 +13219,7 @@ S: Maintained
F: drivers/watchdog/menz69_wdt.c
MESON AO CEC DRIVER FOR AMLOGIC SOCS
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
L: linux-media@vger.kernel.org
L: linux-amlogic@lists.infradead.org
S: Supported
@@ -13229,7 +13230,7 @@ F: drivers/media/cec/platform/meson/ao-cec-g12a.c
F: drivers/media/cec/platform/meson/ao-cec.c
MESON GE2D DRIVER FOR AMLOGIC SOCS
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
L: linux-media@vger.kernel.org
L: linux-amlogic@lists.infradead.org
S: Supported
@@ -13245,7 +13246,7 @@ F: Documentation/devicetree/bindings/mtd/amlogic,meson-nand.txt
F: drivers/mtd/nand/raw/meson_*
MESON VIDEO DECODER DRIVER FOR AMLOGIC SOCS
-M: Neil Armstrong <narmstrong@baylibre.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
L: linux-media@vger.kernel.org
L: linux-amlogic@lists.infradead.org
S: Supported
@@ -17532,9 +17533,19 @@ M: Conor Dooley <conor.dooley@microchip.com>
M: Daire McNamara <daire.mcnamara@microchip.com>
L: linux-riscv@lists.infradead.org
S: Supported
+F: Documentation/devicetree/bindings/clock/microchip,mpfs.yaml
+F: Documentation/devicetree/bindings/gpio/microchip,mpfs-gpio.yaml
+F: Documentation/devicetree/bindings/i2c/microchip,corei2c.yaml
+F: Documentation/devicetree/bindings/mailbox/microchip,mpfs-mailbox.yaml
+F: Documentation/devicetree/bindings/net/can/microchip,mpfs-can.yaml
+F: Documentation/devicetree/bindings/pwm/microchip,corepwm.yaml
+F: Documentation/devicetree/bindings/soc/microchip/microchip,mpfs-sys-controller.yaml
+F: Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml
+F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml
F: arch/riscv/boot/dts/microchip/
F: drivers/char/hw_random/mpfs-rng.c
F: drivers/clk/microchip/clk-mpfs.c
+F: drivers/i2c/busses/i2c-microchip-core.c
F: drivers/mailbox/mailbox-mpfs.c
F: drivers/pci/controller/pcie-microchip-host.c
F: drivers/rtc/rtc-mpfs.c
@@ -17735,6 +17746,17 @@ L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/ulp/rtrs/
+RUNTIME VERIFICATION (RV)
+M: Daniel Bristot de Oliveira <bristot@kernel.org>
+M: Steven Rostedt <rostedt@goodmis.org>
+L: linux-trace-devel@vger.kernel.org
+S: Maintained
+F: Documentation/trace/rv/
+F: include/linux/rv.h
+F: include/rv/
+F: kernel/trace/rv/
+F: tools/verification/
+
RXRPC SOCKETS (AF_RXRPC)
M: David Howells <dhowells@redhat.com>
M: Marc Dionne <marc.dionne@auristor.com>
@@ -20601,6 +20623,7 @@ F: include/*/ftrace.h
F: include/linux/trace*.h
F: include/trace/
F: kernel/trace/
+F: scripts/tracing/
F: tools/testing/selftests/ftrace/
TRACING MMIO ACCESSES (MMIOTRACE)
@@ -20765,6 +20788,7 @@ UBLK USERSPACE BLOCK DRIVER
M: Ming Lei <ming.lei@redhat.com>
L: linux-block@vger.kernel.org
S: Maintained
+F: Documentation/block/ublk.rst
F: drivers/block/ublk_drv.c
F: include/uapi/linux/ublk_cmd.h
@@ -22306,7 +22330,7 @@ M: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
R: Srinivas Neeli <srinivas.neeli@xilinx.com>
R: Michal Simek <michal.simek@xilinx.com>
S: Maintained
-F: Documentation/devicetree/bindings/gpio/gpio-xilinx.txt
+F: Documentation/devicetree/bindings/gpio/xlnx,gpio-xilinx.yaml
F: Documentation/devicetree/bindings/gpio/gpio-zynq.yaml
F: drivers/gpio/gpio-xilinx.c
F: drivers/gpio/gpio-zynq.c
diff --git a/Makefile b/Makefile
index 952d354069a4..298f69060f10 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*
@@ -1287,8 +1287,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
PHONY += headers
headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
- $(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
- $(error Headers not exportable for the $(SRCARCH) architecture))
+ $(if $(filter um, $(SRCARCH)), $(error Headers not exportable for UML))
$(Q)$(MAKE) $(hdr-inst)=include/uapi
$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
diff --git a/arch/Kconfig b/arch/Kconfig
index 5dbf11a5ba4e..8b311e400ec1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -923,6 +923,9 @@ config HAVE_SOFTIRQ_ON_OWN_STACK
Architecture provides a function to run __do_softirq() on a
separate stack.
+config SOFTIRQ_ON_OWN_STACK
+ def_bool HAVE_SOFTIRQ_ON_OWN_STACK && !PREEMPT_RT
+
config ALTERNATE_USER_ADDRESS_SPACE
bool
help
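
The new SOFTIRQ_ON_OWN_STACK symbol folds the two conditions ("the architecture has a dedicated softirq stack" and "not PREEMPT_RT") into one Kconfig symbol, so the per-architecture irq code later in this patch (arm, parisc, powerpc) can replace #ifndef CONFIG_PREEMPT_RT with a single positive test. A minimal sketch of the resulting pattern, using only names that appear in the hunks of this patch:

    #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
    void do_softirq_own_stack(void)
    {
            /* arch-specific: switch to the dedicated irq stack, then run */
            __do_softirq();
    }
    #endif
    /* When the symbol is off (which includes all PREEMPT_RT builds), the
     * generic code runs __do_softirq() on the current task stack. */
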
diff --git a/arch/arm/boot/dts/arm-realview-eb.dtsi b/arch/arm/boot/dts/arm-realview-eb.dtsi
index 2dfb32bf9d48..fbb2258b451f 100644
--- a/arch/arm/boot/dts/arm-realview-eb.dtsi
+++ b/arch/arm/boot/dts/arm-realview-eb.dtsi
@@ -399,7 +399,7 @@
compatible = "arm,pl022", "arm,primecell";
reg = <0x1000d000 0x1000>;
clocks = <&sspclk>, <&pclk>;
- clock-names = "SSPCLK", "apb_pclk";
+ clock-names = "sspclk", "apb_pclk";
};
wdog: watchdog@10010000 {
diff --git a/arch/arm/boot/dts/arm-realview-pb1176.dts b/arch/arm/boot/dts/arm-realview-pb1176.dts
index 06b8723b09eb..efed325af88d 100644
--- a/arch/arm/boot/dts/arm-realview-pb1176.dts
+++ b/arch/arm/boot/dts/arm-realview-pb1176.dts
@@ -410,7 +410,7 @@
interrupt-parent = <&intc_dc1176>;
interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&sspclk>, <&pclk>;
- clock-names = "SSPCLK", "apb_pclk";
+ clock-names = "sspclk", "apb_pclk";
};
pb1176_serial0: serial@1010c000 {
diff --git a/arch/arm/boot/dts/arm-realview-pb11mp.dts b/arch/arm/boot/dts/arm-realview-pb11mp.dts
index 295aef448123..89103d54ecc1 100644
--- a/arch/arm/boot/dts/arm-realview-pb11mp.dts
+++ b/arch/arm/boot/dts/arm-realview-pb11mp.dts
@@ -555,7 +555,7 @@
interrupt-parent = <&intc_pb11mp>;
interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&sspclk>, <&pclk>;
- clock-names = "SSPCLK", "apb_pclk";
+ clock-names = "sspclk", "apb_pclk";
};
watchdog@1000f000 {
diff --git a/arch/arm/boot/dts/arm-realview-pbx.dtsi b/arch/arm/boot/dts/arm-realview-pbx.dtsi
index 6f61f968d689..ec1507c5147c 100644
--- a/arch/arm/boot/dts/arm-realview-pbx.dtsi
+++ b/arch/arm/boot/dts/arm-realview-pbx.dtsi
@@ -390,7 +390,7 @@
compatible = "arm,pl022", "arm,primecell";
reg = <0x1000d000 0x1000>;
clocks = <&sspclk>, <&pclk>;
- clock-names = "SSPCLK", "apb_pclk";
+ clock-names = "sspclk", "apb_pclk";
};
wdog0: watchdog@1000f000 {
diff --git a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
index 76b2025c67b4..83bcf9fe0152 100644
--- a/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
+++ b/arch/arm/boot/dts/at91-sama5d27_wlsom1.dtsi
@@ -76,8 +76,8 @@
regulators {
vdd_3v3: VDD_IO {
regulator-name = "VDD_IO";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -95,8 +95,8 @@
vddio_ddr: VDD_DDR {
regulator-name = "VDD_DDR";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -118,8 +118,8 @@
vdd_core: VDD_CORE {
regulator-name = "VDD_CORE";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -160,8 +160,8 @@
LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-state-standby {
@@ -175,9 +175,8 @@
LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
- regulator-always-on;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
regulator-state-standby {
regulator-on-in-suspend;
diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts
index 6865be8d7787..dd1dec9d4e07 100644
--- a/arch/arm/boot/dts/at91-sama5d2_icp.dts
+++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts
@@ -196,8 +196,8 @@
regulators {
vdd_io_reg: VDD_IO {
regulator-name = "VDD_IO";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -215,8 +215,8 @@
VDD_DDR {
regulator-name = "VDD_DDR";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -234,8 +234,8 @@
VDD_CORE {
regulator-name = "VDD_CORE";
- regulator-min-microvolt = <600000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1250000>;
+ regulator-max-microvolt = <1250000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -257,7 +257,6 @@
regulator-max-microvolt = <1850000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
- regulator-always-on;
regulator-state-standby {
regulator-on-in-suspend;
@@ -272,8 +271,8 @@
LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <2500000>;
+ regulator-max-microvolt = <2500000>;
regulator-always-on;
regulator-state-standby {
@@ -287,8 +286,8 @@
LDO2 {
regulator-name = "LDO2";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-always-on;
regulator-state-standby {
diff --git a/arch/arm/boot/dts/at91-sama7g5ek.dts b/arch/arm/boot/dts/at91-sama7g5ek.dts
index de44da2e4aae..3b25c67795dd 100644
--- a/arch/arm/boot/dts/at91-sama7g5ek.dts
+++ b/arch/arm/boot/dts/at91-sama7g5ek.dts
@@ -244,8 +244,8 @@
regulators {
vdd_3v3: VDD_IO {
regulator-name = "VDD_IO";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -264,8 +264,8 @@
vddioddr: VDD_DDR {
regulator-name = "VDD_DDR";
- regulator-min-microvolt = <1300000>;
- regulator-max-microvolt = <1450000>;
+ regulator-min-microvolt = <1350000>;
+ regulator-max-microvolt = <1350000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -285,8 +285,8 @@
vddcore: VDD_CORE {
regulator-name = "VDD_CORE";
- regulator-min-microvolt = <1100000>;
- regulator-max-microvolt = <1850000>;
+ regulator-min-microvolt = <1150000>;
+ regulator-max-microvolt = <1150000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-always-on;
@@ -306,7 +306,7 @@
vddcpu: VDD_OTHER {
regulator-name = "VDD_OTHER";
regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1850000>;
+ regulator-max-microvolt = <1250000>;
regulator-initial-mode = <2>;
regulator-allowed-modes = <2>, <4>;
regulator-ramp-delay = <3125>;
@@ -326,8 +326,8 @@
vldo1: LDO1 {
regulator-name = "LDO1";
- regulator-min-microvolt = <1200000>;
- regulator-max-microvolt = <3700000>;
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
regulator-always-on;
regulator-state-standby {
diff --git a/arch/arm/boot/dts/bcm63178.dtsi b/arch/arm/boot/dts/bcm63178.dtsi
index 5463443f0762..cbd094dde6d0 100644
--- a/arch/arm/boot/dts/bcm63178.dtsi
+++ b/arch/arm/boot/dts/bcm63178.dtsi
@@ -32,6 +32,7 @@
next-level-cache = <&L2_0>;
enable-method = "psci";
};
+
CA7_2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a7";
@@ -39,6 +40,7 @@
next-level-cache = <&L2_0>;
enable-method = "psci";
};
+
L2_0: l2-cache0 {
compatible = "cache";
};
@@ -46,10 +48,10 @@
timer {
compatible = "arm,armv7-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_LOW)>;
arm,cpu-registers-not-fw-configured;
};
@@ -80,23 +82,23 @@
psci {
compatible = "arm,psci-0.2";
method = "smc";
- cpu_off = <1>;
- cpu_on = <2>;
};
axi@81000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0 0x81000000 0x4000>;
+ ranges = <0 0x81000000 0x8000>;
gic: interrupt-controller@1000 {
compatible = "arm,cortex-a7-gic";
#interrupt-cells = <3>;
- #address-cells = <0>;
interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(3) | IRQ_TYPE_LEVEL_HIGH)>;
reg = <0x1000 0x1000>,
- <0x2000 0x2000>;
+ <0x2000 0x2000>,
+ <0x4000 0x2000>,
+ <0x6000 0x2000>;
};
};
diff --git a/arch/arm/boot/dts/bcm6846.dtsi b/arch/arm/boot/dts/bcm6846.dtsi
index e610c102498f..8aa47a2583b2 100644
--- a/arch/arm/boot/dts/bcm6846.dtsi
+++ b/arch/arm/boot/dts/bcm6846.dtsi
@@ -40,10 +40,10 @@
timer {
compatible = "arm,armv7-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
arm,cpu-registers-not-fw-configured;
};
@@ -65,23 +65,23 @@
psci {
compatible = "arm,psci-0.2";
method = "smc";
- cpu_off = <1>;
- cpu_on = <2>;
};
axi@81000000 {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
- ranges = <0 0x81000000 0x4000>;
+ ranges = <0 0x81000000 0x8000>;
gic: interrupt-controller@1000 {
compatible = "arm,cortex-a7-gic";
#interrupt-cells = <3>;
- #address-cells = <0>;
interrupt-controller;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
reg = <0x1000 0x1000>,
- <0x2000 0x2000>;
+ <0x2000 0x2000>,
+ <0x4000 0x2000>,
+ <0x6000 0x2000>;
};
};
diff --git a/arch/arm/boot/dts/bcm6878.dtsi b/arch/arm/boot/dts/bcm6878.dtsi
index a7dff596fe1e..1e8b5fa96c25 100644
--- a/arch/arm/boot/dts/bcm6878.dtsi
+++ b/arch/arm/boot/dts/bcm6878.dtsi
@@ -32,6 +32,7 @@
next-level-cache = <&L2_0>;
enable-method = "psci";
};
+
L2_0: l2-cache0 {
compatible = "cache";
};
@@ -39,10 +40,10 @@
timer {
compatible = "arm,armv7-timer";
- interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
- <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
arm,cpu-registers-not-fw-configured;
};
diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
index 095c9143d99a..6b791d515e29 100644
--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
@@ -51,16 +51,6 @@
vin-supply = <&reg_3p3v_s5>;
};
- reg_3p3v_s0: regulator-3p3v-s0 {
- compatible = "regulator-fixed";
- regulator-name = "V_3V3_S0";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
- regulator-boot-on;
- vin-supply = <&reg_3p3v_s5>;
- };
-
reg_3p3v_s5: regulator-3p3v-s5 {
compatible = "regulator-fixed";
regulator-name = "V_3V3_S5";
@@ -259,7 +249,7 @@
/* default boot source: workaround #1 for errata ERR006282 */
smarc_flash: flash@0 {
- compatible = "winbond,w25q16dw", "jedec,spi-nor";
+ compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <20000000>;
};
diff --git a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
index a1676b5d2980..c5a98b0110dd 100644
--- a/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-vicut1.dtsi
@@ -28,7 +28,7 @@
enable-gpios = <&gpio4 28 GPIO_ACTIVE_HIGH>;
};
- backlight_led: backlight_led {
+ backlight_led: backlight-led {
compatible = "pwm-backlight";
pwms = <&pwm3 0 5000000 0>;
brightness-levels = <0 16 64 255>;
diff --git a/arch/arm/boot/dts/integratorap-im-pd1.dts b/arch/arm/boot/dts/integratorap-im-pd1.dts
index d47bfb66d069..4c22e4436271 100644
--- a/arch/arm/boot/dts/integratorap-im-pd1.dts
+++ b/arch/arm/boot/dts/integratorap-im-pd1.dts
@@ -178,12 +178,12 @@
clock-names = "uartclk", "apb_pclk";
};
- ssp@300000 {
+ spi@300000 {
compatible = "arm,pl022", "arm,primecell";
reg = <0x00300000 0x1000>;
interrupts-extended = <&impd1_vic 3>;
clocks = <&impd1_sspclk>, <&sysclk>;
- clock-names = "spiclk", "apb_pclk";
+ clock-names = "sspclk", "apb_pclk";
};
impd1_gpio0: gpio@400000 {
diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 79f7cc241282..a520615f4d8d 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -391,7 +391,7 @@
reg = <0x101f4000 0x1000>;
interrupts = <11>;
clocks = <&xtal24mhz>, <&pclk>;
- clock-names = "SSPCLK", "apb_pclk";
+ clock-names = "sspclk", "apb_pclk";
};
fpga {
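
The RealView, Versatile and IM-PD1 hunks above rename the first SSP clock from "SSPCLK" (or "spiclk") to "sspclk" to match the PL022 binding. clock-names strings are matched verbatim and case-sensitively when a consumer requests a clock by connection id; a minimal consumer-side sketch of that lookup (hypothetical consumer code, not necessarily what the PL022 driver itself does):

    /* Sketch: the id string passed to devm_clk_get() must equal one of
     * the DT clock-names entries exactly, so "SSPCLK" would not match. */
    struct clk *sspclk = devm_clk_get(&pdev->dev, "sspclk");
    if (IS_ERR(sspclk))
            return PTR_ERR(sspclk);
    clk_prepare_enable(sspclk);
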
diff --git a/arch/arm/configs/at91_dt_defconfig b/arch/arm/configs/at91_dt_defconfig
index da90ce9cd42e..e185eee18856 100644
--- a/arch/arm/configs/at91_dt_defconfig
+++ b/arch/arm/configs/at91_dt_defconfig
@@ -196,7 +196,6 @@ CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_DMADEVICES=y
CONFIG_AT_HDMAC=y
CONFIG_AT_XDMAC=y
-CONFIG_MICROCHIP_PIT64B=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_IIO=y
CONFIG_AT91_ADC=y
diff --git a/arch/arm/configs/sama7_defconfig b/arch/arm/configs/sama7_defconfig
index 0384030d8b25..b2311f004618 100644
--- a/arch/arm/configs/sama7_defconfig
+++ b/arch/arm/configs/sama7_defconfig
@@ -188,7 +188,6 @@ CONFIG_RTC_DRV_AT91SAM9=y
CONFIG_DMADEVICES=y
CONFIG_AT_XDMAC=y
CONFIG_STAGING=y
-CONFIG_MICROCHIP_PIT64B=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_IIO=y
CONFIG_IIO_SW_TRIGGER=y
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 034cb48c9eeb..fe28fc1f759d 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -70,7 +70,7 @@ static void __init init_irq_stacks(void)
}
}
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static void ____do_softirq(void *arg)
{
__do_softirq();
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index df6d673e83d5..f4501dea98b0 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -541,10 +541,42 @@ extern u32 at91_pm_suspend_in_sram_sz;
static int at91_suspend_finish(unsigned long val)
{
+ unsigned char modified_gray_code[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x06, 0x07, 0x04, 0x05, 0x0c, 0x0d,
+ 0x0e, 0x0f, 0x0a, 0x0b, 0x08, 0x09, 0x18, 0x19, 0x1a, 0x1b,
+ 0x1e, 0x1f, 0x1c, 0x1d, 0x14, 0x15, 0x16, 0x17, 0x12, 0x13,
+ 0x10, 0x11,
+ };
+ unsigned int tmp, index;
int i;
if (soc_pm.data.mode == AT91_PM_BACKUP && soc_pm.data.ramc_phy) {
/*
+ * Bootloader will perform DDR recalibration and will try to
+ * restore the ZQ0SR0 with the value saved here. But the
+ * calibration is buggy and restoring some values from ZQ0SR0
+ * is forbidden and risky thus we need to provide processed
+ * values for these (modified gray code values).
+ */
+ tmp = readl(soc_pm.data.ramc_phy + DDR3PHY_ZQ0SR0);
+
+ /* Store pull-down output impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SR0_PDO_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] = modified_gray_code[index];
+
+ /* Store pull-up output impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SR0_PUO_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+
+ /* Store pull-down on-die termination impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SR0_PDODT_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+
+ /* Store pull-up on-die termination impedance select. */
+ index = (tmp >> DDR3PHY_ZQ0SRO_PUODT_OFF) & 0x1f;
+ soc_pm.bu->ddr_phy_calibration[0] |= modified_gray_code[index];
+
+ /*
* The 1st 8 words of memory might get corrupted in the process
* of DDR PHY recalibration; it is saved here in securam and it
* will be restored later, after recalibration, by bootloader
@@ -1066,10 +1098,6 @@ static int __init at91_pm_backup_init(void)
of_scan_flat_dt(at91_pm_backup_scan_memcs, &located);
if (!located)
goto securam_fail;
-
- /* DDR3PHY_ZQ0SR0 */
- soc_pm.bu->ddr_phy_calibration[0] = readl(soc_pm.data.ramc_phy +
- 0x188);
}
return 0;
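
The at91 pm.c hunk above saves processed rather than raw DDR PHY calibration values: each 5-bit impedance-select field of ZQ0SR0 is translated through the modified_gray_code[] table before being stored for the bootloader, since restoring some raw ZQ0SR0 values after recalibration is forbidden. A condensed sketch of the per-field step, using the *_OFF offsets from the hunk:

    /* Extract one 5-bit impedance-select field from ZQ0SR0 and remap it;
     * the four processed fields are then combined into
     * soc_pm.bu->ddr_phy_calibration[0] as in the code above. */
    static u8 zq0sr0_remap(u32 zq0sr0, unsigned int off)
    {
            unsigned int index = (zq0sr0 >> off) & 0x1f;

            return modified_gray_code[index];
    }
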
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index abe4ced33eda..ffed4d949042 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -172,9 +172,15 @@ sr_ena_2:
/* Put DDR PHY's DLL in bypass mode for non-backup modes. */
cmp r7, #AT91_PM_BACKUP
beq sr_ena_3
- ldr tmp1, [r3, #DDR3PHY_PIR]
- orr tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
- str tmp1, [r3, #DDR3PHY_PIR]
+
+ /* Disable DX DLLs. */
+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
+
+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
+ orr tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX1DLLCR]
sr_ena_3:
/* Power down DDR PHY data receivers. */
@@ -221,10 +227,14 @@ sr_ena_3:
bic tmp1, tmp1, #DDR3PHY_DSGCR_ODTPDD_ODT0
str tmp1, [r3, #DDR3PHY_DSGCR]
- /* Take DDR PHY's DLL out of bypass mode. */
- ldr tmp1, [r3, #DDR3PHY_PIR]
- bic tmp1, tmp1, #DDR3PHY_PIR_DLLBYP
- str tmp1, [r3, #DDR3PHY_PIR]
+ /* Enable DX DLLs. */
+ ldr tmp1, [r3, #DDR3PHY_DX0DLLCR]
+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX0DLLCR]
+
+ ldr tmp1, [r3, #DDR3PHY_DX1DLLCR]
+ bic tmp1, tmp1, #DDR3PHY_DXDLLCR_DLLDIS
+ str tmp1, [r3, #DDR3PHY_DX1DLLCR]
/* Enable quasi-dynamic programming. */
mov tmp1, #0
diff --git a/arch/arm/mach-ixp4xx/ixp4xx-of.c b/arch/arm/mach-ixp4xx/ixp4xx-of.c
index f9904716ec7f..f543e2adae0c 100644
--- a/arch/arm/mach-ixp4xx/ixp4xx-of.c
+++ b/arch/arm/mach-ixp4xx/ixp4xx-of.c
@@ -46,7 +46,7 @@ static void __init ixp4xx_of_map_io(void)
}
/*
- * We handle 4 differen SoC families. These compatible strings are enough
+ * We handle 4 different SoC families. These compatible strings are enough
* to provide the core so that different boards can add their more detailed
* specifics.
*/
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9fb9fff08c94..1ce7685ad5de 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1887,6 +1887,8 @@ config ARM64_BTI_KERNEL
depends on CC_HAS_BRANCH_PROT_PAC_RET_BTI
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94697
depends on !CC_IS_GCC || GCC_VERSION >= 100100
+ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106671
+ depends on !CC_IS_GCC
# https://github.com/llvm/llvm-project/commit/a88c722e687e6780dcd6a58718350dc76fcc4cc9
depends on !CC_IS_CLANG || CLANG_VERSION >= 120000
depends on (!FUNCTION_GRAPH_TRACER || DYNAMIC_FTRACE_WITH_REGS)
diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi
index 8d0d45d168d1..2f27619d8abd 100644
--- a/arch/arm64/boot/dts/arm/juno-base.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-base.dtsi
@@ -26,7 +26,8 @@
compatible = "arm,mhu", "arm,primecell";
reg = <0x0 0x2b1f0000 0x0 0x1000>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
#mbox-cells = <1>;
clocks = <&soc_refclk100mhz>;
clock-names = "apb_pclk";
diff --git a/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi b/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi
index ba88d1596f6f..09d2b692e9e1 100644
--- a/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-cs-r1r2.dtsi
@@ -67,7 +67,6 @@
port@0 {
reg = <0>;
csys2_funnel_in_port0: endpoint {
- slave-mode;
remote-endpoint = <&etf0_out_port>;
};
};
@@ -75,7 +74,6 @@
port@1 {
reg = <1>;
csys2_funnel_in_port1: endpoint {
- slave-mode;
remote-endpoint = <&etf1_out_port>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts
index 40d34c8384a5..b949cac03742 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-qds-65bb.dts
@@ -25,7 +25,6 @@
&enetc_port0 {
phy-handle = <&slot1_sgmii>;
phy-mode = "2500base-x";
- managed = "in-band-status";
status = "okay";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
index 35fb929e7bcc..d3ee6fc4baab 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw7901.dts
@@ -626,24 +626,28 @@
lan1: port@0 {
reg = <0>;
label = "lan1";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
lan2: port@1 {
reg = <1>;
label = "lan2";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
lan3: port@2 {
reg = <2>;
label = "lan3";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
lan4: port@3 {
reg = <3>;
label = "lan4";
+ phy-mode = "internal";
local-mac-address = [00 00 00 00 00 00];
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
index d1b4582f44c4..b379c461aa13 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-verdin.dtsi
@@ -32,10 +32,10 @@
};
/* Fixed clock dedicated to SPI CAN controller */
- clk20m: oscillator {
+ clk40m: oscillator {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <20000000>;
+ clock-frequency = <40000000>;
};
gpio-keys {
@@ -202,8 +202,8 @@
can1: can@0 {
compatible = "microchip,mcp251xfd";
- clocks = <&clk20m>;
- interrupts-extended = <&gpio1 6 IRQ_TYPE_EDGE_FALLING>;
+ clocks = <&clk40m>;
+ interrupts-extended = <&gpio1 6 IRQ_TYPE_LEVEL_LOW>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_can1_int>;
reg = <0>;
@@ -603,7 +603,7 @@
pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>;
reg = <0x4a>;
/* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */
- reset-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
status = "disabled";
};
@@ -745,6 +745,7 @@
};
&usbphynop2 {
+ power-domains = <&pgc_otg2>;
vcc-supply = <&reg_vdd_3v3>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
index a616eb378002..0f13ee362771 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-dhcom-som.dtsi
@@ -70,7 +70,7 @@
&ecspi1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_ecspi1>;
- cs-gpios = <&gpio5 9 GPIO_ACTIVE_LOW>;
+ cs-gpios = <&gpio5 17 GPIO_ACTIVE_LOW>;
status = "disabled";
};
@@ -403,8 +403,8 @@
pinctrl-names = "default", "gpio";
pinctrl-0 = <&pinctrl_i2c5>;
pinctrl-1 = <&pinctrl_i2c5_gpio>;
- scl-gpios = <&gpio5 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
- sda-gpios = <&gpio5 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ scl-gpios = <&gpio3 26 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+ sda-gpios = <&gpio3 27 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
status = "okay";
};
@@ -648,10 +648,10 @@
pinctrl_ecspi1: dhcom-ecspi1-grp {
fsl,pins = <
- MX8MP_IOMUXC_ECSPI1_SCLK__ECSPI1_SCLK 0x44
- MX8MP_IOMUXC_ECSPI1_MOSI__ECSPI1_MOSI 0x44
- MX8MP_IOMUXC_ECSPI1_MISO__ECSPI1_MISO 0x44
- MX8MP_IOMUXC_ECSPI1_SS0__GPIO5_IO09 0x40
+ MX8MP_IOMUXC_I2C1_SCL__ECSPI1_SCLK 0x44
+ MX8MP_IOMUXC_I2C1_SDA__ECSPI1_MOSI 0x44
+ MX8MP_IOMUXC_I2C2_SCL__ECSPI1_MISO 0x44
+ MX8MP_IOMUXC_I2C2_SDA__GPIO5_IO17 0x40
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index 521215520a0f..6630ec561dc2 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
@@ -770,10 +770,10 @@
pinctrl_sai2: sai2grp {
fsl,pins = <
- MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC
- MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00
- MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK
- MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK
+ MX8MP_IOMUXC_SAI2_TXFS__AUDIOMIX_SAI2_TX_SYNC 0xd6
+ MX8MP_IOMUXC_SAI2_TXD0__AUDIOMIX_SAI2_TX_DATA00 0xd6
+ MX8MP_IOMUXC_SAI2_TXC__AUDIOMIX_SAI2_TX_BCLK 0xd6
+ MX8MP_IOMUXC_SAI2_MCLK__AUDIOMIX_SAI2_MCLK 0xd6
>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
index c5987bdbb383..1c74c6a19449 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-verdin.dtsi
@@ -628,7 +628,7 @@
interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
reg = <0x4a>;
/* Verdin GPIO_2 (SODIMM 208) */
- reset-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
status = "disabled";
};
};
@@ -705,7 +705,7 @@
pinctrl-0 = <&pinctrl_gpio_9_dsi>, <&pinctrl_i2s_2_bclk_touch_reset>;
reg = <0x4a>;
/* Verdin I2S_2_BCLK (TOUCH_RESET#, SODIMM 42) */
- reset-gpios = <&gpio5 0 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
index 899e8e7dbc24..802ad6e5cef6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-tqma8mq.dtsi
@@ -204,7 +204,6 @@
reg = <0x51>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_rtc>;
- interrupt-names = "irq";
interrupt-parent = <&gpio1>;
interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
quartz-load-femtofarads = <7000>;
diff --git a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
index 7cbb0de060dd..1c15726cff8b 100644
--- a/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a779g0.dtsi
@@ -85,7 +85,7 @@
"renesas,rcar-gen4-hscif",
"renesas,hscif";
reg = <0 0xe6540000 0 96>;
- interrupts = <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&cpg CPG_MOD 514>,
<&cpg CPG_CORE R8A779G0_CLK_S0D3_PER>,
<&scif_clk>;
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 02e59fa8f293..32d14f481f0c 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -64,28 +64,28 @@
#define EARLY_KASLR (0)
#endif
-#define EARLY_ENTRIES(vstart, vend, shift) \
- ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+#define EARLY_ENTRIES(vstart, vend, shift, add) \
+ ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
-#define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
+#define EARLY_PGDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
#if SWAPPER_PGTABLE_LEVELS > 3
-#define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))
+#define EARLY_PUDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT, add))
#else
-#define EARLY_PUDS(vstart, vend) (0)
+#define EARLY_PUDS(vstart, vend, add) (0)
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
-#define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))
+#define EARLY_PMDS(vstart, vend, add) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT, add))
#else
-#define EARLY_PMDS(vstart, vend) (0)
+#define EARLY_PMDS(vstart, vend, add) (0)
#endif
-#define EARLY_PAGES(vstart, vend) ( 1 /* PGDIR page */ \
- + EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
- + EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
- + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
-#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end))
+#define EARLY_PAGES(vstart, vend, add) ( 1 /* PGDIR page */ \
+ + EARLY_PGDS((vstart), (vend), add) /* each PGDIR needs a next level page table */ \
+ + EARLY_PUDS((vstart), (vend), add) /* each PUD needs a next level page table */ \
+ + EARLY_PMDS((vstart), (vend), add)) /* each PMD needs a next level page table */
+#define INIT_DIR_SIZE (PAGE_SIZE * EARLY_PAGES(KIMAGE_VADDR, _end, EARLY_KASLR))
/* the initial ID map may need two extra pages if it needs to be extended */
#if VA_BITS < 48
@@ -93,7 +93,7 @@
#else
#define INIT_IDMAP_DIR_SIZE (INIT_IDMAP_DIR_PAGES * PAGE_SIZE)
#endif
-#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE)
+#define INIT_IDMAP_DIR_PAGES EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
/* Initial memory map size */
#if ARM64_KERNEL_USES_PMD_MAPS
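
The reworked macros above turn the spare-entry term into an explicit parameter: the kernel mapping keeps its EARLY_KASLR slack, while the initial ID map now always reserves one extra entry because its end can move past _end at runtime. A standalone illustration of the EARLY_ENTRIES() arithmetic (the addresses and the 2 MiB shift are made-up example values):

    #include <stdio.h>

    #define EARLY_ENTRIES(vstart, vend, shift, add) \
            ((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + (add))

    int main(void)
    {
            unsigned long long vstart = 0xffff800008000000ULL;
            unsigned long long vend   = vstart + (40ULL << 20); /* 40 MiB */

            /* 20 two-MiB blocks, plus one spare slot (add = 1) for a
             * range that may slide by up to a block: prints 21. */
            printf("%llu entries\n", EARLY_ENTRIES(vstart, vend, 21, 1));
            return 0;
    }
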
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index b5accf53a153..2196aad7b55b 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -371,7 +371,9 @@ SYM_FUNC_END(create_idmap)
SYM_FUNC_START_LOCAL(create_kernel_mapping)
adrp x0, init_pg_dir
mov_q x5, KIMAGE_VADDR // compile time __va(_text)
+#ifdef CONFIG_RELOCATABLE
add x5, x5, x23 // add KASLR displacement
+#endif
adrp x6, _end // runtime __pa(_end)
adrp x3, _text // runtime __pa(_text)
sub x6, x6, x3 // _end - _text
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
index 889951291cc0..a11a6e14ba89 100644
--- a/arch/arm64/kernel/machine_kexec_file.c
+++ b/arch/arm64/kernel/machine_kexec_file.c
@@ -47,7 +47,7 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
u64 i;
phys_addr_t start, end;
- nr_ranges = 1; /* for exclusion of crashkernel region */
+ nr_ranges = 2; /* for exclusion of crashkernel region */
for_each_mem_range(i, &start, &end)
nr_ranges++;
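
Reserving two slots instead of one accounts for the worst case of the crashkernel exclusion: punching a region out of the middle of a larger memory range leaves two surviving pieces. A standalone sketch of that split:

    #include <stdio.h>

    struct range { unsigned long long start, end; };

    /* Excluding [cut.start, cut.end) from [r.start, r.end) leaves 0, 1
     * or 2 pieces - hence two spare slots in the ELF header ranges. */
    static int range_exclude(struct range r, struct range cut,
                             struct range out[2])
    {
            int n = 0;

            if (cut.start > r.start)
                    out[n++] = (struct range){ r.start, cut.start };
            if (cut.end < r.end)
                    out[n++] = (struct range){ cut.end, r.end };
            return n;
    }

    int main(void)
    {
            struct range out[2];
            int i, n;

            n = range_exclude((struct range){ 0x80000000ULL, 0x100000000ULL },
                              (struct range){ 0xa0000000ULL, 0xb0000000ULL },
                              out);
            for (i = 0; i < n; i++)
                    printf("[%#llx, %#llx)\n", out[i].start, out[i].end);
            return 0;
    }
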
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index eb7c08dfb834..041d2ae5c30a 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1084,7 +1084,6 @@ static int za_set(struct task_struct *target,
if (!target->thread.sve_state) {
sve_alloc(target, false);
if (!target->thread.sve_state) {
- clear_thread_flag(TIF_SME);
ret = -ENOMEM;
goto out;
}
@@ -1094,7 +1093,6 @@ static int za_set(struct task_struct *target,
sme_alloc(target);
if (!target->thread.za_state) {
ret = -ENOMEM;
- clear_tsk_thread_flag(target, TIF_SME);
goto out;
}
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 617f78ad43a1..97c9de57725d 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -101,6 +101,9 @@ SYM_FUNC_END(__cpu_suspend_enter)
SYM_CODE_START(cpu_resume)
bl init_kernel_el
bl finalise_el2
+#if VA_BITS > 48
+ ldr_l x0, vabits_actual
+#endif
bl __cpu_setup
/* enable the MMU early - so we can access sleep_save_stash by va */
adrp x1, swapper_pg_dir
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c7fb2ad8be9f..94d33e296e10 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -666,7 +666,6 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
kvm_vcpu_halt(vcpu);
vcpu_clear_flag(vcpu, IN_WFIT);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
preempt_disable();
vgic_v4_load(vcpu);
@@ -2114,7 +2113,7 @@ static int finalize_hyp_mode(void)
* at, which would end badly once inaccessible.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
- kmemleak_free_part(__va(hyp_mem_base), hyp_mem_size);
+ kmemleak_free_part_phys(hyp_mem_base, hyp_mem_size);
return pkvm_drop_host_privileges();
}
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index c9a13e487187..34c5feed9dc1 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -92,9 +92,13 @@ static bool kvm_is_device_pfn(unsigned long pfn)
static void *stage2_memcache_zalloc_page(void *arg)
{
struct kvm_mmu_memory_cache *mc = arg;
+ void *virt;
/* Allocated with __GFP_ZERO, so no need to zero */
- return kvm_mmu_memory_cache_alloc(mc);
+ virt = kvm_mmu_memory_cache_alloc(mc);
+ if (virt)
+ kvm_account_pgtable_pages(virt, 1);
+ return virt;
}
static void *kvm_host_zalloc_pages_exact(size_t size)
@@ -102,6 +106,21 @@ static void *kvm_host_zalloc_pages_exact(size_t size)
return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
}
+static void *kvm_s2_zalloc_pages_exact(size_t size)
+{
+ void *virt = kvm_host_zalloc_pages_exact(size);
+
+ if (virt)
+ kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT));
+ return virt;
+}
+
+static void kvm_s2_free_pages_exact(void *virt, size_t size)
+{
+ kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT));
+ free_pages_exact(virt, size);
+}
+
static void kvm_host_get_page(void *addr)
{
get_page(virt_to_page(addr));
@@ -112,6 +131,15 @@ static void kvm_host_put_page(void *addr)
put_page(virt_to_page(addr));
}
+static void kvm_s2_put_page(void *addr)
+{
+ struct page *p = virt_to_page(addr);
+ /* Dropping last refcount, the page will be freed */
+ if (page_count(p) == 1)
+ kvm_account_pgtable_pages(addr, -1);
+ put_page(p);
+}
+
static int kvm_host_page_count(void *addr)
{
return page_count(virt_to_page(addr));
@@ -625,10 +653,10 @@ static int get_user_mapping_size(struct kvm *kvm, u64 addr)
static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = {
.zalloc_page = stage2_memcache_zalloc_page,
- .zalloc_pages_exact = kvm_host_zalloc_pages_exact,
- .free_pages_exact = free_pages_exact,
+ .zalloc_pages_exact = kvm_s2_zalloc_pages_exact,
+ .free_pages_exact = kvm_s2_free_pages_exact,
.get_page = kvm_host_get_page,
- .put_page = kvm_host_put_page,
+ .put_page = kvm_s2_put_page,
.page_count = kvm_host_page_count,
.phys_to_virt = kvm_host_va,
.virt_to_phys = kvm_host_pa,
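
The kvm_s2_* wrappers above charge stage-2 page-table pages to the pagetable memory statistics via kvm_account_pgtable_pages() at allocation time. The one subtle spot is the put path: the charge must be dropped exactly once, on the final reference, before the page can be freed. Condensed from kvm_s2_put_page() above (kernel context assumed):

    static void s2_put_page(void *addr)
    {
            struct page *p = virt_to_page(addr);

            /* page_count() == 1 means we hold the last reference, so
             * uncharge before put_page() potentially frees the page. */
            if (page_count(p) == 1)
                    kvm_account_pgtable_pages(addr, -1);
            put_page(p);
    }
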
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 26aeb1408e56..c7dd6ad779af 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -39,6 +39,7 @@ config LOONGARCH
select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+ select ARCH_KEEP_MEMBLOCK
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
select ARCH_SPARSEMEM_ENABLE
@@ -51,6 +52,7 @@ config LOONGARCH
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+ select ARCH_WANT_LD_ORPHAN_WARN
select ARCH_WANTS_NO_INSTR
select BUILDTIME_TABLE_SORT
select COMMON_CLK
diff --git a/arch/loongarch/include/asm/acpi.h b/arch/loongarch/include/asm/acpi.h
index c5108213876c..17162f494b9b 100644
--- a/arch/loongarch/include/asm/acpi.h
+++ b/arch/loongarch/include/asm/acpi.h
@@ -15,7 +15,7 @@ extern int acpi_pci_disabled;
extern int acpi_noirq;
#define acpi_os_ioremap acpi_os_ioremap
-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
static inline void disable_acpi(void)
{
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
index f1c928648a4a..335398482038 100644
--- a/arch/loongarch/kernel/acpi.c
+++ b/arch/loongarch/kernel/acpi.c
@@ -48,7 +48,7 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
early_memunmap(map, size);
}
-void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
if (!memblock_is_memory(phys))
return ioremap(phys, size);
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
index 7f4889df4a17..8f5b7986374b 100644
--- a/arch/loongarch/kernel/signal.c
+++ b/arch/loongarch/kernel/signal.c
@@ -529,11 +529,11 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
signal_setup_done(ret, ksig, 0);
}
-void arch_do_signal_or_restart(struct pt_regs *regs, bool has_signal)
+void arch_do_signal_or_restart(struct pt_regs *regs)
{
struct ksignal ksig;
- if (has_signal && get_signal(&ksig)) {
+ if (get_signal(&ksig)) {
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
index 69c76f26c1c5..f4831df5d2f9 100644
--- a/arch/loongarch/kernel/vmlinux.lds.S
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -77,6 +77,8 @@ SECTIONS
PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
#endif
+ .rela.dyn : ALIGN(8) { *(.rela.dyn) *(.rela*) }
+
.init.bss : {
*(.init.bss)
}
diff --git a/arch/loongarch/lib/dump_tlb.c b/arch/loongarch/lib/dump_tlb.c
index cda2c6bc7f09..c2cc7ce343c9 100644
--- a/arch/loongarch/lib/dump_tlb.c
+++ b/arch/loongarch/lib/dump_tlb.c
@@ -18,11 +18,11 @@ void dump_tlb_regs(void)
{
const int field = 2 * sizeof(unsigned long);
- pr_info("Index : %0x\n", read_csr_tlbidx());
- pr_info("PageSize : %0x\n", read_csr_pagesize());
- pr_info("EntryHi : %0*llx\n", field, read_csr_entryhi());
- pr_info("EntryLo0 : %0*llx\n", field, read_csr_entrylo0());
- pr_info("EntryLo1 : %0*llx\n", field, read_csr_entrylo1());
+ pr_info("Index : 0x%0x\n", read_csr_tlbidx());
+ pr_info("PageSize : 0x%0x\n", read_csr_pagesize());
+ pr_info("EntryHi : 0x%0*llx\n", field, read_csr_entryhi());
+ pr_info("EntryLo0 : 0x%0*llx\n", field, read_csr_entrylo0());
+ pr_info("EntryLo1 : 0x%0*llx\n", field, read_csr_entrylo1());
}
static void dump_tlb(int first, int last)
@@ -33,8 +33,8 @@ static void dump_tlb(int first, int last)
unsigned int s_index, s_asid;
unsigned int pagesize, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
- int pwidth = 11;
- int vwidth = 11;
+ int pwidth = 16;
+ int vwidth = 16;
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
s_entryhi = read_csr_entryhi();
@@ -64,22 +64,22 @@ static void dump_tlb(int first, int last)
/*
* Only print entries in use
*/
- pr_info("Index: %2d pgsize=%x ", i, (1 << pagesize));
+ pr_info("Index: %4d pgsize=0x%x ", i, (1 << pagesize));
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
- pr_cont("va=%0*lx asid=%0*lx",
+ pr_cont("va=0x%0*lx asid=0x%0*lx",
vwidth, (entryhi & ~0x1fffUL), asidwidth, asid & asidmask);
/* NR/NX are in awkward places, so mask them off separately */
pa = entrylo0 & ~(ENTRYLO_NR | ENTRYLO_NX);
pa = pa & PAGE_MASK;
pr_cont("\n\t[");
- pr_cont("ri=%d xi=%d ",
+ pr_cont("nr=%d nx=%d ",
(entrylo0 & ENTRYLO_NR) ? 1 : 0,
(entrylo0 & ENTRYLO_NX) ? 1 : 0);
- pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
+ pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld] [",
pwidth, pa, c0,
(entrylo0 & ENTRYLO_D) ? 1 : 0,
(entrylo0 & ENTRYLO_V) ? 1 : 0,
@@ -88,10 +88,10 @@ static void dump_tlb(int first, int last)
/* NR/NX are in awkward places, so mask them off separately */
pa = entrylo1 & ~(ENTRYLO_NR | ENTRYLO_NX);
pa = pa & PAGE_MASK;
- pr_cont("ri=%d xi=%d ",
+ pr_cont("nr=%d nx=%d ",
(entrylo1 & ENTRYLO_NR) ? 1 : 0,
(entrylo1 & ENTRYLO_NX) ? 1 : 0);
- pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
+ pr_cont("pa=0x%0*llx c=%d d=%d v=%d g=%d plv=%lld]\n",
pwidth, pa, c1,
(entrylo1 & ENTRYLO_D) ? 1 : 0,
(entrylo1 & ENTRYLO_V) ? 1 : 0,
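
The dump_tlb changes above add a literal "0x" prefix and widen the zero-padded fields (pwidth/vwidth go from 11 to 16) so full 64-bit values print unambiguously. A standalone demo of the "%0*llx" form, where '*' consumes an int width argument:

    #include <stdio.h>

    int main(void)
    {
            int field = 2 * (int)sizeof(unsigned long); /* 16 on 64-bit */
            unsigned long long pa = 0x90000000123000ULL;

            /* Prints pa=0x0090000000123000: zero-padded to 16 digits. */
            printf("pa=0x%0*llx\n", field, pa);
            return 0;
    }
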
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
index 7094a68c9b83..0532ed5ba43d 100644
--- a/arch/loongarch/mm/init.c
+++ b/arch/loongarch/mm/init.c
@@ -131,18 +131,6 @@ int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
return ret;
}
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 start)
-{
- int nid;
-
- nid = pa_to_nid(start);
- return nid;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
-
-#ifdef CONFIG_MEMORY_HOTREMOVE
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
unsigned long start_pfn = start >> PAGE_SHIFT;
@@ -154,6 +142,13 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
page += vmem_altmap_offset(altmap);
__remove_pages(start_pfn, nr_pages, altmap);
}
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+ return pa_to_nid(start);
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
#endif
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ec21f8999249..25dd4c5a8ef5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2669,7 +2669,6 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
bool
- select SPARSEMEM_STATIC if !SGI_IP27
config NUMA
bool "NUMA Support"
diff --git a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
index bf13e35871b2..aa7bbf8d0df5 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-cmd-queue.c
@@ -57,14 +57,11 @@ EXPORT_SYMBOL_GPL(__cvmx_cmd_queue_state_ptr);
static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
{
char *alloc_name = "cvmx_cmd_queues";
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
extern uint64_t octeon_reserve32_memory;
-#endif
if (likely(__cvmx_cmd_queue_state_ptr))
return CVMX_CMD_QUEUE_SUCCESS;
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
if (octeon_reserve32_memory)
__cvmx_cmd_queue_state_ptr =
cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
@@ -73,7 +70,6 @@ static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
(CONFIG_CAVIUM_RESERVE32 <<
20) - 1, 128, alloc_name);
else
-#endif
__cvmx_cmd_queue_state_ptr =
cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
128,
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 9cb9ed44bcaf..fd8043f6ff8a 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -127,6 +127,16 @@ static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
int irq, int line, int bit)
{
+ struct device_node *of_node;
+ int ret;
+
+ of_node = irq_domain_get_of_node(domain);
+ if (!of_node)
+ return -EINVAL;
+ ret = irq_alloc_desc_at(irq, of_node_to_nid(of_node));
+ if (ret < 0)
+ return ret;
+
return irq_domain_associate(domain, irq, line << 6 | bit);
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index cbd83205518d..e7f994393ae8 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -284,10 +284,8 @@ void octeon_crash_smp_send_stop(void)
#endif /* CONFIG_KEXEC */
-#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
-#endif
#ifdef CONFIG_KEXEC
/* crashkernel cmdline parameter is parsed _after_ memory setup
@@ -666,9 +664,6 @@ void __init prom_init(void)
int i;
u64 t;
int argc;
-#ifdef CONFIG_CAVIUM_RESERVE32
- int64_t addr = -1;
-#endif
/*
* The bootloader passes a pointer to the boot descriptor in
* $a3, this is available as fw_arg3.
@@ -783,7 +778,7 @@ void __init prom_init(void)
cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
cvmx_write_csr(CVMX_LED_EN, 1);
}
-#ifdef CONFIG_CAVIUM_RESERVE32
+
/*
* We need to temporarily allocate all memory in the reserve32
* region. This makes sure the kernel doesn't allocate this
@@ -794,14 +789,16 @@ void __init prom_init(void)
* Allocate memory for RESERVED32 aligned on 2MB boundary. This
* is in case we later use hugetlb entries with it.
*/
- addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
- 0, 0, 2 << 20,
- "CAVIUM_RESERVE32", 0);
- if (addr < 0)
- pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
- else
- octeon_reserve32_memory = addr;
-#endif
+ if (CONFIG_CAVIUM_RESERVE32) {
+ int64_t addr =
+ cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
+ 0, 0, 2 << 20,
+ "CAVIUM_RESERVE32", 0);
+ if (addr < 0)
+ pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
+ else
+ octeon_reserve32_memory = addr;
+ }
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
@@ -1079,7 +1076,6 @@ void __init plat_mem_setup(void)
cvmx_bootmem_unlock();
#endif /* CONFIG_CRASH_DUMP */
-#ifdef CONFIG_CAVIUM_RESERVE32
/*
* Now that we've allocated the kernel memory it is safe to
* free the reserved region. We free it here so that builtin
@@ -1087,7 +1083,6 @@ void __init plat_mem_setup(void)
*/
if (octeon_reserve32_memory)
cvmx_bootmem_free_named("CAVIUM_RESERVE32");
-#endif /* CONFIG_CAVIUM_RESERVE32 */
if (total == 0)
panic("Unable to allocate memory from "
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b494d8d39290..edaec93a1a1f 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -955,13 +955,11 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
kvm_vcpu_halt(vcpu);
/*
- * We we are runnable, then definitely go off to user space to
+ * We are runnable, then definitely go off to user space to
* check if any I/O interrupts are pending.
*/
- if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ if (kvm_arch_vcpu_runnable(vcpu))
vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
- }
}
return EMULATE_DONE;
diff --git a/arch/mips/loongson32/ls1c/board.c b/arch/mips/loongson32/ls1c/board.c
index e9de6da0ce51..9dcfe9de55b0 100644
--- a/arch/mips/loongson32/ls1c/board.c
+++ b/arch/mips/loongson32/ls1c/board.c
@@ -15,7 +15,6 @@ static struct platform_device *ls1c_platform_devices[] __initdata = {
static int __init ls1c_platform_init(void)
{
ls1x_serial_set_uartclk(&ls1x_uart_pdev);
- ls1x_rtc_set_extclk(&ls1x_rtc_pdev);
return platform_add_devices(ls1c_platform_devices,
ARRAY_SIZE(ls1c_platform_devices));
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 9aede2447011..a98940e64243 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -224,8 +224,18 @@ config MLONGCALLS
Enabling this option will probably slow down your kernel.
config 64BIT
- def_bool "$(ARCH)" = "parisc64"
+ def_bool y if "$(ARCH)" = "parisc64"
+ bool "64-bit kernel" if "$(ARCH)" = "parisc"
depends on PA8X00
+ help
+ Enable this if you want to support 64bit kernel on PA-RISC platform.
+
+ At the moment, only people willing to use more than 2GB of RAM,
+ or having a 64bit-only capable PA-RISC machine should say Y here.
+
+ Since there is no 64bit userland on PA-RISC, there is no point to
+ enable this option otherwise. The 64bit kernel is significantly bigger
+ and slower than the 32bit one.
choice
prompt "Kernel page size"
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index fbb882cb8dbb..b05055f3ba4b 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -480,7 +480,7 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
*irq_stack_in_use = 1;
}
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
execute_on_irq_stack(__do_softirq, 0);
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 398e0b5e485f..ed6db13a1d7c 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -83,6 +83,8 @@ enum {
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
+ FW_FEATURE_NATIVE_POSSIBLE = 0,
+ FW_FEATURE_NATIVE_ALWAYS = 0,
FW_FEATURE_POSSIBLE =
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
@@ -93,6 +95,9 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
#endif
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
+ FW_FEATURE_NATIVE_ALWAYS |
+#endif
0,
FW_FEATURE_ALWAYS =
#ifdef CONFIG_PPC_PSERIES
@@ -104,6 +109,9 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
#endif
+#ifdef CONFIG_PPC_HASH_MMU_NATIVE
+ FW_FEATURE_NATIVE_ALWAYS &
+#endif
FW_FEATURE_POSSIBLE,
#else /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 26ede09c521d..983551859891 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -113,7 +113,14 @@ static inline void __hard_RI_enable(void)
static inline notrace unsigned long irq_soft_mask_return(void)
{
- return READ_ONCE(local_paca->irq_soft_mask);
+ unsigned long flags;
+
+ asm volatile(
+ "lbz %0,%1(13)"
+ : "=r" (flags)
+ : "i" (offsetof(struct paca_struct, irq_soft_mask)));
+
+ return flags;
}
/*
@@ -140,24 +147,46 @@ static inline notrace void irq_soft_mask_set(unsigned long mask)
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
WARN_ON(mask && !(mask & IRQS_DISABLED));
- WRITE_ONCE(local_paca->irq_soft_mask, mask);
- barrier();
+ asm volatile(
+ "stb %0,%1(13)"
+ :
+ : "r" (mask),
+ "i" (offsetof(struct paca_struct, irq_soft_mask))
+ : "memory");
}
static inline notrace unsigned long irq_soft_mask_set_return(unsigned long mask)
{
- unsigned long flags = irq_soft_mask_return();
+ unsigned long flags;
- irq_soft_mask_set(mask);
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(mask && !(mask & IRQS_DISABLED));
+#endif
+
+ asm volatile(
+ "lbz %0,%1(13); stb %2,%1(13)"
+ : "=&r" (flags)
+ : "i" (offsetof(struct paca_struct, irq_soft_mask)),
+ "r" (mask)
+ : "memory");
return flags;
}
static inline notrace unsigned long irq_soft_mask_or_return(unsigned long mask)
{
- unsigned long flags = irq_soft_mask_return();
+ unsigned long flags, tmp;
+
+ asm volatile(
+ "lbz %0,%2(13); or %1,%0,%3; stb %1,%2(13)"
+ : "=&r" (flags), "=r" (tmp)
+ : "i" (offsetof(struct paca_struct, irq_soft_mask)),
+ "r" (mask)
+ : "memory");
- irq_soft_mask_set(flags | mask);
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON((mask | flags) && !((mask | flags) & IRQS_DISABLED));
+#endif
return flags;
}
@@ -282,7 +311,8 @@ static inline bool pmi_irq_pending(void)
flags = irq_soft_mask_set_return(IRQS_ALL_DISABLED); \
local_paca->irq_happened |= PACA_IRQ_HARD_DIS; \
if (!arch_irqs_disabled_flags(flags)) { \
- WRITE_ONCE(local_paca->saved_r1, current_stack_pointer);\
+ asm volatile("std%X0 %1,%0" : "=m" (local_paca->saved_r1) \
+ : "r" (current_stack_pointer)); \
trace_hardirqs_off(); \
} \
} while(0)
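
The hw_irq.h rewrite above replaces the READ_ONCE()/WRITE_ONCE() helpers with single lbz/stb inline-asm accesses to the soft-mask byte in the paca (addressed via r13), so each helper performs exactly one load and/or store, and the "memory" clobber orders it against surrounding code. A C-level model of the intended semantics (illustration only; the real code must use the asm forms):

    static unsigned char soft_mask; /* stands in for local_paca->irq_soft_mask */

    static unsigned long soft_mask_set_return(unsigned long mask)
    {
            unsigned long flags = soft_mask;        /* one lbz */

            soft_mask = mask;                       /* one stb */
            return flags;
    }

    static unsigned long soft_mask_or_return(unsigned long mask)
    {
            unsigned long flags = soft_mask;        /* lbz     */

            soft_mask = flags | mask;               /* or; stb */
            return flags;
    }
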
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 0f17268c1f0b..9ede61a5a469 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -199,7 +199,7 @@ static inline void check_stack_overflow(unsigned long sp)
}
}
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static __always_inline void call_do_softirq(const void *sp)
{
/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
@@ -335,7 +335,7 @@ void *mcheckirq_ctx[NR_CPUS] __read_mostly;
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
call_do_softirq(softirq_ctx[smp_processor_id()]);
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 433965bf37b4..855b59892c5c 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -245,6 +245,15 @@ static int __init pcibios_init(void)
printk(KERN_INFO "PCI: Probing PCI hardware\n");
+#ifdef CONFIG_PPC_PCI_BUS_NUM_DOMAIN_DEPENDENT
+ /*
+ * Enable PCI domains in /proc when PCI bus numbers are not unique
+ * across all PCI domains to prevent conflicts. And keep PCI domain 0
+ * backward compatible in /proc for video cards.
+ */
+ pci_add_flags(PCI_ENABLE_PROC_DOMAINS | PCI_COMPAT_DOMAIN_0);
+#endif
+
if (pci_has_flag(PCI_REASSIGN_ALL_BUS))
pci_assign_all_buses = 1;
diff --git a/arch/powerpc/kernel/rtas_entry.S b/arch/powerpc/kernel/rtas_entry.S
index 9a434d42e660..6ce95ddadbcd 100644
--- a/arch/powerpc/kernel/rtas_entry.S
+++ b/arch/powerpc/kernel/rtas_entry.S
@@ -109,8 +109,12 @@ __enter_rtas:
* its critical regions (as specified in PAPR+ section 7.2.1). MSR[S]
* is not impacted by RFI_TO_KERNEL (only urfid can unset it). So if
* MSR[S] is set, it will remain when entering RTAS.
+ * If we're in HV mode, RTAS must also run in HV mode, so extract MSR_HV
+ * from the saved MSR value and insert into the value RTAS will use.
*/
+ extrdi r0, r6, 1, 63 - MSR_HV_LG
LOAD_REG_IMMEDIATE(r6, MSR_ME | MSR_RI)
+ insrdi r6, r0, 1, 63 - MSR_HV_LG
li r0,0
mtmsrd r0,1 /* disable RI before using SRR0/1 */
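
The rtas_entry.S hunk above extracts MSR_HV from the saved MSR (extrdi) and inserts it into the MSR value RTAS will run with (insrdi), so a hypervisor-mode kernel does not drop out of HV mode across the RTAS call. A standalone C model of that bit-carry (MSR bit positions assumed per arch/powerpc/include/asm/reg.h, where MSR_HV_LG is 60):

    #include <stdio.h>

    #define MSR_HV_LG 60
    #define MSR_HV    (1ULL << MSR_HV_LG)
    #define MSR_ME    (1ULL << 12)
    #define MSR_RI    (1ULL << 1)

    int main(void)
    {
            unsigned long long saved_msr = MSR_HV | 0x9032; /* example */
            unsigned long long rtas_msr  = MSR_ME | MSR_RI;

            rtas_msr |= saved_msr & MSR_HV; /* the extrdi/insrdi carry */
            printf("rtas_msr = %#llx\n", rtas_msr);
            return 0;
    }
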
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index cb3358886203..6c1db3b6de2d 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -18,6 +18,7 @@
.p2align 3
#define __SYSCALL(nr, entry) .8byte entry
#else
+ .p2align 2
#define __SYSCALL(nr, entry) .long entry
#endif
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index d6abed6e51e6..9fc4dd8f66eb 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -499,7 +499,6 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (msr & MSR_POW) {
if (!vcpu->arch.pending_exceptions) {
kvm_vcpu_halt(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.generic.halt_wakeup++;
/* Unset POW bit after we woke up */
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index a1f2978b2a86..b2c89e850d7a 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -393,7 +393,6 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_CEDE:
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_halt(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.generic.halt_wakeup++;
return EMULATE_DONE;
case H_LOGICAL_CI_LOAD:
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 06c5830a93f9..7b4920e9fd26 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -719,7 +719,6 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
kvm_vcpu_halt(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
hard_irq_disable();
kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index fb1490761c87..ec9c1e3c2ff4 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -239,7 +239,6 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
case EV_HCALL_TOKEN(EV_IDLE):
r = EV_SUCCESS;
kvm_vcpu_halt(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
break;
default:
r = EV_UNIMPLEMENTED;
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index 20f6ed813bff..2f8385523a13 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -124,9 +124,6 @@ struct papr_scm_priv {
/* The bits which need to be overridden */
u64 health_bitmap_inject_mask;
-
- /* array to have event_code and stat_id mappings */
- u8 *nvdimm_events_map;
};
static int papr_scm_pmem_flush(struct nd_region *nd_region,
@@ -350,6 +347,25 @@ static ssize_t drc_pmem_query_stats(struct papr_scm_priv *p,
#ifdef CONFIG_PERF_EVENTS
#define to_nvdimm_pmu(_pmu) container_of(_pmu, struct nvdimm_pmu, pmu)
+static const char * const nvdimm_events_map[] = {
+ [1] = "CtlResCt",
+ [2] = "CtlResTm",
+ [3] = "PonSecs ",
+ [4] = "MemLife ",
+ [5] = "CritRscU",
+ [6] = "HostLCnt",
+ [7] = "HostSCnt",
+ [8] = "HostSDur",
+ [9] = "HostLDur",
+ [10] = "MedRCnt ",
+ [11] = "MedWCnt ",
+ [12] = "MedRDur ",
+ [13] = "MedWDur ",
+ [14] = "CchRHCnt",
+ [15] = "CchWHCnt",
+ [16] = "FastWCnt",
+};
+
static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev, u64 *count)
{
struct papr_scm_perf_stat *stat;
@@ -357,11 +373,15 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
struct papr_scm_priv *p = dev_get_drvdata(dev);
int rc, size;
+ /* Invalid eventcode */
+ if (event->attr.config == 0 || event->attr.config >= ARRAY_SIZE(nvdimm_events_map))
+ return -EINVAL;
+
/* Allocate a request buffer large enough to hold a single performance stat */
size = sizeof(struct papr_scm_perf_stats) +
sizeof(struct papr_scm_perf_stat);
- if (!p || !p->nvdimm_events_map)
+ if (!p)
return -EINVAL;
stats = kzalloc(size, GFP_KERNEL);
@@ -370,7 +390,7 @@ static int papr_scm_pmu_get_value(struct perf_event *event, struct device *dev,
stat = &stats->scm_statistic[0];
memcpy(&stat->stat_id,
- &p->nvdimm_events_map[event->attr.config * sizeof(stat->stat_id)],
+ nvdimm_events_map[event->attr.config],
sizeof(stat->stat_id));
stat->stat_val = 0;
@@ -458,56 +478,6 @@ static void papr_scm_pmu_del(struct perf_event *event, int flags)
papr_scm_pmu_read(event);
}
-static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu *nd_pmu)
-{
- struct papr_scm_perf_stat *stat;
- struct papr_scm_perf_stats *stats;
- u32 available_events;
- int index, rc = 0;
-
- if (!p->stat_buffer_len)
- return -ENOENT;
-
- available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats))
- / sizeof(struct papr_scm_perf_stat);
- if (available_events == 0)
- return -EOPNOTSUPP;
-
- /* Allocate the buffer for phyp where stats are written */
- stats = kzalloc(p->stat_buffer_len, GFP_KERNEL);
- if (!stats) {
- rc = -ENOMEM;
- return rc;
- }
-
- /* Called to get list of events supported */
- rc = drc_pmem_query_stats(p, stats, 0);
- if (rc)
- goto out;
-
- /*
- * Allocate memory and populate nvdimm_event_map.
- * Allocate an extra element for NULL entry
- */
- p->nvdimm_events_map = kcalloc(available_events + 1,
- sizeof(stat->stat_id),
- GFP_KERNEL);
- if (!p->nvdimm_events_map) {
- rc = -ENOMEM;
- goto out;
- }
-
- /* Copy all stat_ids to event map */
- for (index = 0, stat = stats->scm_statistic;
- index < available_events; index++, ++stat) {
- memcpy(&p->nvdimm_events_map[index * sizeof(stat->stat_id)],
- &stat->stat_id, sizeof(stat->stat_id));
- }
-out:
- kfree(stats);
- return rc;
-}
-
static void papr_scm_pmu_register(struct papr_scm_priv *p)
{
struct nvdimm_pmu *nd_pmu;
@@ -519,9 +489,10 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
goto pmu_err_print;
}
- rc = papr_scm_pmu_check_events(p, nd_pmu);
- if (rc)
+ if (!p->stat_buffer_len) {
+ rc = -ENOENT;
goto pmu_check_events_err;
+ }
nd_pmu->pmu.task_ctx_nr = perf_invalid_context;
nd_pmu->pmu.name = nvdimm_name(p->nvdimm);
@@ -539,7 +510,7 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
rc = register_nvdimm_pmu(nd_pmu, p->pdev);
if (rc)
- goto pmu_register_err;
+ goto pmu_check_events_err;
/*
* Set archdata.priv value to nvdimm_pmu structure, to handle the
@@ -548,8 +519,6 @@ static void papr_scm_pmu_register(struct papr_scm_priv *p)
p->pdev->archdata.priv = nd_pmu;
return;
-pmu_register_err:
- kfree(p->nvdimm_events_map);
pmu_check_events_err:
kfree(nd_pmu);
pmu_err_print:
@@ -1560,7 +1529,6 @@ static int papr_scm_remove(struct platform_device *pdev)
unregister_nvdimm_pmu(pdev->archdata.priv);
pdev->archdata.priv = NULL;
- kfree(p->nvdimm_events_map);
kfree(p->bus_desc.provider_name);
kfree(p);
diff --git a/arch/powerpc/platforms/pseries/plpks.c b/arch/powerpc/platforms/pseries/plpks.c
index 52aaa2894606..f4b5b5a64db3 100644
--- a/arch/powerpc/platforms/pseries/plpks.c
+++ b/arch/powerpc/platforms/pseries/plpks.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/types.h>
#include <asm/hvcall.h>
+#include <asm/machdep.h>
#include "plpks.h"
@@ -457,4 +458,4 @@ static __init int pseries_plpks_init(void)
return rc;
}
-arch_initcall(pseries_plpks_init);
+machine_arch_initcall(pseries, pseries_plpks_init);
diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
index 74493344ea41..6d9d455fa160 100644
--- a/arch/riscv/boot/dts/microchip/mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
@@ -185,7 +185,7 @@
ranges;
cctrllr: cache-controller@2010000 {
- compatible = "sifive,fu540-c000-ccache", "cache";
+ compatible = "microchip,mpfs-ccache", "sifive,fu540-c000-ccache", "cache";
reg = <0x0 0x2010000 0x0 0x1000>;
cache-block-size = <64>;
cache-level = <2>;
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index 83d6d4d2b1df..26a446a34057 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -33,4 +33,16 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
u32 type, u64 flags);
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+#ifdef CONFIG_RISCV_SBI_V01
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+#endif
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+
#endif /* __RISCV_KVM_VCPU_SBI_H__ */
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index 7eb90a47b571..0bb52761a3f7 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -191,7 +191,6 @@ void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu)
kvm_vcpu_srcu_read_unlock(vcpu);
kvm_vcpu_halt(vcpu);
kvm_vcpu_srcu_read_lock(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
}
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index d45e7da3f0d3..f96991d230bf 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -32,23 +32,13 @@ static int kvm_linux_err_map_sbi(int err)
};
}
-#ifdef CONFIG_RISCV_SBI_V01
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
-#else
+#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
.extid_start = -1UL,
.extid_end = -1UL,
.handler = NULL,
};
#endif
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
-extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
&vcpu_sbi_ext_v01,
diff --git a/arch/riscv/kvm/vcpu_timer.c b/arch/riscv/kvm/vcpu_timer.c
index 16f50c46ba39..185f2386a747 100644
--- a/arch/riscv/kvm/vcpu_timer.c
+++ b/arch/riscv/kvm/vcpu_timer.c
@@ -299,7 +299,6 @@ static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_csr *csr;
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
kvm_riscv_vcpu_update_timedelta(vcpu);
@@ -307,7 +306,6 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
if (!t->sstc_enabled)
return;
- csr = &vcpu->arch.guest_csr;
#if defined(CONFIG_32BIT)
csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
@@ -324,13 +322,11 @@ void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_csr *csr;
struct kvm_vcpu_timer *t = &vcpu->arch.timer;
if (!t->sstc_enabled)
return;
- csr = &vcpu->arch.guest_csr;
t = &vcpu->arch.timer;
#if defined(CONFIG_32BIT)
t->next_cycles = csr_read(CSR_VSTIMECMP);
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 5e49e4b4a4cc..86c56616e5de 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -118,10 +118,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
if (!numpages)
return 0;
- mmap_read_lock(&init_mm);
+ mmap_write_lock(&init_mm);
ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
&masks);
- mmap_read_unlock(&init_mm);
+ mmap_write_unlock(&init_mm);
flush_tlb_kernel_range(start, end);
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index f6dfde577ce8..2a827002934b 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -40,8 +40,6 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_USERFAULTFD=y
-# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
@@ -74,6 +72,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
@@ -93,6 +92,10 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
+CONFIG_ZSWAP=y
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_SLUB_STATS=y
+# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -102,14 +105,12 @@ CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
-CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
CONFIG_GUP_TEST=y
CONFIG_ANON_VMA_NAME=y
+CONFIG_USERFAULTFD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -167,6 +168,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
@@ -493,7 +495,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
@@ -509,7 +510,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
@@ -518,16 +519,18 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETERION is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
# CONFIG_NET_VENDOR_PENSANDO is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
@@ -535,9 +538,9 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -570,6 +573,8 @@ CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_RANDOM_TRUST_CPU is not set
+# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
CONFIG_PPS=m
# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
@@ -727,18 +732,26 @@ CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SHA3_256_S390=m
+CONFIG_CRYPTO_SHA3_512_S390=m
+CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_TI=m
+CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -746,11 +759,14 @@ CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
@@ -766,16 +782,6 @@ CONFIG_CRYPTO_STATS=y
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
-CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
-CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRYPTO_SHA3_256_S390=m
-CONFIG_CRYPTO_SHA3_512_S390=m
-CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
-CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_CRYPTO_LIB_CURVE25519=m
@@ -797,6 +803,7 @@ CONFIG_HEADERS_INSTALL=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SLUB_DEBUG_ON=y
CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_WX=y
@@ -808,8 +815,6 @@ CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
-CONFIG_SLUB_DEBUG_ON=y
-CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
CONFIG_DEBUG_VM_PGFLAGS=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 706df3a4a867..fb780e80e4c8 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -38,8 +38,6 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_USERFAULTFD=y
-# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
@@ -69,6 +67,7 @@ CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
@@ -88,6 +87,9 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BFQ_GROUP_IOSCHED=y
CONFIG_BINFMT_MISC=m
+CONFIG_ZSWAP=y
+CONFIG_ZSMALLOC_STAT=y
+# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -95,13 +97,11 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZSWAP=y
-CONFIG_ZSMALLOC=y
-CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
CONFIG_ANON_VMA_NAME=y
+CONFIG_USERFAULTFD=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -159,6 +159,7 @@ CONFIG_BRIDGE_NETFILTER=m
CONFIG_NETFILTER_NETLINK_HOOK=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
@@ -484,7 +485,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CADENCE is not set
# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
@@ -500,7 +500,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
# CONFIG_NET_VENDOR_LITEX is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
@@ -509,16 +509,18 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NETERION is not set
# CONFIG_NET_VENDOR_NETRONOME is not set
-# CONFIG_NET_VENDOR_NI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
# CONFIG_NET_VENDOR_PENSANDO is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
@@ -526,9 +528,9 @@ CONFIG_MLX5_CORE_EN=y
# CONFIG_NET_VENDOR_ROCKER is not set
# CONFIG_NET_VENDOR_SAMSUNG is not set
# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
# CONFIG_NET_VENDOR_STMICRO is not set
@@ -561,6 +563,8 @@ CONFIG_VIRTIO_CONSOLE=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_RANDOM_TRUST_CPU is not set
+# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
@@ -713,18 +717,26 @@ CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_HCTR2=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_BLAKE2S=m
+CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_MD4=m
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_SHA3_256_S390=m
+CONFIG_CRYPTO_SHA3_512_S390=m
+CONFIG_CRYPTO_SM3_GENERIC=m
CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_AES_TI=m
+CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
@@ -732,11 +744,14 @@ CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_FCRYPT=m
CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_CHACHA_S390=m
CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_ARIA=m
CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
@@ -752,16 +767,6 @@ CONFIG_CRYPTO_STATS=y
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
-CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
-CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRYPTO_SHA3_256_S390=m
-CONFIG_CRYPTO_SHA3_512_S390=m
-CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_CHACHA_S390=m
-CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index f4976f611b94..a5576b8d4081 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,4 +1,3 @@
-# CONFIG_SWAP is not set
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BPF_SYSCALL=y
@@ -9,7 +8,6 @@ CONFIG_BPF_SYSCALL=y
# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
-# CONFIG_COMPAT_BRK is not set
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
@@ -28,6 +26,8 @@ CONFIG_CRASH_DUMP=y
# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_SWAP is not set
+# CONFIG_COMPAT_BRK is not set
# CONFIG_COMPACTION is not set
# CONFIG_MIGRATION is not set
CONFIG_NET=y
@@ -53,10 +53,12 @@ CONFIG_ZFCP=y
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
# CONFIG_HMC_DRV is not set
+# CONFIG_S390_UV_UAPI is not set
# CONFIG_S390_TAPE is not set
# CONFIG_VMCP is not set
# CONFIG_MONWRITER is not set
# CONFIG_S390_VMUR is not set
+# CONFIG_RANDOM_TRUST_BOOTLOADER is not set
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index f22beda9e6d5..ccdbccfde148 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -28,9 +28,11 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
static inline int prepare_hugepage_range(struct file *file,
unsigned long addr, unsigned long len)
{
- if (len & ~HPAGE_MASK)
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
return -EINVAL;
- if (addr & ~HPAGE_MASK)
+ if (addr & ~huge_page_mask(h))
return -EINVAL;
return 0;
}
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index f39092e0ceaa..b1e98a9ed152 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1038,16 +1038,11 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
#define __KVM_HAVE_ARCH_VM_FREE
void kvm_arch_free_vm(struct kvm *kvm);
-#ifdef CONFIG_VFIO_PCI_ZDEV_KVM
-int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm);
-void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev);
-#else
-static inline int kvm_s390_pci_register_kvm(struct zpci_dev *dev,
- struct kvm *kvm)
-{
- return -EPERM;
-}
-static inline void kvm_s390_pci_unregister_kvm(struct zpci_dev *dev) {}
-#endif
+struct zpci_kvm_hook {
+ int (*kvm_register)(void *opaque, struct kvm *kvm);
+ void (*kvm_unregister)(void *opaque);
+};
+
+extern struct zpci_kvm_hook zpci_kvm_hook;
#endif
diff --git a/arch/s390/include/asm/softirq_stack.h b/arch/s390/include/asm/softirq_stack.h
index af68d6c1d584..1ac5115d3115 100644
--- a/arch/s390/include/asm/softirq_stack.h
+++ b/arch/s390/include/asm/softirq_stack.h
@@ -5,7 +5,7 @@
#include <asm/lowcore.h>
#include <asm/stacktrace.h>
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static inline void do_softirq_own_stack(void)
{
call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 60ac66aab163..31cb9b00a36b 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -64,7 +64,7 @@ static inline unsigned long nmi_get_mcesa_size(void)
* structure. The structure is required for machine check happening
* early in the boot process.
*/
-static struct mcesa boot_mcesa __initdata __aligned(MCESA_MAX_SIZE);
+static struct mcesa boot_mcesa __aligned(MCESA_MAX_SIZE);
void __init nmi_alloc_mcesa_early(u64 *mcesad)
{
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ed4fbbbdd1b0..bbd4bde4f65d 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -479,6 +479,7 @@ static void __init setup_lowcore_dat_off(void)
put_abs_lowcore(restart_data, lc->restart_data);
put_abs_lowcore(restart_source, lc->restart_source);
put_abs_lowcore(restart_psw, lc->restart_psw);
+ put_abs_lowcore(mcesad, lc->mcesad);
mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
if (!mcck_stack)
@@ -507,8 +508,8 @@ static void __init setup_lowcore_dat_on(void)
S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
- __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
__ctl_set_bit(0, 28);
+ __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
put_abs_lowcore(restart_flags, RESTART_FLAG_CTLREGS);
put_abs_lowcore(program_new_psw, lc->program_new_psw);
for (cr = 0; cr < ARRAY_SIZE(lc->cregs_save_area); cr++)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 2e526f11b91e..5ea3830af0cc 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -131,6 +131,7 @@ SECTIONS
/*
* Table with the patch locations to undo expolines
*/
+ . = ALIGN(4);
.nospec_call_table : {
__nospec_call_start = . ;
*(.s390_indirect*)
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 082ec5f2c3a5..0243b6e38d36 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -489,6 +489,8 @@ enum prot_type {
PROT_TYPE_ALC = 2,
PROT_TYPE_DAT = 3,
PROT_TYPE_IEP = 4,
+ /* Dummy value for passing an initialized value when code != PGM_PROTECTION */
+ PROT_NONE,
};
static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar,
@@ -504,6 +506,10 @@ static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva,
switch (code) {
case PGM_PROTECTION:
switch (prot) {
+ case PROT_NONE:
+		/* We should never get here; this acts like termination */
+ WARN_ON_ONCE(1);
+ break;
case PROT_TYPE_IEP:
tec->b61 = 1;
fallthrough;
@@ -968,8 +974,10 @@ static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
return rc;
} else {
gpa = kvm_s390_real_to_abs(vcpu, ga);
- if (kvm_is_error_gpa(vcpu->kvm, gpa))
+ if (kvm_is_error_gpa(vcpu->kvm, gpa)) {
rc = PGM_ADDRESSING;
+ prot = PROT_NONE;
+ }
}
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, prot);
@@ -1112,8 +1120,6 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
if (rc == PGM_PROTECTION && try_storage_prot_override)
rc = access_guest_page_with_key(vcpu->kvm, mode, gpas[idx],
data, fragment_len, PAGE_SPO_ACC);
- if (rc == PGM_PROTECTION)
- prot = PROT_TYPE_KEYC;
if (rc)
break;
len -= fragment_len;
@@ -1123,6 +1129,10 @@ int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
if (rc > 0) {
bool terminate = (mode == GACC_STORE) && (idx > 0);
+ if (rc == PGM_PROTECTION)
+ prot = PROT_TYPE_KEYC;
+ else
+ prot = PROT_NONE;
rc = trans_exc_ending(vcpu, rc, ga, ar, mode, prot, terminate);
}
out_unlock:
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b9c944b262c7..ab569faf0df2 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -3324,7 +3324,7 @@ static void aen_host_forward(unsigned long si)
if (gaite->count == 0)
return;
if (gaite->aisb != 0)
- set_bit_inv(gaite->aisbo, (unsigned long *)gaite->aisb);
+ set_bit_inv(gaite->aisbo, phys_to_virt(gaite->aisb));
kvm = kvm_s390_pci_si_to_kvm(aift, si);
if (!kvm)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index edfd4bbd0cba..45d4b8182b07 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -505,7 +505,7 @@ int kvm_arch_init(void *opaque)
goto out;
}
- if (kvm_s390_pci_interp_allowed()) {
+ if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM)) {
rc = kvm_s390_pci_init();
if (rc) {
pr_err("Unable to allocate AIFT for PCI\n");
@@ -527,7 +527,7 @@ out:
void kvm_arch_exit(void)
{
kvm_s390_gib_destroy();
- if (kvm_s390_pci_interp_allowed())
+ if (IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM))
kvm_s390_pci_exit();
debug_unregister(kvm_s390_dbf);
debug_unregister(kvm_s390_dbf_uv);
@@ -4343,8 +4343,6 @@ retry:
goto retry;
}
- /* nothing to do, just clear the request */
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
/* we left the vsie handler, nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
diff --git a/arch/s390/kvm/pci.c b/arch/s390/kvm/pci.c
index 4946fb7757d6..c50c1645c0ae 100644
--- a/arch/s390/kvm/pci.c
+++ b/arch/s390/kvm/pci.c
@@ -58,7 +58,7 @@ static int zpci_setup_aipb(u8 nisc)
if (!zpci_aipb)
return -ENOMEM;
- aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, 0);
+ aift->sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC, NULL);
if (!aift->sbv) {
rc = -ENOMEM;
goto free_aipb;
@@ -71,7 +71,7 @@ static int zpci_setup_aipb(u8 nisc)
rc = -ENOMEM;
goto free_sbv;
}
- aift->gait = (struct zpci_gaite *)page_to_phys(page);
+ aift->gait = (struct zpci_gaite *)page_to_virt(page);
zpci_aipb->aipb.faisb = virt_to_phys(aift->sbv->vector);
zpci_aipb->aipb.gait = virt_to_phys(aift->gait);
@@ -373,7 +373,7 @@ static int kvm_s390_pci_aif_disable(struct zpci_dev *zdev, bool force)
gaite->gisc = 0;
gaite->aisbo = 0;
gaite->gisa = 0;
- aift->kzdev[zdev->aisb] = 0;
+ aift->kzdev[zdev->aisb] = NULL;
/* Clear zdev info */
airq_iv_free_bit(aift->sbv, zdev->aisb);
airq_iv_release(zdev->aibv);
@@ -431,8 +431,9 @@ static void kvm_s390_pci_dev_release(struct zpci_dev *zdev)
* available, enable them and let userspace indicate whether or not they will
* be used (specify SHM bit to disable).
*/
-int kvm_s390_pci_register_kvm(struct zpci_dev *zdev, struct kvm *kvm)
+static int kvm_s390_pci_register_kvm(void *opaque, struct kvm *kvm)
{
+ struct zpci_dev *zdev = opaque;
int rc;
if (!zdev)
@@ -510,10 +511,10 @@ err:
kvm_put_kvm(kvm);
return rc;
}
-EXPORT_SYMBOL_GPL(kvm_s390_pci_register_kvm);
-void kvm_s390_pci_unregister_kvm(struct zpci_dev *zdev)
+static void kvm_s390_pci_unregister_kvm(void *opaque)
{
+ struct zpci_dev *zdev = opaque;
struct kvm *kvm;
if (!zdev)
@@ -566,7 +567,6 @@ out:
kvm_put_kvm(kvm);
}
-EXPORT_SYMBOL_GPL(kvm_s390_pci_unregister_kvm);
void kvm_s390_pci_init_list(struct kvm *kvm)
{
@@ -672,6 +672,12 @@ out:
int kvm_s390_pci_init(void)
{
+ zpci_kvm_hook.kvm_register = kvm_s390_pci_register_kvm;
+ zpci_kvm_hook.kvm_unregister = kvm_s390_pci_unregister_kvm;
+
+ if (!kvm_s390_pci_interp_allowed())
+ return 0;
+
aift = kzalloc(sizeof(struct zpci_aift), GFP_KERNEL);
if (!aift)
return -ENOMEM;
@@ -684,6 +690,12 @@ int kvm_s390_pci_init(void)
void kvm_s390_pci_exit(void)
{
+ zpci_kvm_hook.kvm_register = NULL;
+ zpci_kvm_hook.kvm_unregister = NULL;
+
+ if (!kvm_s390_pci_interp_allowed())
+ return;
+
mutex_destroy(&aift->aift_lock);
kfree(aift);
diff --git a/arch/s390/kvm/pci.h b/arch/s390/kvm/pci.h
index 3a3606c3a0fe..486d06ef563f 100644
--- a/arch/s390/kvm/pci.h
+++ b/arch/s390/kvm/pci.h
@@ -46,9 +46,9 @@ extern struct zpci_aift *aift;
static inline struct kvm *kvm_s390_pci_si_to_kvm(struct zpci_aift *aift,
unsigned long si)
{
- if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || aift->kzdev == 0 ||
- aift->kzdev[si] == 0)
- return 0;
+ if (!IS_ENABLED(CONFIG_VFIO_PCI_ZDEV_KVM) || !aift->kzdev ||
+ !aift->kzdev[si])
+ return NULL;
return aift->kzdev[si]->kvm;
};
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 09b6e756d521..9ab6ca6f7f59 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -421,8 +421,6 @@ retry:
if (unlikely(!(vma->vm_flags & access)))
goto out_up;
- if (is_vm_hugetlb_page(vma))
- address &= HPAGE_MASK;
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index bf557a1b789c..5ae31ca9dd44 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -5,5 +5,5 @@
obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
- pci_bus.o
+ pci_bus.o pci_kvm_hook.o
obj-$(CONFIG_PCI_IOV) += pci_iov.o
diff --git a/arch/s390/pci/pci_kvm_hook.c b/arch/s390/pci/pci_kvm_hook.c
new file mode 100644
index 000000000000..ff34baf50a3e
--- /dev/null
+++ b/arch/s390/pci/pci_kvm_hook.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VFIO ZPCI devices support
+ *
+ * Copyright (C) IBM Corp. 2022. All rights reserved.
+ * Author(s): Pierre Morel <pmorel@linux.ibm.com>
+ */
+#include <linux/kvm_host.h>
+
+struct zpci_kvm_hook zpci_kvm_hook;
+EXPORT_SYMBOL_GPL(zpci_kvm_hook);
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c
index 909276738078..4e6835de54cf 100644
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
@@ -149,7 +149,7 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
struct thread_info *curctx;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 41fa1be980a3..72da2e10e255 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -855,7 +855,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs)
set_irq_regs(old_regs);
}
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index cb98a05ee743..c601939a74b1 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4052,8 +4052,9 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr, void *data)
/* Disable guest PEBS if host PEBS is enabled. */
arr[pebs_enable].guest = 0;
} else {
- /* Disable guest PEBS for cross-mapped PEBS counters. */
+ /* Disable guest PEBS thoroughly for cross-mapped PEBS counters. */
arr[pebs_enable].guest &= ~kvm_pmu->host_cross_mapped_mask;
+ arr[global_ctrl].guest &= ~kvm_pmu->host_cross_mapped_mask;
/* Set hw GLOBAL_CTRL bits for PEBS counter when it runs for guest */
arr[global_ctrl].guest |= arr[pebs_enable].guest;
}
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 0a9407dc0859..3089ec352743 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -138,6 +138,9 @@
#define HV_X64_NESTED_GUEST_MAPPING_FLUSH BIT(18)
#define HV_X64_NESTED_MSR_BITMAP BIT(19)
+/* Nested features #2. These are HYPERV_CPUID_NESTED_FEATURES.EBX bits. */
+#define HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL BIT(0)
+
/*
* This is specific to AMD and specifies that enlightened TLB flush is
* supported. If guest opts in to this feature, ASID invalidations only
@@ -546,7 +549,7 @@ struct hv_enlightened_vmcs {
u64 guest_rip;
u32 hv_clean_fields;
- u32 hv_padding_32;
+ u32 padding32_1;
u32 hv_synthetic_controls;
struct {
u32 nested_flush_hypercall:1;
@@ -554,14 +557,25 @@ struct hv_enlightened_vmcs {
u32 reserved:30;
} __packed hv_enlightenments_control;
u32 hv_vp_id;
-
+ u32 padding32_2;
u64 hv_vm_id;
u64 partition_assist_page;
u64 padding64_4[4];
u64 guest_bndcfgs;
- u64 padding64_5[7];
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_ia32_s_cet;
+ u64 guest_ssp;
+ u64 guest_ia32_int_ssp_table_addr;
+ u64 guest_ia32_lbr_ctl;
+ u64 padding64_5[2];
u64 xss_exit_bitmap;
- u64 padding64_6[7];
+ u64 encls_exiting_bitmap;
+ u64 host_ia32_perf_global_ctrl;
+ u64 tsc_multiplier;
+ u64 host_ia32_s_cet;
+ u64 host_ssp;
+ u64 host_ia32_int_ssp_table_addr;
+ u64 padding64_6;
} __packed;
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h
index 63f818aedf77..147cb8fdda92 100644
--- a/arch/x86/include/asm/irq_stack.h
+++ b/arch/x86/include/asm/irq_stack.h
@@ -203,7 +203,7 @@
IRQ_CONSTRAINTS, regs, vector); \
}
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
/*
* Macro to invoke __do_softirq on the irq stack. This is only called from
* task context when bottom halves are about to be reenabled and soft
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 51f777071584..82ba4a564e58 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -67,7 +67,7 @@ KVM_X86_OP(get_interrupt_shadow)
KVM_X86_OP(patch_hypercall)
KVM_X86_OP(inject_irq)
KVM_X86_OP(inject_nmi)
-KVM_X86_OP(queue_exception)
+KVM_X86_OP(inject_exception)
KVM_X86_OP(cancel_injection)
KVM_X86_OP(interrupt_allowed)
KVM_X86_OP(nmi_allowed)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2c96c43c313a..7551b6f9c31c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -615,6 +615,8 @@ struct kvm_vcpu_hv {
u32 enlightenments_eax; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EAX */
u32 enlightenments_ebx; /* HYPERV_CPUID_ENLIGHTMENT_INFO.EBX */
u32 syndbg_cap_eax; /* HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX */
+ u32 nested_eax; /* HYPERV_CPUID_NESTED_FEATURES.EAX */
+ u32 nested_ebx; /* HYPERV_CPUID_NESTED_FEATURES.EBX */
} cpuid_cache;
};
@@ -639,6 +641,16 @@ struct kvm_vcpu_xen {
struct timer_list poll_timer;
};
+struct kvm_queued_exception {
+ bool pending;
+ bool injected;
+ bool has_error_code;
+ u8 vector;
+ u32 error_code;
+ unsigned long payload;
+ bool has_payload;
+};
+
struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -729,6 +741,7 @@ struct kvm_vcpu_arch {
struct fpu_guest guest_fpu;
u64 xcr0;
+ u64 guest_supported_xcr0;
struct kvm_pio_request pio;
void *pio_data;
@@ -737,16 +750,12 @@ struct kvm_vcpu_arch {
u8 event_exit_inst_len;
- struct kvm_queued_exception {
- bool pending;
- bool injected;
- bool has_error_code;
- u8 nr;
- u32 error_code;
- unsigned long payload;
- bool has_payload;
- u8 nested_apf;
- } exception;
+ bool exception_from_userspace;
+
+ /* Exceptions to be injected to the guest. */
+ struct kvm_queued_exception exception;
+ /* Exception VM-Exits to be synthesized to L1. */
+ struct kvm_queued_exception exception_vmexit;
struct kvm_queued_interrupt {
bool injected;
@@ -857,7 +866,6 @@ struct kvm_vcpu_arch {
u32 id;
bool send_user_only;
u32 host_apf_flags;
- unsigned long nested_apf_token;
bool delivery_as_pf_vmexit;
bool pageready_pending;
} apf;
@@ -1272,8 +1280,8 @@ struct kvm_arch {
bool tdp_mmu_enabled;
/*
- * List of struct kvm_mmu_pages being used as roots.
- * All struct kvm_mmu_pages in the list should have
+ * List of kvm_mmu_page structs being used as roots.
+ * All kvm_mmu_page structs in the list should have
* tdp_mmu_page set.
*
* For reads, this list is protected by:
@@ -1292,8 +1300,8 @@ struct kvm_arch {
struct list_head tdp_mmu_roots;
/*
- * List of struct kvmp_mmu_pages not being used as roots.
- * All struct kvm_mmu_pages in the list should have
+ * List of kvm_mmu_page structs not being used as roots.
+ * All kvm_mmu_page structs in the list should have
* tdp_mmu_page set and a tdp_mmu_root_count of 0.
*/
struct list_head tdp_mmu_pages;
@@ -1303,9 +1311,9 @@ struct kvm_arch {
* is held in read mode:
* - tdp_mmu_roots (above)
* - tdp_mmu_pages (above)
- * - the link field of struct kvm_mmu_pages used by the TDP MMU
+ * - the link field of kvm_mmu_page structs used by the TDP MMU
* - lpage_disallowed_mmu_pages
- * - the lpage_disallowed_link field of struct kvm_mmu_pages used
+ * - the lpage_disallowed_link field of kvm_mmu_page structs used
* by the TDP MMU
* It is acceptable, but not necessary, to acquire this lock when
* the thread holds the MMU lock in write mode.
@@ -1523,7 +1531,7 @@ struct kvm_x86_ops {
unsigned char *hypercall_addr);
void (*inject_irq)(struct kvm_vcpu *vcpu, bool reinjected);
void (*inject_nmi)(struct kvm_vcpu *vcpu);
- void (*queue_exception)(struct kvm_vcpu *vcpu);
+ void (*inject_exception)(struct kvm_vcpu *vcpu);
void (*cancel_injection)(struct kvm_vcpu *vcpu);
int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
@@ -1633,10 +1641,10 @@ struct kvm_x86_ops {
struct kvm_x86_nested_ops {
void (*leave_nested)(struct kvm_vcpu *vcpu);
+ bool (*is_exception_vmexit)(struct kvm_vcpu *vcpu, u8 vector,
+ u32 error_code);
int (*check_events)(struct kvm_vcpu *vcpu);
- bool (*handle_page_fault_workaround)(struct kvm_vcpu *vcpu,
- struct x86_exception *fault);
- bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
+ bool (*has_events)(struct kvm_vcpu *vcpu);
void (*triple_fault)(struct kvm_vcpu *vcpu);
int (*get_state)(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
@@ -1862,7 +1870,7 @@ void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long pay
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
-bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
+void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index c371ef695fcc..498dc600bd5c 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -309,7 +309,7 @@ enum vmcs_field {
GUEST_LDTR_AR_BYTES = 0x00004820,
GUEST_TR_AR_BYTES = 0x00004822,
GUEST_INTERRUPTIBILITY_INFO = 0x00004824,
- GUEST_ACTIVITY_STATE = 0X00004826,
+ GUEST_ACTIVITY_STATE = 0x00004826,
GUEST_SYSENTER_CS = 0x0000482A,
VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
HOST_IA32_SYSENTER_CS = 0x00004c00,
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index e5dd6da78713..01833ebf5e8e 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -132,7 +132,7 @@ int irq_init_percpu_irqstack(unsigned int cpu)
return 0;
}
-#ifndef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
struct irq_stack *irqstk;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 75dcf7a72605..7065462378e2 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -311,11 +311,19 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
+static bool kvm_cpuid_has_hyperv(struct kvm_cpuid_entry2 *entries, int nent)
+{
+ struct kvm_cpuid_entry2 *entry;
+
+ entry = cpuid_entry2_find(entries, nent, HYPERV_CPUID_INTERFACE,
+ KVM_CPUID_INDEX_NOT_SIGNIFICANT);
+ return entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX;
+}
+
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct kvm_cpuid_entry2 *best;
- u64 guest_supported_xcr0;
best = kvm_find_cpuid_entry(vcpu, 1);
if (best && apic) {
@@ -327,10 +335,16 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_apic_set_version(vcpu);
}
- guest_supported_xcr0 =
+ vcpu->arch.guest_supported_xcr0 =
cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
- vcpu->arch.guest_fpu.fpstate->user_xfeatures = guest_supported_xcr0;
+ /*
+ * FP+SSE can always be saved/restored via KVM_{G,S}ET_XSAVE, even if
+	 * XSAVE/XCR0 are not exposed to the guest, and even if XSAVE isn't
+ * supported by the host.
+ */
+ vcpu->arch.guest_fpu.fpstate->user_xfeatures = vcpu->arch.guest_supported_xcr0 |
+ XFEATURE_MASK_FPSSE;
kvm_update_pv_runtime(vcpu);
@@ -341,7 +355,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.cr4_guest_rsvd_bits =
__cr4_reserved_bits(guest_cpuid_has, vcpu);
- kvm_hv_set_cpuid(vcpu);
+ kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent));
/* Invoke the vendor callback only after the above state is updated. */
static_call(kvm_x86_vcpu_after_set_cpuid)(vcpu);
@@ -404,6 +419,12 @@ static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
return 0;
}
+ if (kvm_cpuid_has_hyperv(e2, nent)) {
+ r = kvm_hv_vcpu_init(vcpu);
+ if (r)
+ return r;
+ }
+
r = kvm_check_cpuid(vcpu, e2, nent);
if (r)
return r;
@@ -897,8 +918,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0;
}
break;
- case 9:
- break;
case 0xa: { /* Architectural Performance Monitoring */
union cpuid10_eax eax;
union cpuid10_edx edx;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d5ec3a2ed5a4..3b27622d4642 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1137,9 +1137,11 @@ static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
- unsigned reg = ctxt->modrm_reg;
+ unsigned int reg;
- if (!(ctxt->d & ModRM))
+ if (ctxt->d & ModRM)
+ reg = ctxt->modrm_reg;
+ else
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
@@ -1953,7 +1955,7 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- if (ctxt->modrm_reg == VCPU_SREG_SS)
+ if (seg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
if (ctxt->op_bytes > 2)
rsp_increment(ctxt, ctxt->op_bytes - 2);
@@ -3645,13 +3647,10 @@ static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
r = ctxt->ops->set_msr_with_filter(ctxt, msr_index, msr_data);
- if (r == X86EMUL_IO_NEEDED)
- return r;
-
- if (r > 0)
+ if (r == X86EMUL_PROPAGATE_FAULT)
return emulate_gp(ctxt, 0);
- return r < 0 ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
+ return r;
}
static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
@@ -3662,15 +3661,14 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
r = ctxt->ops->get_msr_with_filter(ctxt, msr_index, &msr_data);
- if (r == X86EMUL_IO_NEEDED)
- return r;
-
- if (r)
+ if (r == X86EMUL_PROPAGATE_FAULT)
return emulate_gp(ctxt, 0);
- *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
- *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
- return X86EMUL_CONTINUE;
+ if (r == X86EMUL_CONTINUE) {
+ *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
+ *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
+ }
+ return r;
}
static int em_store_sreg(struct x86_emulate_ctxt *ctxt, int segment)
@@ -4132,6 +4130,9 @@ static int em_xsetbv(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ecx, edx;
+ if (!(ctxt->ops->get_cr(ctxt, 4) & X86_CR4_OSXSAVE))
+ return emulate_ud(ctxt);
+
eax = reg_read(ctxt, VCPU_REGS_RAX);
edx = reg_read(ctxt, VCPU_REGS_RDX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
@@ -4168,8 +4169,7 @@ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
ctxt->ops->get_dr(ctxt, 7, &dr7);
- /* Check if DR7.Global_Enable is set */
- return dr7 & (1 << 13);
+ return dr7 & DR7_GD;
}
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index ed804447589c..0adf4a437e85 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -38,9 +38,6 @@
#include "irq.h"
#include "fpu.h"
-/* "Hv#1" signature */
-#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
-
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
@@ -934,11 +931,14 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
stimer_prepare_msg(stimer);
}
-static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_hv *hv_vcpu;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
int i;
+ if (hv_vcpu)
+ return 0;
+
hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
if (!hv_vcpu)
return -ENOMEM;
@@ -962,11 +962,9 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
struct kvm_vcpu_hv_synic *synic;
int r;
- if (!to_hv_vcpu(vcpu)) {
- r = kvm_hv_vcpu_init(vcpu);
- if (r)
- return r;
- }
+ r = kvm_hv_vcpu_init(vcpu);
+ if (r)
+ return r;
synic = to_hv_synic(vcpu);
@@ -1660,10 +1658,8 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
if (!host && !vcpu->arch.hyperv_enabled)
return 1;
- if (!to_hv_vcpu(vcpu)) {
- if (kvm_hv_vcpu_init(vcpu))
- return 1;
- }
+ if (kvm_hv_vcpu_init(vcpu))
+ return 1;
if (kvm_hv_msr_partition_wide(msr)) {
int r;
@@ -1683,10 +1679,8 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
if (!host && !vcpu->arch.hyperv_enabled)
return 1;
- if (!to_hv_vcpu(vcpu)) {
- if (kvm_hv_vcpu_init(vcpu))
- return 1;
- }
+ if (kvm_hv_vcpu_init(vcpu))
+ return 1;
if (kvm_hv_msr_partition_wide(msr)) {
int r;
@@ -1987,49 +1981,49 @@ ret_success:
return HV_STATUS_SUCCESS;
}
-void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_cpuid_entry2 *entry;
- struct kvm_vcpu_hv *hv_vcpu;
- entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE);
- if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
- vcpu->arch.hyperv_enabled = true;
- } else {
- vcpu->arch.hyperv_enabled = false;
+ vcpu->arch.hyperv_enabled = hyperv_enabled;
+
+ if (!hv_vcpu) {
+ /*
+ * KVM should have already allocated kvm_vcpu_hv if Hyper-V is
+ * enabled in CPUID.
+ */
+ WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
return;
}
- if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
- return;
+ memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));
- hv_vcpu = to_hv_vcpu(vcpu);
+ if (!vcpu->arch.hyperv_enabled)
+ return;
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
if (entry) {
hv_vcpu->cpuid_cache.features_eax = entry->eax;
hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
hv_vcpu->cpuid_cache.features_edx = entry->edx;
- } else {
- hv_vcpu->cpuid_cache.features_eax = 0;
- hv_vcpu->cpuid_cache.features_ebx = 0;
- hv_vcpu->cpuid_cache.features_edx = 0;
}
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
if (entry) {
hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
- } else {
- hv_vcpu->cpuid_cache.enlightenments_eax = 0;
- hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
}
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
if (entry)
hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
- else
- hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
+ if (entry) {
+ hv_vcpu->cpuid_cache.nested_eax = entry->eax;
+ hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
+ }
}
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
@@ -2552,7 +2546,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
case HYPERV_CPUID_NESTED_FEATURES:
ent->eax = evmcs_ver;
ent->eax |= HV_X64_NESTED_MSR_BITMAP;
-
+ ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
break;
case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index da2737f2a956..1030b1b50552 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -23,6 +23,9 @@
#include <linux/kvm_host.h>
+/* "Hv#1" signature */
+#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
+
/*
* The #defines related to the synthetic debugger are required by KDNet, but
* they are not documented in the Hyper-V TLFS because the synthetic debugger
@@ -141,7 +144,8 @@ void kvm_hv_request_tsc_page_update(struct kvm *kvm);
void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
-void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu);
+int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9dda989a1cf0..d7639d126e6c 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -3025,17 +3025,8 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
struct kvm_lapic *apic = vcpu->arch.apic;
u8 sipi_vector;
int r;
- unsigned long pe;
- if (!lapic_in_kernel(vcpu))
- return 0;
-
- /*
- * Read pending events before calling the check_events
- * callback.
- */
- pe = smp_load_acquire(&apic->pending_events);
- if (!pe)
+ if (!kvm_apic_has_pending_init_or_sipi(vcpu))
return 0;
if (is_guest_mode(vcpu)) {
@@ -3043,38 +3034,31 @@ int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
if (r < 0)
return r == -EBUSY ? 0 : r;
/*
- * If an event has happened and caused a vmexit,
- * we know INITs are latched and therefore
- * we will not incorrectly deliver an APIC
- * event instead of a vmexit.
+ * Continue processing INIT/SIPI even if a nested VM-Exit
+ * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
+ * are blocked as a result of transitioning to VMX root mode.
*/
}
/*
- * INITs are latched while CPU is in specific states
- * (SMM, VMX root mode, SVM with GIF=0).
- * Because a CPU cannot be in these states immediately
- * after it has processed an INIT signal (and thus in
- * KVM_MP_STATE_INIT_RECEIVED state), just eat SIPIs
- * and leave the INIT pending.
+ * INITs are blocked while CPU is in specific states (SMM, VMX root
+ * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
+ * wait-for-SIPI (WFS).
*/
- if (kvm_vcpu_latch_init(vcpu)) {
+ if (!kvm_apic_init_sipi_allowed(vcpu)) {
WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
- if (test_bit(KVM_APIC_SIPI, &pe))
- clear_bit(KVM_APIC_SIPI, &apic->pending_events);
+ clear_bit(KVM_APIC_SIPI, &apic->pending_events);
return 0;
}
- if (test_bit(KVM_APIC_INIT, &pe)) {
- clear_bit(KVM_APIC_INIT, &apic->pending_events);
+ if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
kvm_vcpu_reset(vcpu, true);
if (kvm_vcpu_is_bsp(apic->vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
}
- if (test_bit(KVM_APIC_SIPI, &pe)) {
- clear_bit(KVM_APIC_SIPI, &apic->pending_events);
+ if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
/* evaluate pending_events before reading the vector */
smp_rmb();
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 117a46df5cc1..a5ac4a5a5179 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -7,6 +7,7 @@
#include <linux/kvm_host.h>
#include "hyperv.h"
+#include "kvm_cache_regs.h"
#define KVM_APIC_INIT 0
#define KVM_APIC_SIPI 1
@@ -223,11 +224,17 @@ static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
return lapic_in_kernel(vcpu) && vcpu->arch.apic->apicv_active;
}
-static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
+static inline bool kvm_apic_has_pending_init_or_sipi(struct kvm_vcpu *vcpu)
{
return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
}
+static inline bool kvm_apic_init_sipi_allowed(struct kvm_vcpu *vcpu)
+{
+ return !is_smm(vcpu) &&
+ !static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
+}
+
static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
{
return (irq->delivery_mode == APIC_DM_LOWEST ||
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 126fa9aec64c..6f81539061d6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1596,6 +1596,8 @@ static void __rmap_add(struct kvm *kvm,
rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
rmap_count = pte_list_add(cache, spte, rmap_head);
+ if (rmap_count > kvm->stat.max_mmu_rmap_size)
+ kvm->stat.max_mmu_rmap_size = rmap_count;
if (rmap_count > RMAP_RECYCLE_THRESHOLD) {
kvm_zap_all_rmap_sptes(kvm, rmap_head);
kvm_flush_remote_tlbs_with_address(
@@ -1665,6 +1667,18 @@ static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
percpu_counter_add(&kvm_total_used_mmu_pages, nr);
}
+static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_mod_used_mmu_pages(kvm, +1);
+ kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_mod_used_mmu_pages(kvm, -1);
+ kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
static void kvm_mmu_free_shadow_page(struct kvm_mmu_page *sp)
{
MMU_WARN_ON(!is_empty_shadow_page(sp->spt));
@@ -2122,7 +2136,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm,
*/
sp->mmu_valid_gen = kvm->arch.mmu_valid_gen;
list_add(&sp->link, &kvm->arch.active_mmu_pages);
- kvm_mod_used_mmu_pages(kvm, +1);
+ kvm_account_mmu_page(kvm, sp);
sp->gfn = gfn;
sp->role = role;
@@ -2456,7 +2470,7 @@ static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm,
list_add(&sp->link, invalid_list);
else
list_move(&sp->link, invalid_list);
- kvm_mod_used_mmu_pages(kvm, -1);
+ kvm_unaccount_mmu_page(kvm, sp);
} else {
/*
* Remove the active root from the active page list, the root
@@ -4290,7 +4304,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
vcpu->arch.l1tf_flush_l1d = true;
if (!flags) {
- trace_kvm_page_fault(fault_address, error_code);
+ trace_kvm_page_fault(vcpu, fault_address, error_code);
if (kvm_event_needs_reinjection(vcpu))
kvm_mmu_unprotect_page_virt(vcpu, fault_address);
@@ -5361,19 +5375,6 @@ void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu)
__kvm_mmu_free_obsolete_roots(vcpu->kvm, &vcpu->arch.guest_mmu);
}
-static bool need_remote_flush(u64 old, u64 new)
-{
- if (!is_shadow_present_pte(old))
- return false;
- if (!is_shadow_present_pte(new))
- return true;
- if ((old ^ new) & SPTE_BASE_ADDR_MASK)
- return true;
- old ^= shadow_nx_mask;
- new ^= shadow_nx_mask;
- return (old & ~new & SPTE_PERM_MASK) != 0;
-}
-
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
int *bytes)
{
@@ -5519,7 +5520,7 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
mmu_page_zap_pte(vcpu->kvm, sp, spte, NULL);
if (gentry && sp->role.level != PG_LEVEL_4K)
++vcpu->kvm->stat.mmu_pde_zapped;
- if (need_remote_flush(entry, *spte))
+ if (is_shadow_present_pte(entry))
flush = true;
++spte;
}
@@ -6085,47 +6086,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
const struct kvm_memory_slot *memslot,
int start_level)
{
- bool flush = false;
-
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
- flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
- start_level, KVM_MAX_HUGEPAGE_LEVEL,
- false);
+ slot_handle_level(kvm, memslot, slot_rmap_write_protect,
+ start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
- flush |= kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
+ kvm_tdp_mmu_wrprot_slot(kvm, memslot, start_level);
read_unlock(&kvm->mmu_lock);
}
-
- /*
- * Flush TLBs if any SPTEs had to be write-protected to ensure that
- * guest writes are reflected in the dirty bitmap before the memslot
- * update completes, i.e. before enabling dirty logging is visible to
- * userspace.
- *
- * Perform the TLB flush outside the mmu_lock to reduce the amount of
- * time the lock is held. However, this does mean that another CPU can
- * now grab mmu_lock and encounter a write-protected SPTE while CPUs
- * still have a writable mapping for the associated GFN in their TLB.
- *
- * This is safe but requires KVM to be careful when making decisions
- * based on the write-protection status of an SPTE. Specifically, KVM
- * also write-protects SPTEs to monitor changes to guest page tables
- * during shadow paging, and must guarantee no CPUs can write to those
- * page before the lock is dropped. As mentioned in the previous
- * paragraph, a write-protected SPTE is no guarantee that CPU cannot
- * perform writes. So to determine if a TLB flush is truly required, KVM
- * will clear a separate software-only bit (MMU-writable) and skip the
- * flush if-and-only-if this bit was already clear.
- *
- * See is_writable_pte() for more details.
- */
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
static inline bool need_topup(struct kvm_mmu_memory_cache *cache, int min)
@@ -6493,32 +6465,30 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
- bool flush = false;
-
if (kvm_memslots_have_rmaps(kvm)) {
write_lock(&kvm->mmu_lock);
/*
* Clear dirty bits only on 4k SPTEs since the legacy MMU only
* supports dirty logging at a 4k granularity.
*/
- flush = slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
+ slot_handle_level_4k(kvm, memslot, __rmap_clear_dirty, false);
write_unlock(&kvm->mmu_lock);
}
if (is_tdp_mmu_enabled(kvm)) {
read_lock(&kvm->mmu_lock);
- flush |= kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
+ kvm_tdp_mmu_clear_dirty_slot(kvm, memslot);
read_unlock(&kvm->mmu_lock);
}
/*
+ * The caller will flush the TLBs after this function returns.
+ *
* It's also safe to flush TLBs out of mmu lock here as currently this
* function is only used for dirty logging, in which case flushing TLB
* out of mmu lock also guarantees no dirty pages will be lost in
* dirty_bitmap.
*/
- if (flush)
- kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
}
void kvm_mmu_zap_all(struct kvm *kvm)
@@ -6746,10 +6716,12 @@ int kvm_mmu_vendor_module_init(void)
ret = register_shrinker(&mmu_shrinker, "x86-mmu");
if (ret)
- goto out;
+ goto out_shrinker;
return 0;
+out_shrinker:
+ percpu_counter_destroy(&kvm_total_used_mmu_pages);
out:
mmu_destroy_caches();
return ret;
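The relabeled error path follows the standard reverse-order unwind idiom: each failure jumps to the label that releases everything acquired before it. A generic sketch with hypothetical resources:

#include <errno.h>

static int alloc_caches(void) { return 0; }	/* stubs for illustration */
static void destroy_caches(void) {}
static int alloc_counter(void) { return 0; }
static void destroy_counter(void) {}
static int register_shrinker_sketch(void) { return -ENOMEM; }

static int module_init_sketch(void)
{
	int ret;

	ret = alloc_caches();
	if (ret)
		return ret;

	ret = alloc_counter();
	if (ret)
		goto out;

	ret = register_shrinker_sketch();
	if (ret)
		goto out_shrinker;	/* counter exists and must be freed */

	return 0;

out_shrinker:
	destroy_counter();		/* unwind in reverse acquisition order */
out:
	destroy_caches();
	return ret;
}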
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 39e0205e7300..5ab5f94dcb6f 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -472,7 +472,7 @@ error:
#if PTTYPE == PTTYPE_EPT
/*
- * Use PFERR_RSVD_MASK in error_code to to tell if EPT
+ * Use PFERR_RSVD_MASK in error_code to tell if EPT
* misconfiguration requires to be injected. The detection is
* done by is_rsvd_bits_set() above.
*
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index f3744eea45f5..7670c13ce251 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -343,7 +343,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
}
/*
- * An shadow-present leaf SPTE may be non-writable for 3 possible reasons:
+ * A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
*
* 1. To intercept writes for dirty logging. KVM write-protects huge pages
* so that they can be split down into the dirty logging
@@ -361,8 +361,13 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
* read-only memslot or guest memory backed by a read-only VMA. Writes to
* such pages are disallowed entirely.
*
- * To keep track of why a given SPTE is write-protected, KVM uses 2
- * software-only bits in the SPTE:
+ * 4. To emulate the Accessed bit for SPTEs without A/D bits. Note, in this
+ * case, the SPTE is access-protected, not just write-protected!
+ *
+ * For cases #1 and #4, KVM can safely make such SPTEs writable without taking
+ * mmu_lock as capturing the Accessed/Dirty state doesn't require taking it.
+ * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits
+ * in the SPTE:
*
* shadow_mmu_writable_mask, aka MMU-writable -
* Cleared on SPTEs that KVM is currently write-protecting for shadow paging
@@ -391,7 +396,8 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
* shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging
* (which does not clear the MMU-writable bit), does not flush TLBs before
* dropping the lock, as it only needs to synchronize guest writes with the
- * dirty bitmap.
+ * dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for
+ * access-tracking via the clear_young() MMU notifier also does not flush TLBs.
*
* So, there is the problem: clearing the MMU-writable bit can encounter a
* write-protected SPTE while CPUs still have writable mappings for that SPTE
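A condensed sketch of the flush rule the comment describes, using illustrative bit positions rather than the real (runtime-configured) SPTE masks:

#include <stdbool.h>
#include <stdint.h>

#define SPTE_MMU_WRITABLE  (1ull << 58)	/* illustrative positions only */
#define SPTE_HOST_WRITABLE (1ull << 57)
#define SPTE_HW_WRITABLE   (1ull << 1)

/* Write-protect for shadow paging; report whether a TLB flush is needed. */
static bool wrprot_spte_for_shadow_paging(uint64_t *spte)
{
	bool was_mmu_writable = *spte & SPTE_MMU_WRITABLE;

	*spte &= ~(SPTE_MMU_WRITABLE | SPTE_HW_WRITABLE);

	/*
	 * Flush iff MMU-writable was set: if it was already clear, whoever
	 * cleared it was responsible for flushing any stale writable TLB
	 * entries, so this CPU can safely skip the flush.
	 */
	return was_mmu_writable;
}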
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index bf2ccf9debca..672f0432d777 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -372,6 +372,16 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
}
}
+static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_account_pgtable_pages((void *)sp->spt, +1);
+}
+
+static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+ kvm_account_pgtable_pages((void *)sp->spt, -1);
+}
+
/**
* tdp_mmu_unlink_sp() - Remove a shadow page from the list of used pages
*
@@ -384,6 +394,7 @@ static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp,
bool shared)
{
+ tdp_unaccount_mmu_page(kvm, sp);
if (shared)
spin_lock(&kvm->arch.tdp_mmu_pages_lock);
else
@@ -1132,6 +1143,7 @@ static int tdp_mmu_link_sp(struct kvm *kvm, struct tdp_iter *iter,
if (account_nx)
account_huge_nx_page(kvm, sp);
spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
+ tdp_account_mmu_page(kvm, sp);
return 0;
}
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 02f9e4f245bd..d9b9a0f0db17 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -106,9 +106,19 @@ static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
return;
if (pmc->perf_event && pmc->perf_event->attr.precise_ip) {
- /* Indicate PEBS overflow PMI to guest. */
- skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
- (unsigned long *)&pmu->global_status);
+ if (!in_pmi) {
+ /*
+ * TODO: KVM is currently _choosing_ to not generate records
+ * for emulated instructions, avoiding BUFFER_OVF PMI when
+ * there are no records. Strictly speaking, records should also be
+ * generated in the right context to improve sampling accuracy.
+ */
+ skip_pmi = true;
+ } else {
+ /* Indicate PEBS overflow PMI to guest. */
+ skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
+ (unsigned long *)&pmu->global_status);
+ }
} else {
__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
}
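The in_pmi path relies on __test_and_set_bit() returning the bit's previous value, so a PMI is only injected on the 0->1 transition of GLOBAL_STATUS.BUFFER_OVF; roughly (non-atomic stand-in):

#include <stdbool.h>

static bool test_and_set(unsigned long *word, int bit)
{
	bool old = *word & (1ul << bit);

	*word |= 1ul << bit;
	return old;	/* previous state: PMI already signaled if set */
}

/* Skip the PMI if the overflow bit was already set (coalesce PMIs). */
static bool should_skip_pmi(unsigned long *global_status, int ovf_bit)
{
	return test_and_set(global_status, ovf_bit);
}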
@@ -227,8 +237,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
get_sample_period(pmc, pmc->counter)))
return false;
- if (!test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) &&
- pmc->perf_event->attr.precise_ip)
+ if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) !=
+ (!!pmc->perf_event->attr.precise_ip))
return false;
/* reuse perf_event to serve as pmc_reprogram_counter() does */
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 76dcc8a3e849..4c620999d230 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -55,28 +55,6 @@ static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
nested_svm_vmexit(svm);
}
-static bool nested_svm_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
- struct x86_exception *fault)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- struct vmcb *vmcb = svm->vmcb;
-
- WARN_ON(!is_guest_mode(vcpu));
-
- if (vmcb12_is_intercept(&svm->nested.ctl,
- INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
- !WARN_ON_ONCE(svm->nested.nested_run_pending)) {
- vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
- vmcb->control.exit_code_hi = 0;
- vmcb->control.exit_info_1 = fault->error_code;
- vmcb->control.exit_info_2 = fault->address;
- nested_svm_vmexit(svm);
- return true;
- }
-
- return false;
-}
-
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -468,7 +446,7 @@ static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
unsigned int nr;
if (vcpu->arch.exception.injected) {
- nr = vcpu->arch.exception.nr;
+ nr = vcpu->arch.exception.vector;
exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
if (vcpu->arch.exception.has_error_code) {
@@ -781,11 +759,15 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
struct vcpu_svm *svm = to_svm(vcpu);
int ret;
- trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
- vmcb12->save.rip,
- vmcb12->control.int_ctl,
- vmcb12->control.event_inj,
- vmcb12->control.nested_ctl);
+ trace_kvm_nested_vmenter(svm->vmcb->save.rip,
+ vmcb12_gpa,
+ vmcb12->save.rip,
+ vmcb12->control.int_ctl,
+ vmcb12->control.event_inj,
+ vmcb12->control.nested_ctl,
+ vmcb12->control.nested_cr3,
+ vmcb12->save.cr3,
+ KVM_ISA_SVM);
trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
@@ -1304,44 +1286,46 @@ int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
return 0;
}
-static bool nested_exit_on_exception(struct vcpu_svm *svm)
+static bool nested_svm_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
+ u32 error_code)
{
- unsigned int nr = svm->vcpu.arch.exception.nr;
+ struct vcpu_svm *svm = to_svm(vcpu);
- return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
+ return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector));
}
-static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
+static void nested_svm_inject_exception_vmexit(struct kvm_vcpu *vcpu)
{
- unsigned int nr = svm->vcpu.arch.exception.nr;
+ struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
+ struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
- vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
+ vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + ex->vector;
vmcb->control.exit_code_hi = 0;
- if (svm->vcpu.arch.exception.has_error_code)
- vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
+ if (ex->has_error_code)
+ vmcb->control.exit_info_1 = ex->error_code;
/*
* EXITINFO2 is undefined for all exception intercepts other
* than #PF.
*/
- if (nr == PF_VECTOR) {
- if (svm->vcpu.arch.exception.nested_apf)
- vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
- else if (svm->vcpu.arch.exception.has_payload)
- vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
+ if (ex->vector == PF_VECTOR) {
+ if (ex->has_payload)
+ vmcb->control.exit_info_2 = ex->payload;
else
- vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
- } else if (nr == DB_VECTOR) {
- /* See inject_pending_event. */
- kvm_deliver_exception_payload(&svm->vcpu);
- if (svm->vcpu.arch.dr7 & DR7_GD) {
- svm->vcpu.arch.dr7 &= ~DR7_GD;
- kvm_update_dr7(&svm->vcpu);
+ vmcb->control.exit_info_2 = vcpu->arch.cr2;
+ } else if (ex->vector == DB_VECTOR) {
+ /* See kvm_check_and_inject_events(). */
+ kvm_deliver_exception_payload(vcpu, ex);
+
+ if (vcpu->arch.dr7 & DR7_GD) {
+ vcpu->arch.dr7 &= ~DR7_GD;
+ kvm_update_dr7(vcpu);
}
- } else
- WARN_ON(svm->vcpu.arch.exception.has_payload);
+ } else {
+ WARN_ON(ex->has_payload);
+ }
nested_svm_vmexit(svm);
}
@@ -1353,10 +1337,22 @@ static inline bool nested_exit_on_init(struct vcpu_svm *svm)
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- bool block_nested_events =
- kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
struct kvm_lapic *apic = vcpu->arch.apic;
+ struct vcpu_svm *svm = to_svm(vcpu);
+ /*
+ * Only a pending nested run blocks a pending exception. If there is a
+ * previously injected event, the pending exception occurred while said
+ * event was being delivered and thus needs to be handled.
+ */
+ bool block_nested_exceptions = svm->nested.nested_run_pending;
+ /*
+ * New events (not exceptions) are only recognized at instruction
+ * boundaries. If an event needs reinjection, then KVM is handling a
+ * VM-Exit that occurred _during_ instruction execution; new events are
+ * blocked until the instruction completes.
+ */
+ bool block_nested_events = block_nested_exceptions ||
+ kvm_event_needs_reinjection(vcpu);
if (lapic_in_kernel(vcpu) &&
test_bit(KVM_APIC_INIT, &apic->pending_events)) {
@@ -1368,18 +1364,16 @@ static int svm_check_nested_events(struct kvm_vcpu *vcpu)
return 0;
}
- if (vcpu->arch.exception.pending) {
- /*
- * Only a pending nested run can block a pending exception.
- * Otherwise an injected NMI/interrupt should either be
- * lost or delivered to the nested hypervisor in the EXITINTINFO
- * vmcb field, while delivering the pending exception.
- */
- if (svm->nested.nested_run_pending)
+ if (vcpu->arch.exception_vmexit.pending) {
+ if (block_nested_exceptions)
return -EBUSY;
- if (!nested_exit_on_exception(svm))
- return 0;
- nested_svm_inject_exception_vmexit(svm);
+ nested_svm_inject_exception_vmexit(vcpu);
+ return 0;
+ }
+
+ if (vcpu->arch.exception.pending) {
+ if (block_nested_exceptions)
+ return -EBUSY;
return 0;
}
@@ -1720,8 +1714,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
struct kvm_x86_nested_ops svm_nested_ops = {
.leave_nested = svm_leave_nested,
+ .is_exception_vmexit = nested_svm_is_exception_vmexit,
.check_events = svm_check_nested_events,
- .handle_page_fault_workaround = nested_svm_handle_page_fault_workaround,
.triple_fault = nested_svm_triple_fault,
.get_nested_state_pages = svm_get_nested_state_pages,
.get_state = svm_get_nested_state,
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index f24613a108c5..b68956299fa8 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -23,107 +23,52 @@ enum pmu_type {
PMU_TYPE_EVNTSEL,
};
-enum index {
- INDEX_ZERO = 0,
- INDEX_ONE,
- INDEX_TWO,
- INDEX_THREE,
- INDEX_FOUR,
- INDEX_FIVE,
- INDEX_ERROR,
-};
-
-static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
+static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
- struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+ unsigned int num_counters = pmu->nr_arch_gp_counters;
- if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
- if (type == PMU_TYPE_COUNTER)
- return MSR_F15H_PERF_CTR;
- else
- return MSR_F15H_PERF_CTL;
- } else {
- if (type == PMU_TYPE_COUNTER)
- return MSR_K7_PERFCTR0;
- else
- return MSR_K7_EVNTSEL0;
- }
-}
+ if (pmc_idx >= num_counters)
+ return NULL;
-static enum index msr_to_index(u32 msr)
-{
- switch (msr) {
- case MSR_F15H_PERF_CTL0:
- case MSR_F15H_PERF_CTR0:
- case MSR_K7_EVNTSEL0:
- case MSR_K7_PERFCTR0:
- return INDEX_ZERO;
- case MSR_F15H_PERF_CTL1:
- case MSR_F15H_PERF_CTR1:
- case MSR_K7_EVNTSEL1:
- case MSR_K7_PERFCTR1:
- return INDEX_ONE;
- case MSR_F15H_PERF_CTL2:
- case MSR_F15H_PERF_CTR2:
- case MSR_K7_EVNTSEL2:
- case MSR_K7_PERFCTR2:
- return INDEX_TWO;
- case MSR_F15H_PERF_CTL3:
- case MSR_F15H_PERF_CTR3:
- case MSR_K7_EVNTSEL3:
- case MSR_K7_PERFCTR3:
- return INDEX_THREE;
- case MSR_F15H_PERF_CTL4:
- case MSR_F15H_PERF_CTR4:
- return INDEX_FOUR;
- case MSR_F15H_PERF_CTL5:
- case MSR_F15H_PERF_CTR5:
- return INDEX_FIVE;
- default:
- return INDEX_ERROR;
- }
+ return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
enum pmu_type type)
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+ unsigned int idx;
if (!vcpu->kvm->arch.enable_pmu)
return NULL;
switch (msr) {
- case MSR_F15H_PERF_CTL0:
- case MSR_F15H_PERF_CTL1:
- case MSR_F15H_PERF_CTL2:
- case MSR_F15H_PERF_CTL3:
- case MSR_F15H_PERF_CTL4:
- case MSR_F15H_PERF_CTL5:
+ case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
return NULL;
- fallthrough;
+ /*
+ * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
+ * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
+ */
+ idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
+ if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
+ return NULL;
+ break;
case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
if (type != PMU_TYPE_EVNTSEL)
return NULL;
+ idx = msr - MSR_K7_EVNTSEL0;
break;
- case MSR_F15H_PERF_CTR0:
- case MSR_F15H_PERF_CTR1:
- case MSR_F15H_PERF_CTR2:
- case MSR_F15H_PERF_CTR3:
- case MSR_F15H_PERF_CTR4:
- case MSR_F15H_PERF_CTR5:
- if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
- return NULL;
- fallthrough;
case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
if (type != PMU_TYPE_COUNTER)
return NULL;
+ idx = msr - MSR_K7_PERFCTR0;
break;
default:
return NULL;
}
- return &pmu->gp_counters[msr_to_index(msr)];
+ return amd_pmc_idx_to_pmc(pmu, idx);
}
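For reference, the pairing arithmetic with concrete numbers: MSR_F15H_PERF_CTL0 is 0xc0010200 per the AMD MSR assignments, so CTLn MSRs sit at even offsets and CTRn MSRs at odd offsets. A standalone sketch, not the kernel helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_F15H_PERF_CTL0 0xc0010200u	/* CTLn even, CTRn odd */

static void decode_core_pmu_msr(uint32_t msr)
{
	uint32_t idx = (msr - MSR_F15H_PERF_CTL0) / 2;
	bool is_evntsel = !(msr & 0x1);

	printf("msr 0x%x -> counter %u, %s\n",
	       msr, idx, is_evntsel ? "event select (CTL)" : "counter (CTR)");
}

int main(void)
{
	decode_core_pmu_msr(0xc0010200);	/* counter 0, CTL */
	decode_core_pmu_msr(0xc0010207);	/* counter 3, CTR */
	return 0;
}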
static bool amd_hw_event_available(struct kvm_pmc *pmc)
@@ -139,22 +84,6 @@ static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
return true;
}
-static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
- unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
- struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
-
- if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
- /*
- * The idx is contiguous. The MSRs are not. The counter MSRs
- * are interleaved with the event select MSRs.
- */
- pmc_idx *= 2;
- }
-
- return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
-}
-
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -168,15 +97,7 @@ static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
unsigned int idx, u64 *mask)
{
- struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
- struct kvm_pmc *counters;
-
- idx &= ~(3u << 30);
- if (idx >= pmu->nr_arch_gp_counters)
- return NULL;
- counters = pmu->gp_counters;
-
- return &counters[idx];
+ return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}
static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f3813dbacb9f..58f0077d9357 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -461,24 +461,22 @@ static int svm_update_soft_interrupt_rip(struct kvm_vcpu *vcpu)
return 0;
}
-static void svm_queue_exception(struct kvm_vcpu *vcpu)
+static void svm_inject_exception(struct kvm_vcpu *vcpu)
{
+ struct kvm_queued_exception *ex = &vcpu->arch.exception;
struct vcpu_svm *svm = to_svm(vcpu);
- unsigned nr = vcpu->arch.exception.nr;
- bool has_error_code = vcpu->arch.exception.has_error_code;
- u32 error_code = vcpu->arch.exception.error_code;
- kvm_deliver_exception_payload(vcpu);
+ kvm_deliver_exception_payload(vcpu, ex);
- if (kvm_exception_is_soft(nr) &&
+ if (kvm_exception_is_soft(ex->vector) &&
svm_update_soft_interrupt_rip(vcpu))
return;
- svm->vmcb->control.event_inj = nr
+ svm->vmcb->control.event_inj = ex->vector
| SVM_EVTINJ_VALID
- | (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
+ | (ex->has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
| SVM_EVTINJ_TYPE_EXEPT;
- svm->vmcb->control.event_inj_err = error_code;
+ svm->vmcb->control.event_inj_err = ex->error_code;
}
static void svm_init_erratum_383(void)
@@ -1975,7 +1973,7 @@ static int npf_interception(struct kvm_vcpu *vcpu)
u64 fault_address = svm->vmcb->control.exit_info_2;
u64 error_code = svm->vmcb->control.exit_info_1;
- trace_kvm_page_fault(fault_address, error_code);
+ trace_kvm_page_fault(vcpu, fault_address, error_code);
return kvm_mmu_page_fault(vcpu, fault_address, error_code,
static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
svm->vmcb->control.insn_bytes : NULL,
@@ -2341,7 +2339,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value)
enable_gif(svm);
if (svm->vcpu.arch.smi_pending ||
svm->vcpu.arch.nmi_pending ||
- kvm_cpu_has_injectable_intr(&svm->vcpu))
+ kvm_cpu_has_injectable_intr(&svm->vcpu) ||
+ kvm_apic_has_pending_init_or_sipi(&svm->vcpu))
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
} else {
disable_gif(svm);
@@ -3522,7 +3521,7 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
/* Note, this is called iff the local APIC is in-kernel. */
if (!READ_ONCE(vcpu->arch.apic->apicv_active)) {
- /* Process the interrupt via inject_pending_event */
+ /* Process the interrupt via kvm_check_and_inject_events(). */
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
return;
@@ -4697,15 +4696,7 @@ static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
- /*
- * TODO: Last condition latch INIT signals on vCPU when
- * vCPU is in guest-mode and vmcb12 defines intercept on INIT.
- * To properly emulate the INIT intercept,
- * svm_check_nested_events() should call nested_svm_vmexit()
- * if an INIT signal is pending.
- */
- return !gif_set(svm) ||
- (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT));
+ return !gif_set(svm);
}
static void svm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
@@ -4798,7 +4789,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.patch_hypercall = svm_patch_hypercall,
.inject_irq = svm_inject_irq,
.inject_nmi = svm_inject_nmi,
- .queue_exception = svm_queue_exception,
+ .inject_exception = svm_inject_exception,
.cancel_injection = svm_cancel_injection,
.interrupt_allowed = svm_interrupt_allowed,
.nmi_allowed = svm_nmi_allowed,
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 2120d7c060a9..bc25589ad588 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -394,20 +394,25 @@ TRACE_EVENT(kvm_inj_exception,
* Tracepoint for page fault.
*/
TRACE_EVENT(kvm_page_fault,
- TP_PROTO(unsigned long fault_address, unsigned int error_code),
- TP_ARGS(fault_address, error_code),
+ TP_PROTO(struct kvm_vcpu *vcpu, u64 fault_address, u64 error_code),
+ TP_ARGS(vcpu, fault_address, error_code),
TP_STRUCT__entry(
- __field( unsigned long, fault_address )
- __field( unsigned int, error_code )
+ __field( unsigned int, vcpu_id )
+ __field( unsigned long, guest_rip )
+ __field( u64, fault_address )
+ __field( u64, error_code )
),
TP_fast_assign(
+ __entry->vcpu_id = vcpu->vcpu_id;
+ __entry->guest_rip = kvm_rip_read(vcpu);
__entry->fault_address = fault_address;
__entry->error_code = error_code;
),
- TP_printk("address %lx error_code %x",
+ TP_printk("vcpu %u rip 0x%lx address 0x%016llx error_code 0x%llx",
+ __entry->vcpu_id, __entry->guest_rip,
__entry->fault_address, __entry->error_code)
);
@@ -589,10 +594,12 @@ TRACE_EVENT(kvm_pv_eoi,
/*
* Tracepoint for nested VMRUN
*/
-TRACE_EVENT(kvm_nested_vmrun,
+TRACE_EVENT(kvm_nested_vmenter,
TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
- __u32 event_inj, bool npt),
- TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, npt),
+ __u32 event_inj, bool tdp_enabled, __u64 guest_tdp_pgd,
+ __u64 guest_cr3, __u32 isa),
+ TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
+ guest_tdp_pgd, guest_cr3, isa),
TP_STRUCT__entry(
__field( __u64, rip )
@@ -600,7 +607,9 @@ TRACE_EVENT(kvm_nested_vmrun,
__field( __u64, nested_rip )
__field( __u32, int_ctl )
__field( __u32, event_inj )
- __field( bool, npt )
+ __field( bool, tdp_enabled )
+ __field( __u64, guest_pgd )
+ __field( __u32, isa )
),
TP_fast_assign(
@@ -609,14 +618,24 @@ TRACE_EVENT(kvm_nested_vmrun,
__entry->nested_rip = nested_rip;
__entry->int_ctl = int_ctl;
__entry->event_inj = event_inj;
- __entry->npt = npt;
- ),
-
- TP_printk("rip: 0x%016llx vmcb: 0x%016llx nrip: 0x%016llx int_ctl: 0x%08x "
- "event_inj: 0x%08x npt: %s",
- __entry->rip, __entry->vmcb, __entry->nested_rip,
- __entry->int_ctl, __entry->event_inj,
- __entry->npt ? "on" : "off")
+ __entry->tdp_enabled = tdp_enabled;
+ __entry->guest_pgd = tdp_enabled ? guest_tdp_pgd : guest_cr3;
+ __entry->isa = isa;
+ ),
+
+ TP_printk("rip: 0x%016llx %s: 0x%016llx nested_rip: 0x%016llx "
+ "int_ctl: 0x%08x event_inj: 0x%08x nested_%s=%s %s: 0x%016llx",
+ __entry->rip,
+ __entry->isa == KVM_ISA_VMX ? "vmcs" : "vmcb",
+ __entry->vmcb,
+ __entry->nested_rip,
+ __entry->int_ctl,
+ __entry->event_inj,
+ __entry->isa == KVM_ISA_VMX ? "ept" : "npt",
+ __entry->tdp_enabled ? "y" : "n",
+ !__entry->tdp_enabled ? "guest_cr3" :
+ __entry->isa == KVM_ISA_VMX ? "nested_eptp" : "nested_cr3",
+ __entry->guest_pgd)
);
TRACE_EVENT(kvm_nested_intercepts,
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index c5e5dfef69c7..87c4e46daf37 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -65,6 +65,7 @@ struct vmcs_config {
u64 cpu_based_3rd_exec_ctrl;
u32 vmexit_ctrl;
u32 vmentry_ctrl;
+ u64 misc;
struct nested_vmx_msrs nested;
};
extern struct vmcs_config vmcs_config;
@@ -82,7 +83,8 @@ static inline bool cpu_has_vmx_basic_inout(void)
static inline bool cpu_has_virtual_nmis(void)
{
- return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS &&
+ vmcs_config.cpu_based_exec_ctrl & CPU_BASED_NMI_WINDOW_EXITING;
}
static inline bool cpu_has_vmx_preemption_timer(void)
@@ -224,11 +226,8 @@ static inline bool cpu_has_vmx_vmfunc(void)
static inline bool cpu_has_vmx_shadow_vmcs(void)
{
- u64 vmx_msr;
-
/* check if the cpu supports writing r/o exit information fields */
- rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
- if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
+ if (!(vmcs_config.misc & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
return false;
return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -370,10 +369,7 @@ static inline bool cpu_has_vmx_invvpid_global(void)
static inline bool cpu_has_vmx_intel_pt(void)
{
- u64 vmx_msr;
-
- rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
- return (vmx_msr & MSR_IA32_VMX_MISC_INTEL_PT) &&
+ return (vmcs_config.misc & MSR_IA32_VMX_MISC_INTEL_PT) &&
(vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_PT_USE_GPA) &&
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_RTIT_CTL);
}
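Both helpers now consult a value captured once at setup instead of issuing RDMSR on every call; the read-once caching pattern, sketched with hypothetical names:

#include <stdbool.h>
#include <stdint.h>

#define MISC_FEATURE_X (1ull << 14)	/* illustrative bit */

struct caps_cache {
	uint64_t misc;	/* read from the MSR exactly once, at init */
};

static struct caps_cache cache;

static uint64_t read_misc_msr(void) { return MISC_FEATURE_X; }	/* stub */

static void caps_init(void)
{
	cache.misc = read_misc_msr();	/* one RDMSR instead of one per query */
}

static bool cpu_has_feature_x(void)
{
	return cache.misc & MISC_FEATURE_X;
}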
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index 6a61b1ae7942..d8b23c96d627 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -10,6 +10,8 @@
#include "vmx.h"
#include "trace.h"
+#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
+
DEFINE_STATIC_KEY_FALSE(enable_evmcs);
#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
@@ -28,6 +30,8 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
EVMCS1_FIELD(HOST_IA32_EFER, host_ia32_efer,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+ EVMCS1_FIELD(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl,
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
EVMCS1_FIELD(HOST_CR0, host_cr0,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
EVMCS1_FIELD(HOST_CR3, host_cr3,
@@ -78,6 +82,8 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
EVMCS1_FIELD(GUEST_IA32_EFER, guest_ia32_efer,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+ EVMCS1_FIELD(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl,
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
EVMCS1_FIELD(GUEST_PDPTR0, guest_pdptr0,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
EVMCS1_FIELD(GUEST_PDPTR1, guest_pdptr1,
@@ -126,6 +132,28 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
EVMCS1_FIELD(XSS_EXIT_BITMAP, xss_exit_bitmap,
HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+ EVMCS1_FIELD(ENCLS_EXITING_BITMAP, encls_exiting_bitmap,
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+ EVMCS1_FIELD(TSC_MULTIPLIER, tsc_multiplier,
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2),
+ /*
+ * Not used by KVM:
+ *
+ * EVMCS1_FIELD(0x00006828, guest_ia32_s_cet,
+ * HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+ * EVMCS1_FIELD(0x0000682A, guest_ssp,
+ * HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC),
+ * EVMCS1_FIELD(0x0000682C, guest_ia32_int_ssp_table_addr,
+ * HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+ * EVMCS1_FIELD(0x00002816, guest_ia32_lbr_ctl,
+ * HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1),
+ * EVMCS1_FIELD(0x00006C18, host_ia32_s_cet,
+ * HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+ * EVMCS1_FIELD(0x00006C1A, host_ssp,
+ * HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+ * EVMCS1_FIELD(0x00006C1C, host_ia32_int_ssp_table_addr,
+ * HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1),
+ */
/* 64 bit read only */
EVMCS1_FIELD(GUEST_PHYSICAL_ADDRESS, guest_physical_address,
@@ -294,19 +322,6 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
};
const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
-#if IS_ENABLED(CONFIG_HYPERV)
-__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
-{
- vmcs_conf->cpu_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_EXEC_CTRL;
- vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
- vmcs_conf->cpu_based_3rd_exec_ctrl = 0;
-
- vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
- vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
-}
-#endif
-
bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa)
{
struct hv_vp_assist_page assist_page;
@@ -334,6 +349,9 @@ uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
* versions: lower 8 bits is the minimal version, higher 8 bits is the
* maximum supported version. KVM supports versions from 1 to
* KVM_EVMCS_VERSION.
+ *
+ * Note, do not check whether Hyper-V is fully enabled in guest CPUID,
+ * as this helper is used to _get_ the vCPU's supported CPUID.
*/
if (kvm_cpu_cap_get(X86_FEATURE_VMX) &&
(!vcpu || to_vmx(vcpu)->nested.enlightened_vmcs_enabled))
@@ -342,10 +360,67 @@ uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
return 0;
}
-void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
+enum evmcs_revision {
+ EVMCSv1_LEGACY,
+ NR_EVMCS_REVISIONS,
+};
+
+enum evmcs_ctrl_type {
+ EVMCS_EXIT_CTRLS,
+ EVMCS_ENTRY_CTRLS,
+ EVMCS_2NDEXEC,
+ EVMCS_PINCTRL,
+ EVMCS_VMFUNC,
+ NR_EVMCS_CTRLS,
+};
+
+static const u32 evmcs_unsupported_ctrls[NR_EVMCS_CTRLS][NR_EVMCS_REVISIONS] = {
+ [EVMCS_EXIT_CTRLS] = {
+ [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_VMEXIT_CTRL,
+ },
+ [EVMCS_ENTRY_CTRLS] = {
+ [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_VMENTRY_CTRL,
+ },
+ [EVMCS_2NDEXEC] = {
+ [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_2NDEXEC,
+ },
+ [EVMCS_PINCTRL] = {
+ [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_PINCTRL,
+ },
+ [EVMCS_VMFUNC] = {
+ [EVMCSv1_LEGACY] = EVMCS1_UNSUPPORTED_VMFUNC,
+ },
+};
+
+static u32 evmcs_get_unsupported_ctls(enum evmcs_ctrl_type ctrl_type)
+{
+ enum evmcs_revision evmcs_rev = EVMCSv1_LEGACY;
+
+ return evmcs_unsupported_ctrls[ctrl_type][evmcs_rev];
+}
+
+static bool evmcs_has_perf_global_ctrl(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
+
+ /*
+ * PERF_GLOBAL_CTRL has a quirk where some Windows guests may fail to
+ * boot if a PV CPUID feature flag is not also set. Treat the fields
+ * as unsupported if the flag is not set in guest CPUID. This should
+ * be called only for guest accesses, and all guest accesses should be
+ * gated on Hyper-V being enabled and initialized.
+ */
+ if (WARN_ON_ONCE(!hv_vcpu))
+ return false;
+
+ return hv_vcpu->cpuid_cache.nested_ebx & HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
+}
+
+void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
u32 ctl_low = (u32)*pdata;
u32 ctl_high = (u32)(*pdata >> 32);
+ u32 unsupported_ctrls;
/*
* Hyper-V 2016 and 2019 try using these features even when eVMCS
* is enabled. Filter them out to avoid unpredictable behaviour.
*/
switch (msr_index) {
case MSR_IA32_VMX_EXIT_CTLS:
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
- ctl_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ unsupported_ctrls = evmcs_get_unsupported_ctls(EVMCS_EXIT_CTRLS);
+ if (!evmcs_has_perf_global_ctrl(vcpu))
+ unsupported_ctrls |= VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ctl_high &= ~unsupported_ctrls;
break;
case MSR_IA32_VMX_ENTRY_CTLS:
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
- ctl_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
+ unsupported_ctrls = evmcs_get_unsupported_ctls(EVMCS_ENTRY_CTRLS);
+ if (!evmcs_has_perf_global_ctrl(vcpu))
+ unsupported_ctrls |= VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ ctl_high &= ~unsupported_ctrls;
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
- ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+ ctl_high &= ~evmcs_get_unsupported_ctls(EVMCS_2NDEXEC);
break;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS:
- ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ ctl_high &= ~evmcs_get_unsupported_ctls(EVMCS_PINCTRL);
break;
case MSR_IA32_VMX_VMFUNC:
- ctl_low &= ~EVMCS1_UNSUPPORTED_VMFUNC;
+ ctl_low &= ~evmcs_get_unsupported_ctls(EVMCS_VMFUNC);
break;
}
*pdata = ctl_low | ((u64)ctl_high << 32);
}
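For reference, the split the function operates on: bits 31:0 of a VMX control capability MSR report the allowed-0 settings and bits 63:32 the allowed-1 settings, so clearing a bit in the high half tells the guest that control may never be set (the VMFUNC MSR instead reports supported functions in its low half, hence ctl_low is filtered there). A minimal sketch:

#include <stdint.h>

static void filter_ctl_msr(uint64_t *pdata, uint32_t unsupported)
{
	uint32_t ctl_low = (uint32_t)*pdata;		/* allowed-0 settings */
	uint32_t ctl_high = (uint32_t)(*pdata >> 32);	/* allowed-1 settings */

	ctl_high &= ~unsupported;			/* hide the controls */
	*pdata = ctl_low | ((uint64_t)ctl_high << 32);
}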
+static bool nested_evmcs_is_valid_controls(enum evmcs_ctrl_type ctrl_type,
+ u32 val)
+{
+ return !(val & evmcs_get_unsupported_ctls(ctrl_type));
+}
+
int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
{
- int ret = 0;
- u32 unsupp_ctl;
-
- unsupp_ctl = vmcs12->pin_based_vm_exec_control &
- EVMCS1_UNSUPPORTED_PINCTRL;
- if (unsupp_ctl) {
- trace_kvm_nested_vmenter_failed(
- "eVMCS: unsupported pin-based VM-execution controls",
- unsupp_ctl);
- ret = -EINVAL;
- }
+ if (CC(!nested_evmcs_is_valid_controls(EVMCS_PINCTRL,
+ vmcs12->pin_based_vm_exec_control)))
+ return -EINVAL;
- unsupp_ctl = vmcs12->secondary_vm_exec_control &
- EVMCS1_UNSUPPORTED_2NDEXEC;
- if (unsupp_ctl) {
- trace_kvm_nested_vmenter_failed(
- "eVMCS: unsupported secondary VM-execution controls",
- unsupp_ctl);
- ret = -EINVAL;
- }
+ if (CC(!nested_evmcs_is_valid_controls(EVMCS_2NDEXEC,
+ vmcs12->secondary_vm_exec_control)))
+ return -EINVAL;
- unsupp_ctl = vmcs12->vm_exit_controls &
- EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
- if (unsupp_ctl) {
- trace_kvm_nested_vmenter_failed(
- "eVMCS: unsupported VM-exit controls",
- unsupp_ctl);
- ret = -EINVAL;
- }
+ if (CC(!nested_evmcs_is_valid_controls(EVMCS_EXIT_CTRLS,
+ vmcs12->vm_exit_controls)))
+ return -EINVAL;
- unsupp_ctl = vmcs12->vm_entry_controls &
- EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
- if (unsupp_ctl) {
- trace_kvm_nested_vmenter_failed(
- "eVMCS: unsupported VM-entry controls",
- unsupp_ctl);
- ret = -EINVAL;
- }
+ if (CC(!nested_evmcs_is_valid_controls(EVMCS_ENTRY_CTRLS,
+ vmcs12->vm_entry_controls)))
+ return -EINVAL;
- unsupp_ctl = vmcs12->vm_function_control & EVMCS1_UNSUPPORTED_VMFUNC;
- if (unsupp_ctl) {
- trace_kvm_nested_vmenter_failed(
- "eVMCS: unsupported VM-function controls",
- unsupp_ctl);
- ret = -EINVAL;
- }
+ /*
+ * VM-Func controls are 64-bit, but KVM currently doesn't support any
+ * controls in bits 63:32, i.e. dropping those bits on the consistency
+ * check is intentional.
+ */
+ if (WARN_ON_ONCE(vmcs12->vm_function_control >> 32))
+ return -EINVAL;
- return ret;
+ if (CC(!nested_evmcs_is_valid_controls(EVMCS_VMFUNC,
+ vmcs12->vm_function_control)))
+ return -EINVAL;
+
+ return 0;
}
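CC is aliased to KVM_NESTED_VMENTER_CONSISTENCY_CHECK at the top of the file; it evaluates the check and logs the failing expression. A rough user-space analogue using a GNU statement expression, as the kernel does:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Evaluate the check, report the failing expression, yield the result. */
#define CC(check)							\
({									\
	bool failed = (check);						\
	if (failed)							\
		fprintf(stderr, "consistency check failed: %s\n", #check); \
	failed;								\
})

static int check_controls_sketch(uint32_t ctl, uint32_t unsupported)
{
	if (CC(ctl & unsupported))
		return -EINVAL;	/* fail the nested VM-Entry */
	return 0;
}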
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index f886a8ff0342..6f746ef3c038 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -42,8 +42,6 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs);
* PLE_GAP = 0x00004020,
* PLE_WINDOW = 0x00004022,
* VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
- * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
- * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
*
* Currently unsupported in KVM:
* GUEST_IA32_RTIT_CTL = 0x00002814,
@@ -61,9 +59,8 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs);
SECONDARY_EXEC_TSC_SCALING | \
SECONDARY_EXEC_PAUSE_LOOP_EXITING)
#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL \
- (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
- VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
-#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+ (VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
+#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (0)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
struct evmcs_field {
@@ -212,7 +209,6 @@ static inline void evmcs_load(u64 phys_addr)
vp_ap->enlighten_vmentry = 1;
}
-__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
#else /* !IS_ENABLED(CONFIG_HYPERV) */
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
static inline void evmcs_write32(unsigned long field, u32 value) {}
@@ -243,7 +239,7 @@ bool nested_enlightened_vmentry(struct kvm_vcpu *vcpu, u64 *evmcs_gpa);
uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu);
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
uint16_t *vmcs_version);
-void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata);
+void nested_evmcs_filter_control_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
int nested_evmcs_check_controls(struct vmcs12 *vmcs12);
#endif /* __KVM_X86_VMX_EVMCS_H */
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ddd4367d4826..8f67a9c4a287 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -439,61 +439,22 @@ static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
return inequality ^ bit;
}
-
-/*
- * KVM wants to inject page-faults which it got to the guest. This function
- * checks whether in a nested guest, we need to inject them to L1 or L2.
- */
-static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
+static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
+ u32 error_code)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- unsigned int nr = vcpu->arch.exception.nr;
- bool has_payload = vcpu->arch.exception.has_payload;
- unsigned long payload = vcpu->arch.exception.payload;
-
- if (nr == PF_VECTOR) {
- if (vcpu->arch.exception.nested_apf) {
- *exit_qual = vcpu->arch.apf.nested_apf_token;
- return 1;
- }
- if (nested_vmx_is_page_fault_vmexit(vmcs12,
- vcpu->arch.exception.error_code)) {
- *exit_qual = has_payload ? payload : vcpu->arch.cr2;
- return 1;
- }
- } else if (vmcs12->exception_bitmap & (1u << nr)) {
- if (nr == DB_VECTOR) {
- if (!has_payload) {
- payload = vcpu->arch.dr6;
- payload &= ~DR6_BT;
- payload ^= DR6_ACTIVE_LOW;
- }
- *exit_qual = payload;
- } else
- *exit_qual = 0;
- return 1;
- }
- return 0;
-}
-
-static bool nested_vmx_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
- struct x86_exception *fault)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- WARN_ON(!is_guest_mode(vcpu));
+ /*
+ * Drop bits 31:16 of the error code when performing the #PF mask+match
+ * check. All VMCS fields involved are 32 bits, but Intel CPUs never
+ * set bits 31:16 and VMX disallows setting bits 31:16 in the injected
+ * error code. Including the to-be-dropped bits in the check might
+ * result in an "impossible" or missed exit from L1's perspective.
+ */
+ if (vector == PF_VECTOR)
+ return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code);
- if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
- !WARN_ON_ONCE(to_vmx(vcpu)->nested.nested_run_pending)) {
- vmcs12->vm_exit_intr_error_code = fault->error_code;
- nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
- PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
- INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
- fault->address);
- return true;
- }
- return false;
+ return (vmcs12->exception_bitmap & (1u << vector));
}
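The mask+match test delegated to nested_vmx_is_page_fault_vmexit() (whose "return inequality ^ bit;" appears in the context above) reduces to the following; a simplified sketch of the VMCS #PF filtering semantics:

#include <stdbool.h>
#include <stdint.h>

/*
 * With bit 14 (#PF) of the exception bitmap set, L1 wants the VM-Exit when
 * (error_code & mask) == match; with it clear, when the two differ.
 */
static bool is_pf_vmexit(uint32_t error_code, uint32_t mask, uint32_t match,
			 bool exception_bitmap_pf_bit)
{
	/* Bits 31:16 are dropped first, per the comment above. */
	bool inequality = ((uint16_t)error_code & mask) != match;

	return inequality ^ exception_bitmap_pf_bit;
}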
static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
@@ -1607,6 +1568,10 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
vmcs12->guest_rflags = evmcs->guest_rflags;
vmcs12->guest_interruptibility_info =
evmcs->guest_interruptibility_info;
+ /*
+ * Not present in struct vmcs12:
+ * vmcs12->guest_ssp = evmcs->guest_ssp;
+ */
}
if (unlikely(!(hv_clean_fields &
@@ -1653,6 +1618,13 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
vmcs12->host_fs_selector = evmcs->host_fs_selector;
vmcs12->host_gs_selector = evmcs->host_gs_selector;
vmcs12->host_tr_selector = evmcs->host_tr_selector;
+ vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl;
+ /*
+ * Not present in struct vmcs12:
+ * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet;
+ * vmcs12->host_ssp = evmcs->host_ssp;
+ * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr;
+ */
}
if (unlikely(!(hv_clean_fields &
@@ -1720,6 +1692,8 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
vmcs12->tsc_offset = evmcs->tsc_offset;
vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
+ vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap;
+ vmcs12->tsc_multiplier = evmcs->tsc_multiplier;
}
if (unlikely(!(hv_clean_fields &
@@ -1767,6 +1741,13 @@ static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields
vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
vmcs12->guest_activity_state = evmcs->guest_activity_state;
vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
+ vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl;
+ /*
+ * Not present in struct vmcs12:
+ * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet;
+ * vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl;
+ * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr;
+ */
}
/*
@@ -1869,12 +1850,23 @@ static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
* evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
* evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
* evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
+ * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl;
+ * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl;
+ * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap;
+ * evmcs->tsc_multiplier = vmcs12->tsc_multiplier;
*
* Not present in struct vmcs12:
* evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
* evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
* evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
* evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
+ * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet;
+ * evmcs->host_ssp = vmcs12->host_ssp;
+ * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr;
+ * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet;
+ * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl;
+ * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr;
+ * evmcs->guest_ssp = vmcs12->guest_ssp;
*/
evmcs->guest_es_selector = vmcs12->guest_es_selector;
@@ -1982,7 +1974,7 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
bool evmcs_gpa_changed = false;
u64 evmcs_gpa;
- if (likely(!vmx->nested.enlightened_vmcs_enabled))
+ if (likely(!guest_cpuid_has_evmcs(vcpu)))
return EVMPTRLD_DISABLED;
if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
@@ -2328,9 +2320,14 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs0
* are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
* on the related bits (if supported by the CPU) in the hope that
* we can avoid VMWrites during vmx_set_efer().
+ *
+ * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
+ * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
+ * do the same for L2.
*/
exec_control = __vm_entry_controls_get(vmcs01);
- exec_control |= vmcs12->vm_entry_controls;
+ exec_control |= (vmcs12->vm_entry_controls &
+ ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
if (cpu_has_load_ia32_efer()) {
if (guest_efer & EFER_LMA)
@@ -2863,7 +2860,7 @@ static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
nested_check_vm_entry_controls(vcpu, vmcs12))
return -EINVAL;
- if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
+ if (guest_cpuid_has_evmcs(vcpu))
return nested_evmcs_check_controls(vmcs12);
return 0;
@@ -3145,7 +3142,7 @@ static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
* L2 was running), map it here to make sure vmcs12 changes are
* properly reflected.
*/
- if (vmx->nested.enlightened_vmcs_enabled &&
+ if (guest_cpuid_has_evmcs(vcpu) &&
vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
enum nested_evmptrld_status evmptrld_status =
nested_vmx_handle_enlightened_vmptrld(vcpu, false);
@@ -3364,12 +3361,24 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
};
u32 failed_index;
+ trace_kvm_nested_vmenter(kvm_rip_read(vcpu),
+ vmx->nested.current_vmptr,
+ vmcs12->guest_rip,
+ vmcs12->guest_intr_status,
+ vmcs12->vm_entry_intr_info_field,
+ vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT,
+ vmcs12->ept_pointer,
+ vmcs12->guest_cr3,
+ KVM_ISA_VMX);
+
kvm_service_local_tlb_flush_requests(vcpu);
evaluate_pending_interrupts = exec_controls_get(vmx) &
(CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
+ if (!evaluate_pending_interrupts)
+ evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi(vcpu);
if (!vmx->nested.nested_run_pending ||
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
@@ -3450,18 +3459,10 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
}
/*
- * If L1 had a pending IRQ/NMI until it executed
- * VMLAUNCH/VMRESUME which wasn't delivered because it was
- * disallowed (e.g. interrupts disabled), L0 needs to
- * evaluate if this pending event should cause an exit from L2
- * to L1 or delivered directly to L2 (e.g. In case L1 don't
- * intercept EXTERNAL_INTERRUPT).
- *
- * Usually this would be handled by the processor noticing an
- * IRQ/NMI window request, or checking RVI during evaluation of
- * pending virtual interrupts. However, this setting was done
- * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
- * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
+ * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI
+ * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can
+ * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit
+ * unconditionally.
*/
if (unlikely(evaluate_pending_interrupts))
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -3718,7 +3719,7 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
is_double_fault(exit_intr_info))) {
vmcs12->idt_vectoring_info_field = 0;
} else if (vcpu->arch.exception.injected) {
- nr = vcpu->arch.exception.nr;
+ nr = vcpu->arch.exception.vector;
idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
if (kvm_exception_is_soft(nr)) {
@@ -3819,19 +3820,40 @@ mmio_needed:
return -ENXIO;
}
-static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
- unsigned long exit_qual)
+static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
{
+ struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
+ u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- unsigned int nr = vcpu->arch.exception.nr;
- u32 intr_info = nr | INTR_INFO_VALID_MASK;
+ unsigned long exit_qual;
+
+ if (ex->has_payload) {
+ exit_qual = ex->payload;
+ } else if (ex->vector == PF_VECTOR) {
+ exit_qual = vcpu->arch.cr2;
+ } else if (ex->vector == DB_VECTOR) {
+ exit_qual = vcpu->arch.dr6;
+ exit_qual &= ~DR6_BT;
+ exit_qual ^= DR6_ACTIVE_LOW;
+ } else {
+ exit_qual = 0;
+ }
- if (vcpu->arch.exception.has_error_code) {
- vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
+ if (ex->has_error_code) {
+ /*
+ * Intel CPUs do not generate error codes with bits 31:16 set,
+ * and more importantly VMX disallows setting bits 31:16 in the
+ * injected error code for VM-Entry. Drop the bits to mimic
+ * hardware and avoid inducing failure on nested VM-Entry if L1
+ * chooses to inject the exception back to L2. AMD CPUs _do_
+ * generate "full" 32-bit error codes, so KVM allows userspace
+ * to inject exception error codes with bits 31:16 set.
+ */
+ vmcs12->vm_exit_intr_error_code = (u16)ex->error_code;
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
- if (kvm_exception_is_soft(nr))
+ if (kvm_exception_is_soft(ex->vector))
intr_info |= INTR_TYPE_SOFT_EXCEPTION;
else
intr_info |= INTR_TYPE_HARD_EXCEPTION;
@@ -3844,16 +3866,39 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
}
/*
- * Returns true if a debug trap is pending delivery.
+ * Returns true if a debug trap is (likely) pending delivery. Infer the class
+ * of a #DB (trap-like vs. fault-like) from the exception payload (to-be-DR6).
+ * Using the payload is flawed because code breakpoints (fault-like) and data
+ * breakpoints (trap-like) set the same bits in DR6 (breakpoint detected), i.e.
+ * this will return false positives if a to-be-injected code breakpoint #DB is
+ * pending (from KVM's perspective, but not "pending" across an instruction
+ * boundary). ICEBP, a.k.a. INT1, is also not reflected here even though it
+ * too is trap-like.
*
- * In KVM, debug traps bear an exception payload. As such, the class of a #DB
- * exception may be inferred from the presence of an exception payload.
+ * KVM "works" despite these flaws as ICEBP isn't currently supported by the
+ * emulator, Monitor Trap Flag is not marked pending on intercepted #DBs (the
+ * #DB has already happened), and MTF isn't marked pending on code breakpoints
+ * from the emulator (because such #DBs are fault-like and thus don't trigger
+ * actions that fire on instruction retire).
+ */
+static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex)
+{
+ if (!ex->pending || ex->vector != DB_VECTOR)
+ return 0;
+
+ /* General Detect #DBs are always fault-like. */
+ return ex->payload & ~DR6_BD;
+}
+
+/*
+ * Returns true if there's a pending #DB exception that is lower priority than
+ * a pending Monitor Trap Flag VM-Exit. TSS T-flag #DBs are not emulated by
+ * KVM, but could theoretically be injected by userspace. Note, this code is
+ * imperfect, see above.
*/
-static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
+static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex)
{
- return vcpu->arch.exception.pending &&
- vcpu->arch.exception.nr == DB_VECTOR &&
- vcpu->arch.exception.payload;
+ return vmx_get_pending_dbg_trap(ex) & ~DR6_BT;
}
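Pulling the two helpers together with the architectural DR6 bits (BD is bit 13, BS bit 14, BT bit 15); a simplified sketch, not the kernel code:

#include <stdbool.h>
#include <stdint.h>

#define DR6_BD (1ul << 13)	/* general detect: fault-like */
#define DR6_BS (1ul << 14)	/* single-step: trap-like */
#define DR6_BT (1ul << 15)	/* TSS T flag: trap-like, above MTF */

/* Trap-like #DB payload bits, i.e. everything but general detect. */
static unsigned long pending_dbg_trap(bool pending, uint8_t vector,
				      unsigned long payload)
{
	if (!pending || vector != 1)	/* #DB is vector 1 */
		return 0;
	return payload & ~DR6_BD;
}

/* Lower priority than MTF unless it's the (higher priority) TSS T-flag #DB. */
static bool is_low_priority_db_trap(bool pending, uint8_t vector,
				    unsigned long payload)
{
	return pending_dbg_trap(pending, vector, payload) & ~DR6_BT;
}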
/*
@@ -3865,9 +3910,11 @@ static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
*/
static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
{
- if (vmx_pending_dbg_trap(vcpu))
- vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
- vcpu->arch.exception.payload);
+ unsigned long pending_dbg;
+
+ pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception);
+ if (pending_dbg)
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg);
}
static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
@@ -3876,21 +3923,113 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->nested.preemption_timer_expired;
}
+static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
+{
+ return nested_vmx_preemption_timer_pending(vcpu) ||
+ to_vmx(vcpu)->nested.mtf_pending;
+}
+
+/*
+ * Per the Intel SDM's table "Priority Among Concurrent Events", with minor
+ * edits to fill in missing examples, e.g. #DB due to split-lock accesses,
+ * and less minor edits to splice in the priority of VMX Non-Root specific
+ * events, e.g. MTF and NMI/INTR-window exiting.
+ *
+ * 1 Hardware Reset and Machine Checks
+ * - RESET
+ * - Machine Check
+ *
+ * 2 Trap on Task Switch
+ * - T flag in TSS is set (on task switch)
+ *
+ * 3 External Hardware Interventions
+ * - FLUSH
+ * - STOPCLK
+ * - SMI
+ * - INIT
+ *
+ * 3.5 Monitor Trap Flag (MTF) VM-exit[1]
+ *
+ * 4 Traps on Previous Instruction
+ * - Breakpoints
+ * - Trap-class Debug Exceptions (#DB due to TF flag set, data/I-O
+ * breakpoint, or #DB due to a split-lock access)
+ *
+ * 4.3 VMX-preemption timer expired VM-exit[2]
+ *
+ * 4.6 NMI-window exiting VM-exit[3]
+ *
+ * 5 Nonmaskable Interrupts (NMI)
+ *
+ * 5.5 Interrupt-window exiting VM-exit and Virtual-interrupt delivery[4]
+ *
+ * 6 Maskable Hardware Interrupts
+ *
+ * 7 Code Breakpoint Fault
+ *
+ * 8 Faults from Fetching Next Instruction
+ * - Code-Segment Limit Violation
+ * - Code Page Fault
+ * - Control protection exception (missing ENDBRANCH at target of indirect
+ * call or jump)
+ *
+ * 9 Faults from Decoding Next Instruction
+ * - Instruction length > 15 bytes
+ * - Invalid Opcode
+ * - Coprocessor Not Available
+ *
+ *10 Faults on Executing Instruction
+ * - Overflow
+ * - Bound error
+ * - Invalid TSS
+ * - Segment Not Present
+ * - Stack fault
+ * - General Protection
+ * - Data Page Fault
+ * - Alignment Check
+ * - x86 FPU Floating-point exception
+ * - SIMD floating-point exception
+ * - Virtualization exception
+ * - Control protection exception
+ *
+ * [1] Per the "Monitor Trap Flag" section: System-management interrupts (SMIs),
+ * INIT signals, and higher priority events take priority over MTF VM exits.
+ * MTF VM exits take priority over debug-trap exceptions and lower priority
+ * events.
+ *
+ * [2] Debug-trap exceptions and higher priority events take priority over VM exits
+ * caused by the VMX-preemption timer. VM exits caused by the VMX-preemption
+ * timer take priority over VM exits caused by the "NMI-window exiting"
+ * VM-execution control and lower priority events.
+ *
+ * [3] Debug-trap exceptions and higher priority events take priority over VM exits
+ * caused by "NMI-window exiting". VM exits caused by this control take
+ * priority over non-maskable interrupts (NMIs) and lower priority events.
+ *
+ * [4] Virtual-interrupt delivery has the same priority as that of VM exits due to
+ * the 1-setting of the "interrupt-window exiting" VM-execution control. Thus,
+ * non-maskable interrupts (NMIs) and higher priority events take priority over
+ * delivery of a virtual interrupt; delivery of a virtual interrupt takes
+ * priority over external interrupts and lower priority events.
+ */
static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long exit_qual;
- bool block_nested_events =
- vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
- bool mtf_pending = vmx->nested.mtf_pending;
struct kvm_lapic *apic = vcpu->arch.apic;
-
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
- * Clear the MTF state. If a higher priority VM-exit is delivered first,
- * this state is discarded.
+ * Only a pending nested run blocks a pending exception. If there is a
+ * previously injected event, the pending exception occurred while said
+ * event was being delivered and thus needs to be handled.
*/
- if (!block_nested_events)
- vmx->nested.mtf_pending = false;
+ bool block_nested_exceptions = vmx->nested.nested_run_pending;
+ /*
+ * New events (not exceptions) are only recognized at instruction
+ * boundaries. If an event needs reinjection, then KVM is handling a
+ * VM-Exit that occurred _during_ instruction execution; new events are
+ * blocked until the instruction completes.
+ */
+ bool block_nested_events = block_nested_exceptions ||
+ kvm_event_needs_reinjection(vcpu);
if (lapic_in_kernel(vcpu) &&
test_bit(KVM_APIC_INIT, &apic->pending_events)) {
@@ -3900,6 +4039,9 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
clear_bit(KVM_APIC_INIT, &apic->pending_events);
if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
+
+ /* MTF is discarded if the vCPU is in WFS. */
+ vmx->nested.mtf_pending = false;
return 0;
}
@@ -3909,31 +4051,41 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
return -EBUSY;
clear_bit(KVM_APIC_SIPI, &apic->pending_events);
- if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
+ if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
apic->sipi_vector & 0xFFUL);
- return 0;
+ return 0;
+ }
+ /* Fallthrough, the SIPI is completely ignored. */
}
/*
- * Process any exceptions that are not debug traps before MTF.
+ * Process exceptions that are higher priority than Monitor Trap Flag:
+ * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but
+ * could theoretically come in from userspace), and ICEBP (INT1).
*
- * Note that only a pending nested run can block a pending exception.
- * Otherwise an injected NMI/interrupt should either be
- * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO,
- * while delivering the pending exception.
+ * TODO: SMIs have higher priority than MTF and trap-like #DBs (except
+ * for TSS T flag #DBs). KVM also doesn't save/restore pending MTF
+ * across SMI/RSM as it should; that needs to be addressed in order to
+ * prioritize SMI over MTF and trap-like #DBs.
*/
-
- if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
- if (vmx->nested.nested_run_pending)
+ if (vcpu->arch.exception_vmexit.pending &&
+ !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) {
+ if (block_nested_exceptions)
return -EBUSY;
- if (!nested_vmx_check_exception(vcpu, &exit_qual))
- goto no_vmexit;
- nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+
+ nested_vmx_inject_exception_vmexit(vcpu);
return 0;
}
- if (mtf_pending) {
+ if (vcpu->arch.exception.pending &&
+ !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) {
+ if (block_nested_exceptions)
+ return -EBUSY;
+ goto no_vmexit;
+ }
+
+ if (vmx->nested.mtf_pending) {
if (block_nested_events)
return -EBUSY;
nested_vmx_update_pending_dbg(vcpu);
@@ -3941,15 +4093,20 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
return 0;
}
- if (vcpu->arch.exception.pending) {
- if (vmx->nested.nested_run_pending)
+ if (vcpu->arch.exception_vmexit.pending) {
+ if (block_nested_exceptions)
return -EBUSY;
- if (!nested_vmx_check_exception(vcpu, &exit_qual))
- goto no_vmexit;
- nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+
+ nested_vmx_inject_exception_vmexit(vcpu);
return 0;
}
+ if (vcpu->arch.exception.pending) {
+ if (block_nested_exceptions)
+ return -EBUSY;
+ goto no_vmexit;
+ }
+
if (nested_vmx_preemption_timer_pending(vcpu)) {
if (block_nested_events)
return -EBUSY;
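The ordering enforced by vmx_check_nested_events() is spread across several hunks; the following stand-alone sketch (user-space C, illustrative names only, not KVM identifiers) models the documented priority walk, where the first pending event in priority order wins:

/*
 * Stand-alone model (user-space C, illustrative names, not KVM
 * identifiers) of the priority walk documented above: the first
 * pending event in priority order wins.
 */
#include <stdbool.h>
#include <stdio.h>

struct pending {
	bool init_or_sipi;	/* priority 3 */
	bool high_prio_excp;	/* faults, TSS T-flag #DB, ICEBP */
	bool mtf;		/* priority 3.5 */
	bool low_prio_db_trap;	/* priority 4 */
	bool preemption_timer;	/* priority 4.3 */
	bool nmi;		/* priority 5 */
	bool intr;		/* priority 6 */
};

static const char *first_event(const struct pending *p)
{
	if (p->init_or_sipi)
		return "INIT/SIPI";
	if (p->high_prio_excp)
		return "fault-like exception";
	if (p->mtf)
		return "MTF VM-exit";
	if (p->low_prio_db_trap)
		return "trap-like #DB";
	if (p->preemption_timer)
		return "preemption timer VM-exit";
	if (p->nmi)
		return "NMI";
	if (p->intr)
		return "external interrupt";
	return "nothing pending";
}

int main(void)
{
	struct pending p = { .mtf = true, .low_prio_db_trap = true };

	/* MTF wins over a trap-like #DB, per footnote [1] above. */
	printf("%s\n", first_event(&p));
	return 0;
}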
@@ -4255,14 +4412,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
nested_vmx_abort(vcpu,
VMX_ABORT_SAVE_GUEST_MSR_FAIL);
}
-
- /*
- * Drop what we picked up for L2 via vmx_complete_interrupts. It is
- * preserved above and would only end up incorrectly in L1.
- */
- vcpu->arch.nmi_injected = false;
- kvm_clear_exception_queue(vcpu);
- kvm_clear_interrupt_queue(vcpu);
}
/*
@@ -4538,6 +4687,9 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ /* Pending MTF traps are discarded on VM-Exit. */
+ vmx->nested.mtf_pending = false;
+
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
@@ -4602,6 +4754,17 @@ void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
WARN_ON_ONCE(nested_early_check);
}
+ /*
+ * Drop events/exceptions that were queued for re-injection to L2
+ * (picked up via vmx_complete_interrupts()), as well as exceptions
+ * that were pending for L2. Note, this must NOT be hoisted above
+ * prepare_vmcs12(); events/exceptions queued for re-injection need to
+ * be captured in vmcs12 (see vmcs12_save_pending_event()).
+ */
+ vcpu->arch.nmi_injected = false;
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
+
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
/* Update any VMCS fields that might have changed while L2 ran */
@@ -5030,8 +5193,8 @@ static int handle_vmxoff(struct kvm_vcpu *vcpu)
free_nested(vcpu);
- /* Process a latched INIT during time CPU was in VMX operation */
- kvm_make_request(KVM_REQ_EVENT, vcpu);
+ if (kvm_apic_has_pending_init_or_sipi(vcpu))
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
return nested_vmx_succeed(vcpu);
}
@@ -5067,7 +5230,7 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
* state. It is possible that the area will stay mapped as
* vmx->nested.hv_evmcs but this shouldn't be a problem.
*/
- if (likely(!vmx->nested.enlightened_vmcs_enabled ||
+ if (likely(!guest_cpuid_has_evmcs(vcpu) ||
!nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vcpu);
@@ -6463,6 +6626,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (ret)
goto error_guest_mode;
+ if (vmx->nested.mtf_pending)
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+
return 0;
error_guest_mode:
@@ -6522,8 +6688,10 @@ static u64 nested_vmx_calc_vmcs_enum_msr(void)
* bit in the high half is on if the corresponding bit in the control field
* may be on. See also vmx_control_verify().
*/
-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
+void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
{
+ struct nested_vmx_msrs *msrs = &vmcs_conf->nested;
+
/*
* Note that as a general rule, the high half of the MSRs (bits in
* the control fields which may be 1) should be initialized by the
@@ -6540,11 +6708,10 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
*/
/* pin-based controls */
- rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
- msrs->pinbased_ctls_low,
- msrs->pinbased_ctls_high);
- msrs->pinbased_ctls_low |=
+ msrs->pinbased_ctls_low =
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+
+ msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl;
msrs->pinbased_ctls_high &=
PIN_BASED_EXT_INTR_MASK |
PIN_BASED_NMI_EXITING |
@@ -6555,50 +6722,47 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
PIN_BASED_VMX_PREEMPTION_TIMER;
/* exit controls */
- rdmsr(MSR_IA32_VMX_EXIT_CTLS,
- msrs->exit_ctls_low,
- msrs->exit_ctls_high);
msrs->exit_ctls_low =
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
+ msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl;
msrs->exit_ctls_high &=
#ifdef CONFIG_X86_64
VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
- VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+ VM_EXIT_CLEAR_BNDCFGS;
msrs->exit_ctls_high |=
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
- VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT |
+ VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
/* We support free control of debug control saving. */
msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
/* entry controls */
- rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
- msrs->entry_ctls_low,
- msrs->entry_ctls_high);
msrs->entry_ctls_low =
VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
+
+ msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl;
msrs->entry_ctls_high &=
#ifdef CONFIG_X86_64
VM_ENTRY_IA32E_MODE |
#endif
- VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
- VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
msrs->entry_ctls_high |=
- (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
+ (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER |
+ VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
/* We support free control of debug control loading. */
msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
/* cpu-based controls */
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
- msrs->procbased_ctls_low,
- msrs->procbased_ctls_high);
msrs->procbased_ctls_low =
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
+
+ msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl;
msrs->procbased_ctls_high &=
CPU_BASED_INTR_WINDOW_EXITING |
CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
@@ -6632,12 +6796,9 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
* depend on CPUID bits, they are added later by
* vmx_vcpu_after_set_cpuid.
*/
- if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
- msrs->secondary_ctls_low,
- msrs->secondary_ctls_high);
-
msrs->secondary_ctls_low = 0;
+
+ msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl;
msrs->secondary_ctls_high &=
SECONDARY_EXEC_DESC |
SECONDARY_EXEC_ENABLE_RDTSCP |
@@ -6717,10 +6878,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
/* miscellaneous data */
- rdmsr(MSR_IA32_VMX_MISC,
- msrs->misc_low,
- msrs->misc_high);
- msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
+ msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
msrs->misc_low |=
MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
@@ -6814,9 +6972,9 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
struct kvm_x86_nested_ops vmx_nested_ops = {
.leave_nested = vmx_leave_nested,
+ .is_exception_vmexit = nested_vmx_is_exception_vmexit,
.check_events = vmx_check_nested_events,
- .handle_page_fault_workaround = nested_vmx_handle_page_fault_workaround,
- .hv_timer_pending = nested_vmx_preemption_timer_pending,
+ .has_events = vmx_has_nested_events,
.triple_fault = nested_vmx_triple_fault,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index 88b00a7359e4..6312c9541c3c 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -17,7 +17,7 @@ enum nvmx_vmentry_status {
};
void vmx_leave_nested(struct kvm_vcpu *vcpu);
-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
+void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index c399637a3a79..25b70a85bef5 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -68,15 +68,11 @@ static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
}
}
-/* function is called when global control register has been updated. */
-static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+static void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
int bit;
- u64 diff = pmu->global_ctrl ^ data;
struct kvm_pmc *pmc;
- pmu->global_ctrl = data;
-
for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
pmc = intel_pmc_idx_to_pmc(pmu, bit);
if (pmc)
@@ -397,7 +393,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
struct kvm_pmc *pmc;
u32 msr = msr_info->index;
u64 data = msr_info->data;
- u64 reserved_bits;
+ u64 reserved_bits, diff;
switch (msr) {
case MSR_CORE_PERF_FIXED_CTR_CTRL:
@@ -418,7 +414,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (pmu->global_ctrl == data)
return 0;
if (kvm_valid_perf_global_ctrl(pmu, data)) {
- global_ctrl_changed(pmu, data);
+ diff = pmu->global_ctrl ^ data;
+ pmu->global_ctrl = data;
+ reprogram_counters(pmu, diff);
return 0;
}
break;
@@ -433,7 +431,9 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (pmu->pebs_enable == data)
return 0;
if (!(data & pmu->pebs_enable_mask)) {
+ diff = pmu->pebs_enable ^ data;
pmu->pebs_enable = data;
+ reprogram_counters(pmu, diff);
return 0;
}
break;
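The reprogram_counters() refactor above hinges on one idiom: XOR the old and new enable masks, then touch only the counters whose bit actually flipped. A minimal user-space sketch of that idiom (names are illustrative, not the kernel code):

/*
 * User-space sketch of the XOR-diff idiom used above: only counters
 * whose enable bit actually flipped between the old and new masks are
 * reprogrammed.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 8

static void reprogram(int idx)
{
	printf("reprogram counter %d\n", idx);
}

int main(void)
{
	uint64_t old_ctrl = 0x0b;		/* counters 0, 1, 3 enabled */
	uint64_t new_ctrl = 0x0e;		/* counters 1, 2, 3 enabled */
	uint64_t diff = old_ctrl ^ new_ctrl;	/* toggled: bits 0 and 2 */

	for (int bit = 0; bit < NR_COUNTERS; bit++)
		if (diff & (1ull << bit))
			reprogram(bit);		/* touches 0 and 2 only */
	return 0;
}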
@@ -776,20 +776,23 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
{
struct kvm_pmc *pmc = NULL;
- int bit;
+ int bit, hw_idx;
for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
X86_PMC_IDX_MAX) {
pmc = intel_pmc_idx_to_pmc(pmu, bit);
if (!pmc || !pmc_speculative_in_use(pmc) ||
- !intel_pmc_is_enabled(pmc))
+ !intel_pmc_is_enabled(pmc) || !pmc->perf_event)
continue;
- if (pmc->perf_event && pmc->idx != pmc->perf_event->hw.idx) {
- pmu->host_cross_mapped_mask |=
- BIT_ULL(pmc->perf_event->hw.idx);
- }
+ /*
+ * A negative index indicates the event isn't mapped to a
+ * physical counter in the host, e.g. due to contention.
+ */
+ hw_idx = pmc->perf_event->hw.idx;
+ if (hw_idx != pmc->idx && hw_idx > -1)
+ pmu->host_cross_mapped_mask |= BIT_ULL(hw_idx);
}
}
diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c
index aba8cebdc587..8f95c7c01433 100644
--- a/arch/x86/kvm/vmx/sgx.c
+++ b/arch/x86/kvm/vmx/sgx.c
@@ -129,7 +129,7 @@ static int sgx_inject_fault(struct kvm_vcpu *vcpu, gva_t gva, int trapnr)
ex.address = gva;
ex.error_code_valid = true;
ex.nested_page_fault = false;
- kvm_inject_page_fault(vcpu, &ex);
+ kvm_inject_emulated_page_fault(vcpu, &ex);
} else {
kvm_inject_gp(vcpu, 0);
}
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 6de96b943804..8477d8bdd69c 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -189,13 +189,16 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
xor %ebx, %ebx
.Lclear_regs:
+ /* Discard @regs. The register is irrelevant, it just can't be RBX. */
+ pop %_ASM_AX
+
/*
* Clear all general purpose registers except RSP and RBX to prevent
* speculative use of the guest's values, even those that are reloaded
* via the stack. In theory, an L1 cache miss when restoring registers
* could lead to speculative execution with the guest's values.
* Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
- * free. RSP and RAX are exempt as RSP is restored by hardware during
+ * free. RSP and RBX are exempt as RSP is restored by hardware during
* VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
* value.
*/
@@ -216,9 +219,6 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
xor %r15d, %r15d
#endif
- /* "POP" @regs. */
- add $WORD_SIZE, %_ASM_SP
-
/*
* IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
* the first unbalanced RET after vmexit!
@@ -234,7 +234,6 @@ SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
X86_FEATURE_RSB_VMEXIT_LITE
-
pop %_ASM_ARG2 /* @flags */
pop %_ASM_ARG1 /* @vmx */
@@ -293,22 +292,13 @@ SYM_FUNC_START(vmread_error_trampoline)
push %r10
push %r11
#endif
-#ifdef CONFIG_X86_64
+
/* Load @field and @fault to arg1 and arg2 respectively. */
- mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
- mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
-#else
- /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
- push 3*WORD_SIZE(%ebp)
- push 2*WORD_SIZE(%ebp)
-#endif
+ mov 3*WORD_SIZE(%_ASM_BP), %_ASM_ARG2
+ mov 2*WORD_SIZE(%_ASM_BP), %_ASM_ARG1
call vmread_error
-#ifndef CONFIG_X86_64
- add $8, %esp
-#endif
-
/* Zero out @fault, which will be popped into the result register. */
_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d7f8331d6f7e..9dba04b6b019 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -439,7 +439,7 @@ do { \
pr_warn_ratelimited(fmt); \
} while (0)
-asmlinkage void vmread_error(unsigned long field, bool fault)
+void vmread_error(unsigned long field, bool fault)
{
if (fault)
kvm_spurious_fault();
@@ -843,8 +843,7 @@ static bool msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr)
if (!(exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS))
return true;
- return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap,
- MSR_IA32_SPEC_CTRL);
+ return vmx_test_msr_bitmap_write(vmx->loaded_vmcs->msr_bitmap, msr);
}
unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
@@ -865,7 +864,7 @@ unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
return flags;
}
-static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+static __always_inline void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit)
{
vm_entry_controls_clearbit(vmx, entry);
@@ -923,7 +922,7 @@ skip_guest:
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}
-static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit,
unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
u64 guest_val, u64 host_val)
@@ -1653,17 +1652,25 @@ static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
/*
* Per the SDM, MTF takes priority over debug-trap exceptions besides
- * T-bit traps. As instruction emulation is completed (i.e. at the
- * instruction boundary), any #DB exception pending delivery must be a
- * debug-trap. Record the pending MTF state to be delivered in
+ * TSS T-bit traps and ICEBP (INT1). KVM doesn't emulate T-bit traps
+ * or ICEBP (in the emulator proper), and skipping of ICEBP after an
+ * intercepted #DB deliberately avoids single-step #DB and MTF updates
+ * as ICEBP is higher priority than both. As instruction emulation is
+ * completed at this point (i.e. KVM is at the instruction boundary),
+ * any #DB exception pending delivery must be a debug-trap of lower
+ * priority than MTF. Record the pending MTF state to be delivered in
* vmx_check_nested_events().
*/
if (nested_cpu_has_mtf(vmcs12) &&
(!vcpu->arch.exception.pending ||
- vcpu->arch.exception.nr == DB_VECTOR))
+ vcpu->arch.exception.vector == DB_VECTOR) &&
+ (!vcpu->arch.exception_vmexit.pending ||
+ vcpu->arch.exception_vmexit.vector == DB_VECTOR)) {
vmx->nested.mtf_pending = true;
- else
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ } else {
vmx->nested.mtf_pending = false;
+ }
}
static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
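The condition guarding mtf_pending above reduces to one small predicate applied to both exception queues. A hedged, stand-alone restatement (user-space C, not the kernel code; only the DB_VECTOR value matches x86):

#include <stdbool.h>
#include <stdio.h>

#define DB_VECTOR 1

struct queued_exception {
	bool pending;
	unsigned int vector;
};

/* MTF outranks trap-like #DBs; any other pending exception outranks MTF. */
static bool mtf_beats(const struct queued_exception *ex)
{
	return !ex->pending || ex->vector == DB_VECTOR;
}

int main(void)
{
	struct queued_exception db = { .pending = true, .vector = DB_VECTOR };
	struct queued_exception gp = { .pending = true, .vector = 13 };

	printf("%d %d\n", mtf_beats(&db), mtf_beats(&gp));	/* 1 0 */
	return 0;
}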
@@ -1685,32 +1692,40 @@ static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
}
-static void vmx_queue_exception(struct kvm_vcpu *vcpu)
+static void vmx_inject_exception(struct kvm_vcpu *vcpu)
{
+ struct kvm_queued_exception *ex = &vcpu->arch.exception;
+ u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned nr = vcpu->arch.exception.nr;
- bool has_error_code = vcpu->arch.exception.has_error_code;
- u32 error_code = vcpu->arch.exception.error_code;
- u32 intr_info = nr | INTR_INFO_VALID_MASK;
- kvm_deliver_exception_payload(vcpu);
+ kvm_deliver_exception_payload(vcpu, ex);
- if (has_error_code) {
- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+ if (ex->has_error_code) {
+ /*
+ * Despite the error code being architecturally defined as 32
+ * bits, and the VMCS field being 32 bits, Intel CPUs and thus
+ * VMX don't actually support setting bits 31:16. Hardware
+ * will (should) never provide a bogus error code, but AMD CPUs
+ * do generate error codes with bits 31:16 set, and so KVM's
+ * ABI lets userspace shove in arbitrary 32-bit values. Drop
+ * the upper bits to avoid VM-Fail; losing information that
+ * doesn't really exist is preferable to killing the VM.
+ */
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, (u16)ex->error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
if (vmx->rmode.vm86_active) {
int inc_eip = 0;
- if (kvm_exception_is_soft(nr))
+ if (kvm_exception_is_soft(ex->vector))
inc_eip = vcpu->arch.event_exit_inst_len;
- kvm_inject_realmode_interrupt(vcpu, nr, inc_eip);
+ kvm_inject_realmode_interrupt(vcpu, ex->vector, inc_eip);
return;
}
WARN_ON_ONCE(vmx->emulation_required);
- if (kvm_exception_is_soft(nr)) {
+ if (kvm_exception_is_soft(ex->vector)) {
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmx->vcpu.arch.event_exit_inst_len);
intr_info |= INTR_TYPE_SOFT_EXCEPTION;
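To see what the (u16) cast in vmx_inject_exception() actually does, a tiny stand-alone sketch (user-space C) of the truncation:

/* User-space sketch: why truncating via (u16) is the lesser evil.
 * Bits 31:16 of the error code can't be set through VM-Entry anyway,
 * so dropping them avoids VM-Fail without losing real state. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t error_code = 0xdead0001;	/* e.g. AMD-style, bits 31:16 set */
	uint32_t vmcs_value = (uint16_t)error_code;

	printf("0x%08x -> 0x%08x\n", error_code, vmcs_value);	/* -> 0x00000001 */
	return 0;
}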
@@ -1931,9 +1946,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
* sanity checking and refuse to boot. Filter all unsupported
* features out.
*/
- if (!msr_info->host_initiated &&
- vmx->nested.enlightened_vmcs_enabled)
- nested_evmcs_filter_control_msr(msr_info->index,
+ if (!msr_info->host_initiated && guest_cpuid_has_evmcs(vcpu))
+ nested_evmcs_filter_control_msr(vcpu, msr_info->index,
&msr_info->data);
break;
case MSR_IA32_RTIT_CTL:
@@ -2495,6 +2509,30 @@ static bool cpu_has_sgx(void)
return cpuid_eax(0) >= 0x12 && (cpuid_eax(0x12) & BIT(0));
}
+/*
+ * Some CPUs support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
+ * can't be used due to an erratum where VM-Exit may incorrectly clear
+ * IA32_PERF_GLOBAL_CTRL[34:32]. Work around the erratum by using the
+ * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
+ */
+static bool cpu_has_perf_global_ctrl_bug(void)
+{
+ if (boot_cpu_data.x86 == 0x6) {
+ switch (boot_cpu_data.x86_model) {
+ case INTEL_FAM6_NEHALEM_EP: /* AAK155 */
+ case INTEL_FAM6_NEHALEM: /* AAP115 */
+ case INTEL_FAM6_WESTMERE: /* AAT100 */
+ case INTEL_FAM6_WESTMERE_EP: /* BC86,AAY89,BD102 */
+ case INTEL_FAM6_NEHALEM_EX: /* BA97 */
+ return true;
+ default:
+ break;
+ }
+ }
+
+ return false;
+}
+
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
u32 msr, u32 *result)
{
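cpu_has_perf_global_ctrl_bug() is a pure family/model gate; the removed open-coded version further down confirms the raw model numbers behind the INTEL_FAM6_* names. A stand-alone restatement for readers without the kernel headers (user-space C, illustrative only):

/* Minimal stand-alone illustration of the family/model gate above;
 * model numbers per the removed open-coded workaround (26/30/37/44/46). */
#include <stdbool.h>
#include <stdio.h>

static bool perf_global_ctrl_bug(unsigned int family, unsigned int model)
{
	if (family != 0x6)
		return false;

	switch (model) {
	case 26:	/* NEHALEM_EP, erratum AAK155 */
	case 30:	/* NEHALEM, erratum AAP115 */
	case 37:	/* WESTMERE, erratum AAT100 */
	case 44:	/* WESTMERE_EP, errata BC86,AAY89,BD102 */
	case 46:	/* NEHALEM_EX, erratum BA97 */
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("%d\n", perf_global_ctrl_bug(0x6, 44));	/* 1: Westmere-EP */
	return 0;
}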
@@ -2527,13 +2565,13 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
struct vmx_capability *vmx_cap)
{
u32 vmx_msr_low, vmx_msr_high;
- u32 min, opt, min2, opt2;
u32 _pin_based_exec_control = 0;
u32 _cpu_based_exec_control = 0;
u32 _cpu_based_2nd_exec_control = 0;
u64 _cpu_based_3rd_exec_control = 0;
u32 _vmexit_control = 0;
u32 _vmentry_control = 0;
+ u64 misc_msr;
int i;
/*
@@ -2553,64 +2591,17 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
};
memset(vmcs_conf, 0, sizeof(*vmcs_conf));
- min = CPU_BASED_HLT_EXITING |
-#ifdef CONFIG_X86_64
- CPU_BASED_CR8_LOAD_EXITING |
- CPU_BASED_CR8_STORE_EXITING |
-#endif
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_UNCOND_IO_EXITING |
- CPU_BASED_MOV_DR_EXITING |
- CPU_BASED_USE_TSC_OFFSETTING |
- CPU_BASED_MWAIT_EXITING |
- CPU_BASED_MONITOR_EXITING |
- CPU_BASED_INVLPG_EXITING |
- CPU_BASED_RDPMC_EXITING;
-
- opt = CPU_BASED_TPR_SHADOW |
- CPU_BASED_USE_MSR_BITMAPS |
- CPU_BASED_ACTIVATE_SECONDARY_CONTROLS |
- CPU_BASED_ACTIVATE_TERTIARY_CONTROLS;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
- &_cpu_based_exec_control) < 0)
+
+ if (adjust_vmx_controls(KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL,
+ KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL,
+ MSR_IA32_VMX_PROCBASED_CTLS,
+ &_cpu_based_exec_control))
return -EIO;
-#ifdef CONFIG_X86_64
- if (_cpu_based_exec_control & CPU_BASED_TPR_SHADOW)
- _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
- ~CPU_BASED_CR8_STORE_EXITING;
-#endif
if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
- min2 = 0;
- opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
- SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
- SECONDARY_EXEC_WBINVD_EXITING |
- SECONDARY_EXEC_ENABLE_VPID |
- SECONDARY_EXEC_ENABLE_EPT |
- SECONDARY_EXEC_UNRESTRICTED_GUEST |
- SECONDARY_EXEC_PAUSE_LOOP_EXITING |
- SECONDARY_EXEC_DESC |
- SECONDARY_EXEC_ENABLE_RDTSCP |
- SECONDARY_EXEC_ENABLE_INVPCID |
- SECONDARY_EXEC_APIC_REGISTER_VIRT |
- SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
- SECONDARY_EXEC_SHADOW_VMCS |
- SECONDARY_EXEC_XSAVES |
- SECONDARY_EXEC_RDSEED_EXITING |
- SECONDARY_EXEC_RDRAND_EXITING |
- SECONDARY_EXEC_ENABLE_PML |
- SECONDARY_EXEC_TSC_SCALING |
- SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
- SECONDARY_EXEC_PT_USE_GPA |
- SECONDARY_EXEC_PT_CONCEAL_VMX |
- SECONDARY_EXEC_ENABLE_VMFUNC |
- SECONDARY_EXEC_BUS_LOCK_DETECTION |
- SECONDARY_EXEC_NOTIFY_VM_EXITING;
- if (cpu_has_sgx())
- opt2 |= SECONDARY_EXEC_ENCLS_EXITING;
- if (adjust_vmx_controls(min2, opt2,
+ if (adjust_vmx_controls(KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL,
+ KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL,
MSR_IA32_VMX_PROCBASED_CTLS2,
- &_cpu_based_2nd_exec_control) < 0)
+ &_cpu_based_2nd_exec_control))
return -EIO;
}
#ifndef CONFIG_X86_64
@@ -2628,13 +2619,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
&vmx_cap->ept, &vmx_cap->vpid);
- if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
- /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
- enabled */
- _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_INVLPG_EXITING);
- } else if (vmx_cap->ept) {
+ if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
+ vmx_cap->ept) {
pr_warn_once("EPT CAP should not exist if not support "
"1-setting enable EPT VM-execution control\n");
@@ -2654,32 +2640,24 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
vmx_cap->vpid = 0;
}
- if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS) {
- u64 opt3 = TERTIARY_EXEC_IPI_VIRT;
+ if (!cpu_has_sgx())
+ _cpu_based_2nd_exec_control &= ~SECONDARY_EXEC_ENCLS_EXITING;
- _cpu_based_3rd_exec_control = adjust_vmx_controls64(opt3,
+ if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
+ _cpu_based_3rd_exec_control =
+ adjust_vmx_controls64(KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL,
MSR_IA32_VMX_PROCBASED_CTLS3);
- }
- min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
-#ifdef CONFIG_X86_64
- min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
-#endif
- opt = VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL |
- VM_EXIT_LOAD_IA32_PAT |
- VM_EXIT_LOAD_IA32_EFER |
- VM_EXIT_CLEAR_BNDCFGS |
- VM_EXIT_PT_CONCEAL_PIP |
- VM_EXIT_CLEAR_IA32_RTIT_CTL;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
- &_vmexit_control) < 0)
+ if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_EXIT_CONTROLS,
+ KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS,
+ MSR_IA32_VMX_EXIT_CTLS,
+ &_vmexit_control))
return -EIO;
- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
- opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
- PIN_BASED_VMX_PREEMPTION_TIMER;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
- &_pin_based_exec_control) < 0)
+ if (adjust_vmx_controls(KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL,
+ KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL,
+ MSR_IA32_VMX_PINBASED_CTLS,
+ &_pin_based_exec_control))
return -EIO;
if (cpu_has_broken_vmx_preemption_timer())
@@ -2688,15 +2666,10 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY))
_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
- min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
- opt = VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
- VM_ENTRY_LOAD_IA32_PAT |
- VM_ENTRY_LOAD_IA32_EFER |
- VM_ENTRY_LOAD_BNDCFGS |
- VM_ENTRY_PT_CONCEAL_PIP |
- VM_ENTRY_LOAD_IA32_RTIT_CTL;
- if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
- &_vmentry_control) < 0)
+ if (adjust_vmx_controls(KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS,
+ KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS,
+ MSR_IA32_VMX_ENTRY_CTLS,
+ &_vmentry_control))
return -EIO;
for (i = 0; i < ARRAY_SIZE(vmcs_entry_exit_pairs); i++) {
@@ -2716,30 +2689,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
_vmexit_control &= ~x_ctrl;
}
- /*
- * Some cpus support VM_{ENTRY,EXIT}_IA32_PERF_GLOBAL_CTRL but they
- * can't be used due to an errata where VM Exit may incorrectly clear
- * IA32_PERF_GLOBAL_CTRL[34:32]. Workaround the errata by using the
- * MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
- */
- if (boot_cpu_data.x86 == 0x6) {
- switch (boot_cpu_data.x86_model) {
- case 26: /* AAK155 */
- case 30: /* AAP115 */
- case 37: /* AAT100 */
- case 44: /* BC86,AAY89,BD102 */
- case 46: /* BA97 */
- _vmentry_control &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
- _vmexit_control &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
- pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
- "does not work properly. Using workaround\n");
- break;
- default:
- break;
- }
- }
-
-
rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
@@ -2756,6 +2705,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
if (((vmx_msr_high >> 18) & 15) != 6)
return -EIO;
+ rdmsrl(MSR_IA32_VMX_MISC, misc_msr);
+
vmcs_conf->size = vmx_msr_high & 0x1fff;
vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
@@ -2767,11 +2718,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
vmcs_conf->cpu_based_3rd_exec_ctrl = _cpu_based_3rd_exec_control;
vmcs_conf->vmexit_ctrl = _vmexit_control;
vmcs_conf->vmentry_ctrl = _vmentry_control;
-
-#if IS_ENABLED(CONFIG_HYPERV)
- if (enlightened_vmcs)
- evmcs_sanitize_exec_ctrls(vmcs_conf);
-#endif
+ vmcs_conf->misc = misc_msr;
return 0;
}
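The KVM_REQUIRED_*/KVM_OPTIONAL_* pairs passed to adjust_vmx_controls() throughout this function encode a simple contract: fail if any required bit is unsupported, and silently drop unsupported optional bits. A sketch of that contract (user-space C; the real helper also consults the capability MSR's allowed-0 half, which this omits):

/* Sketch of the required/optional adjustment contract, not the kernel
 * implementation.  'allowed1' stands in for the high (allowed-1) half
 * of the corresponding VMX capability MSR. */
#include <stdint.h>
#include <stdio.h>

static int adjust_controls(uint32_t required, uint32_t optional,
			   uint32_t allowed1, uint32_t *result)
{
	uint32_t ctl = required | optional;

	ctl &= allowed1;		/* drop unsupported optional bits */
	if ((ctl & required) != required)
		return -1;		/* a required bit isn't supported */

	*result = ctl;
	return 0;
}

int main(void)
{
	uint32_t ctl;

	if (adjust_controls(0x3, 0xc, 0x7, &ctl) == 0)
		printf("controls: 0x%x\n", ctl);	/* 0x7 */
	return 0;
}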
@@ -3038,10 +2985,15 @@ int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
return 0;
vcpu->arch.efer = efer;
+#ifdef CONFIG_X86_64
if (efer & EFER_LMA)
vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
else
vm_entry_controls_clearbit(vmx, VM_ENTRY_IA32E_MODE);
+#else
+ if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))
+ return 1;
+#endif
vmx_setup_uret_msrs(vmx);
return 0;
@@ -4328,18 +4280,37 @@ static u32 vmx_vmentry_ctrl(void)
if (vmx_pt_mode_is_system())
vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
VM_ENTRY_LOAD_IA32_RTIT_CTL);
- /* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
- return vmentry_ctrl &
- ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
+ /*
+ * IA32e mode, and loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically.
+ */
+ vmentry_ctrl &= ~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL |
+ VM_ENTRY_LOAD_IA32_EFER |
+ VM_ENTRY_IA32E_MODE);
+
+ if (cpu_has_perf_global_ctrl_bug())
+ vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+
+ return vmentry_ctrl;
}
static u32 vmx_vmexit_ctrl(void)
{
u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
+ /*
+ * Not used by KVM and never set in vmcs01 or vmcs02, but emulated for
+ * nested virtualization and thus allowed to be set in vmcs12.
+ */
+ vmexit_ctrl &= ~(VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER |
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER);
+
if (vmx_pt_mode_is_system())
vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
VM_EXIT_CLEAR_IA32_RTIT_CTL);
+
+ if (cpu_has_perf_global_ctrl_bug())
+ vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
+
/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
return vmexit_ctrl &
~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
@@ -4377,20 +4348,38 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
+ /*
+ * Not used by KVM, but fully supported for nesting, i.e. they are
+ * allowed in vmcs12 and propagated to vmcs02 when set in vmcs12.
+ */
+ exec_control &= ~(CPU_BASED_RDTSC_EXITING |
+ CPU_BASED_USE_IO_BITMAPS |
+ CPU_BASED_MONITOR_TRAP_FLAG |
+ CPU_BASED_PAUSE_EXITING);
+
+ /* INTR_WINDOW_EXITING and NMI_WINDOW_EXITING are toggled dynamically */
+ exec_control &= ~(CPU_BASED_INTR_WINDOW_EXITING |
+ CPU_BASED_NMI_WINDOW_EXITING);
+
if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
exec_control &= ~CPU_BASED_MOV_DR_EXITING;
- if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
+ if (!cpu_need_tpr_shadow(&vmx->vcpu))
exec_control &= ~CPU_BASED_TPR_SHADOW;
+
#ifdef CONFIG_X86_64
+ if (exec_control & CPU_BASED_TPR_SHADOW)
+ exec_control &= ~(CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING);
+ else
exec_control |= CPU_BASED_CR8_STORE_EXITING |
CPU_BASED_CR8_LOAD_EXITING;
#endif
- }
- if (!enable_ept)
- exec_control |= CPU_BASED_CR3_STORE_EXITING |
- CPU_BASED_CR3_LOAD_EXITING |
- CPU_BASED_INVLPG_EXITING;
+ /* No need to intercept CR3 access or INVLPG when using EPT. */
+ if (enable_ept)
+ exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
+ CPU_BASED_CR3_STORE_EXITING |
+ CPU_BASED_INVLPG_EXITING);
if (kvm_mwait_in_guest(vmx->vcpu.kvm))
exec_control &= ~(CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING);
@@ -5156,8 +5145,10 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
* instruction. ICEBP generates a trap-like #DB, but
* despite its interception control being tied to #DB,
* is an instruction intercept, i.e. the VM-Exit occurs
- * on the ICEBP itself. Note, skipping ICEBP also
- * clears STI and MOVSS blocking.
+ * on the ICEBP itself. Use the inner "skip" helper to
+ * avoid single-step #DB and MTF updates, as ICEBP is
+ * higher priority. Note, skipping ICEBP still clears
+ * STI and MOVSS blocking.
*
* For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
* if single-step is enabled in RFLAGS and STI or MOVSS
@@ -5639,7 +5630,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
- trace_kvm_page_fault(gpa, exit_qualification);
+ trace_kvm_page_fault(vcpu, gpa, exit_qualification);
/* Is it a read fault? */
error_code = (exit_qualification & EPT_VIOLATION_ACC_READ)
@@ -5711,7 +5702,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
return vmx->emulation_required && !vmx->rmode.vm86_active &&
- (vcpu->arch.exception.pending || vcpu->arch.exception.injected);
+ (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
}
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
@@ -7431,7 +7422,7 @@ static int __init vmx_check_processor_compat(void)
if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
return -EIO;
if (nested)
- nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
+ nested_vmx_setup_ctls_msrs(&vmcs_conf, vmx_cap.ept);
if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
smp_processor_id());
@@ -8071,7 +8062,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.patch_hypercall = vmx_patch_hypercall,
.inject_irq = vmx_inject_irq,
.inject_nmi = vmx_inject_nmi,
- .queue_exception = vmx_queue_exception,
+ .inject_exception = vmx_inject_exception,
.cancel_injection = vmx_cancel_injection,
.interrupt_allowed = vmx_interrupt_allowed,
.nmi_allowed = vmx_nmi_allowed,
@@ -8228,6 +8219,10 @@ static __init int hardware_setup(void)
if (setup_vmcs_config(&vmcs_config, &vmx_capability) < 0)
return -EIO;
+ if (cpu_has_perf_global_ctrl_bug())
+ pr_warn_once("kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
+ "does not work properly. Using workaround\n");
+
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -8342,11 +8337,9 @@ static __init int hardware_setup(void)
if (enable_preemption_timer) {
u64 use_timer_freq = 5000ULL * 1000 * 1000;
- u64 vmx_msr;
- rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
cpu_preemption_timer_multi =
- vmx_msr & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
+ vmcs_config.misc & VMX_MISC_PREEMPTION_TIMER_RATE_MASK;
if (tsc_khz)
use_timer_freq = (u64)tsc_khz * 1000;
@@ -8382,8 +8375,7 @@ static __init int hardware_setup(void)
setup_default_sgx_lepubkeyhash();
if (nested) {
- nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
- vmx_capability.ept);
+ nested_vmx_setup_ctls_msrs(&vmcs_config, vmx_capability.ept);
r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
if (r)
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 24d58c2ffaa3..a3da84f4ea45 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -477,29 +477,145 @@ static inline u8 vmx_get_rvi(void)
return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
-#define BUILD_CONTROLS_SHADOW(lname, uname, bits) \
-static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \
-{ \
- if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
- vmcs_write##bits(uname, val); \
- vmx->loaded_vmcs->controls_shadow.lname = val; \
- } \
-} \
-static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs) \
-{ \
- return vmcs->controls_shadow.lname; \
-} \
-static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx) \
-{ \
- return __##lname##_controls_get(vmx->loaded_vmcs); \
-} \
-static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \
-{ \
- lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
-} \
-static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
-{ \
- lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
+#define __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS \
+ (VM_ENTRY_LOAD_DEBUG_CONTROLS)
+#ifdef CONFIG_X86_64
+ #define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS \
+ (__KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS | \
+ VM_ENTRY_IA32E_MODE)
+#else
+ #define KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS \
+ __KVM_REQUIRED_VMX_VM_ENTRY_CONTROLS
+#endif
+#define KVM_OPTIONAL_VMX_VM_ENTRY_CONTROLS \
+ (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \
+ VM_ENTRY_LOAD_IA32_PAT | \
+ VM_ENTRY_LOAD_IA32_EFER | \
+ VM_ENTRY_LOAD_BNDCFGS | \
+ VM_ENTRY_PT_CONCEAL_PIP | \
+ VM_ENTRY_LOAD_IA32_RTIT_CTL)
+
+#define __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS \
+ (VM_EXIT_SAVE_DEBUG_CONTROLS | \
+ VM_EXIT_ACK_INTR_ON_EXIT)
+#ifdef CONFIG_X86_64
+ #define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS \
+ (__KVM_REQUIRED_VMX_VM_EXIT_CONTROLS | \
+ VM_EXIT_HOST_ADDR_SPACE_SIZE)
+#else
+ #define KVM_REQUIRED_VMX_VM_EXIT_CONTROLS \
+ __KVM_REQUIRED_VMX_VM_EXIT_CONTROLS
+#endif
+#define KVM_OPTIONAL_VMX_VM_EXIT_CONTROLS \
+ (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
+ VM_EXIT_SAVE_IA32_PAT | \
+ VM_EXIT_LOAD_IA32_PAT | \
+ VM_EXIT_SAVE_IA32_EFER | \
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | \
+ VM_EXIT_LOAD_IA32_EFER | \
+ VM_EXIT_CLEAR_BNDCFGS | \
+ VM_EXIT_PT_CONCEAL_PIP | \
+ VM_EXIT_CLEAR_IA32_RTIT_CTL)
+
+#define KVM_REQUIRED_VMX_PIN_BASED_VM_EXEC_CONTROL \
+ (PIN_BASED_EXT_INTR_MASK | \
+ PIN_BASED_NMI_EXITING)
+#define KVM_OPTIONAL_VMX_PIN_BASED_VM_EXEC_CONTROL \
+ (PIN_BASED_VIRTUAL_NMIS | \
+ PIN_BASED_POSTED_INTR | \
+ PIN_BASED_VMX_PREEMPTION_TIMER)
+
+#define __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL \
+ (CPU_BASED_HLT_EXITING | \
+ CPU_BASED_CR3_LOAD_EXITING | \
+ CPU_BASED_CR3_STORE_EXITING | \
+ CPU_BASED_UNCOND_IO_EXITING | \
+ CPU_BASED_MOV_DR_EXITING | \
+ CPU_BASED_USE_TSC_OFFSETTING | \
+ CPU_BASED_MWAIT_EXITING | \
+ CPU_BASED_MONITOR_EXITING | \
+ CPU_BASED_INVLPG_EXITING | \
+ CPU_BASED_RDPMC_EXITING | \
+ CPU_BASED_INTR_WINDOW_EXITING)
+
+#ifdef CONFIG_X86_64
+ #define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL \
+ (__KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL | \
+ CPU_BASED_CR8_LOAD_EXITING | \
+ CPU_BASED_CR8_STORE_EXITING)
+#else
+ #define KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL \
+ __KVM_REQUIRED_VMX_CPU_BASED_VM_EXEC_CONTROL
+#endif
+
+#define KVM_OPTIONAL_VMX_CPU_BASED_VM_EXEC_CONTROL \
+ (CPU_BASED_RDTSC_EXITING | \
+ CPU_BASED_TPR_SHADOW | \
+ CPU_BASED_USE_IO_BITMAPS | \
+ CPU_BASED_MONITOR_TRAP_FLAG | \
+ CPU_BASED_USE_MSR_BITMAPS | \
+ CPU_BASED_NMI_WINDOW_EXITING | \
+ CPU_BASED_PAUSE_EXITING | \
+ CPU_BASED_ACTIVATE_SECONDARY_CONTROLS | \
+ CPU_BASED_ACTIVATE_TERTIARY_CONTROLS)
+
+#define KVM_REQUIRED_VMX_SECONDARY_VM_EXEC_CONTROL 0
+#define KVM_OPTIONAL_VMX_SECONDARY_VM_EXEC_CONTROL \
+ (SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \
+ SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \
+ SECONDARY_EXEC_WBINVD_EXITING | \
+ SECONDARY_EXEC_ENABLE_VPID | \
+ SECONDARY_EXEC_ENABLE_EPT | \
+ SECONDARY_EXEC_UNRESTRICTED_GUEST | \
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING | \
+ SECONDARY_EXEC_DESC | \
+ SECONDARY_EXEC_ENABLE_RDTSCP | \
+ SECONDARY_EXEC_ENABLE_INVPCID | \
+ SECONDARY_EXEC_APIC_REGISTER_VIRT | \
+ SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
+ SECONDARY_EXEC_SHADOW_VMCS | \
+ SECONDARY_EXEC_XSAVES | \
+ SECONDARY_EXEC_RDSEED_EXITING | \
+ SECONDARY_EXEC_RDRAND_EXITING | \
+ SECONDARY_EXEC_ENABLE_PML | \
+ SECONDARY_EXEC_TSC_SCALING | \
+ SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
+ SECONDARY_EXEC_PT_USE_GPA | \
+ SECONDARY_EXEC_PT_CONCEAL_VMX | \
+ SECONDARY_EXEC_ENABLE_VMFUNC | \
+ SECONDARY_EXEC_BUS_LOCK_DETECTION | \
+ SECONDARY_EXEC_NOTIFY_VM_EXITING | \
+ SECONDARY_EXEC_ENCLS_EXITING)
+
+#define KVM_REQUIRED_VMX_TERTIARY_VM_EXEC_CONTROL 0
+#define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \
+ (TERTIARY_EXEC_IPI_VIRT)
+
+#define BUILD_CONTROLS_SHADOW(lname, uname, bits) \
+static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \
+{ \
+ if (vmx->loaded_vmcs->controls_shadow.lname != val) { \
+ vmcs_write##bits(uname, val); \
+ vmx->loaded_vmcs->controls_shadow.lname = val; \
+ } \
+} \
+static inline u##bits __##lname##_controls_get(struct loaded_vmcs *vmcs) \
+{ \
+ return vmcs->controls_shadow.lname; \
+} \
+static inline u##bits lname##_controls_get(struct vcpu_vmx *vmx) \
+{ \
+ return __##lname##_controls_get(vmx->loaded_vmcs); \
+} \
+static __always_inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u##bits val) \
+{ \
+ BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
+ lname##_controls_set(vmx, lname##_controls_get(vmx) | val); \
+} \
+static __always_inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u##bits val) \
+{ \
+ BUILD_BUG_ON(!(val & (KVM_REQUIRED_VMX_##uname | KVM_OPTIONAL_VMX_##uname))); \
+ lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val); \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS, 32)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS, 32)
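The BUILD_BUG_ON() added to the setbit/clearbit helpers turns an unknown control bit into a build failure instead of a silent runtime no-op. A stand-alone illustration of the mechanism (user-space C, with a simplified BUILD_BUG_ON):

#include <stdint.h>

#define KNOWN_BITS	0x00000007u

/* Simplified BUILD_BUG_ON: negative array size if 'cond' is true. */
#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define SET_CTRL_BIT(ctl, bit) do {					\
	BUILD_BUG_ON(!((bit) & KNOWN_BITS));	/* unknown bit? */	\
	(ctl) |= (bit);							\
} while (0)

int main(void)
{
	uint32_t ctl = 0;

	SET_CTRL_BIT(ctl, 0x4);		/* fine: 0x4 is a known bit */
	/* SET_CTRL_BIT(ctl, 0x8); */	/* would fail to compile */
	return (int)ctl;
}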
@@ -626,4 +742,14 @@ static inline bool vmx_can_use_ipiv(struct kvm_vcpu *vcpu)
return lapic_in_kernel(vcpu) && enable_ipiv;
}
+static inline bool guest_cpuid_has_evmcs(struct kvm_vcpu *vcpu)
+{
+ /*
+ * eVMCS is exposed to the guest if Hyper-V is enabled in CPUID and
+ * eVMCS has been explicitly enabled by userspace.
+ */
+ return vcpu->arch.hyperv_enabled &&
+ to_vmx(vcpu)->nested.enlightened_vmcs_enabled;
+}
+
#endif /* __KVM_X86_VMX_H */
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 5cfc49ddb1b4..ec268df83ed6 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -10,7 +10,7 @@
#include "vmcs.h"
#include "../x86.h"
-asmlinkage void vmread_error(unsigned long field, bool fault);
+void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 205ebdc2b11b..4bd5f8a751de 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -173,8 +173,13 @@ bool __read_mostly enable_vmware_backdoor = false;
module_param(enable_vmware_backdoor, bool, S_IRUGO);
EXPORT_SYMBOL_GPL(enable_vmware_backdoor);
-static bool __read_mostly force_emulation_prefix = false;
-module_param(force_emulation_prefix, bool, S_IRUGO);
+/*
+ * Flags to manipulate forced emulation behavior (any non-zero value will
+ * enable forced emulation).
+ */
+#define KVM_FEP_CLEAR_RFLAGS_RF BIT(1)
+static int __read_mostly force_emulation_prefix;
+module_param(force_emulation_prefix, int, 0644);
int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
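With the switch from bool to int, force_emulation_prefix becomes a flag word rather than an on/off switch. A sketch of how the bits are meant to be read (user-space C; the sysfs location mentioned in the comment is an assumption based on standard module_param behavior):

/* Illustrative user-space C, not kernel code: how the new integer
 * force_emulation_prefix is interpreted.  Any non-zero value enables
 * the prefix; bit 1 additionally clears RFLAGS.RF.  Presumably
 * writable at runtime via /sys/module/kvm/parameters/ given the
 * 0644 mode (an assumption). */
#include <stdio.h>

#define KVM_FEP_CLEAR_RFLAGS_RF (1 << 1)

int main(void)
{
	int force_emulation_prefix = 3;

	if (force_emulation_prefix)
		printf("forced emulation enabled\n");
	if (force_emulation_prefix & KVM_FEP_CLEAR_RFLAGS_RF)
		printf("RFLAGS.RF cleared before emulation\n");
	return 0;
}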
@@ -528,6 +533,7 @@ static int exception_class(int vector)
#define EXCPT_TRAP 1
#define EXCPT_ABORT 2
#define EXCPT_INTERRUPT 3
+#define EXCPT_DB 4
static int exception_type(int vector)
{
@@ -538,8 +544,14 @@ static int exception_type(int vector)
mask = 1 << vector;
- /* #DB is trap, as instruction watchpoints are handled elsewhere */
- if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
+ /*
+ * #DBs can be trap-like or fault-like; the caller must check other CPU
+ * state, e.g. DR6, to determine whether a #DB is a trap or fault.
+ */
+ if (mask & (1 << DB_VECTOR))
+ return EXCPT_DB;
+
+ if (mask & ((1 << BP_VECTOR) | (1 << OF_VECTOR)))
return EXCPT_TRAP;
if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
@@ -549,16 +561,13 @@ static int exception_type(int vector)
return EXCPT_FAULT;
}
-void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
+ struct kvm_queued_exception *ex)
{
- unsigned nr = vcpu->arch.exception.nr;
- bool has_payload = vcpu->arch.exception.has_payload;
- unsigned long payload = vcpu->arch.exception.payload;
-
- if (!has_payload)
+ if (!ex->has_payload)
return;
- switch (nr) {
+ switch (ex->vector) {
case DB_VECTOR:
/*
* "Certain debug exceptions may clear bit 0-3. The
@@ -583,8 +592,8 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
* So they need to be flipped for DR6.
*/
vcpu->arch.dr6 |= DR6_ACTIVE_LOW;
- vcpu->arch.dr6 |= payload;
- vcpu->arch.dr6 ^= payload & DR6_ACTIVE_LOW;
+ vcpu->arch.dr6 |= ex->payload;
+ vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW;
/*
* The #DB payload is defined as compatible with the 'pending
@@ -595,15 +604,30 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
vcpu->arch.dr6 &= ~BIT(12);
break;
case PF_VECTOR:
- vcpu->arch.cr2 = payload;
+ vcpu->arch.cr2 = ex->payload;
break;
}
- vcpu->arch.exception.has_payload = false;
- vcpu->arch.exception.payload = 0;
+ ex->has_payload = false;
+ ex->payload = 0;
}
EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
+static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector,
+ bool has_error_code, u32 error_code,
+ bool has_payload, unsigned long payload)
+{
+ struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
+
+ ex->vector = vector;
+ ex->injected = false;
+ ex->pending = true;
+ ex->has_error_code = has_error_code;
+ ex->error_code = error_code;
+ ex->has_payload = has_payload;
+ ex->payload = payload;
+}
+
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
bool has_payload, unsigned long payload, bool reinject)
@@ -613,18 +637,31 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
kvm_make_request(KVM_REQ_EVENT, vcpu);
+ /*
+ * If the exception is destined for L2 and isn't being reinjected,
+ * morph it to a VM-Exit if L1 wants to intercept the exception. A
+ * previously injected exception is not checked because it was checked
+ * when it was original queued, and re-checking is incorrect if _L1_
+ * injected the exception, in which case it's exempt from interception.
+ */
+ if (!reinject && is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) {
+ kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code,
+ has_payload, payload);
+ return;
+ }
+
if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) {
queue:
if (reinject) {
/*
- * On vmentry, vcpu->arch.exception.pending is only
- * true if an event injection was blocked by
- * nested_run_pending. In that case, however,
- * vcpu_enter_guest requests an immediate exit,
- * and the guest shouldn't proceed far enough to
- * need reinjection.
+ * On VM-Entry, an exception can be pending if and only
+ * if event injection was blocked by nested_run_pending.
+ * In that case, however, vcpu_enter_guest() requests an
+ * immediate exit, and the guest shouldn't proceed far
+ * enough to need reinjection.
*/
- WARN_ON_ONCE(vcpu->arch.exception.pending);
+ WARN_ON_ONCE(kvm_is_exception_pending(vcpu));
vcpu->arch.exception.injected = true;
if (WARN_ON_ONCE(has_payload)) {
/*
@@ -639,17 +676,18 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.injected = false;
}
vcpu->arch.exception.has_error_code = has_error;
- vcpu->arch.exception.nr = nr;
+ vcpu->arch.exception.vector = nr;
vcpu->arch.exception.error_code = error_code;
vcpu->arch.exception.has_payload = has_payload;
vcpu->arch.exception.payload = payload;
if (!is_guest_mode(vcpu))
- kvm_deliver_exception_payload(vcpu);
+ kvm_deliver_exception_payload(vcpu,
+ &vcpu->arch.exception);
return;
}
/* to check exception */
- prev_nr = vcpu->arch.exception.nr;
+ prev_nr = vcpu->arch.exception.vector;
if (prev_nr == DF_VECTOR) {
/* triple fault -> shutdown */
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
@@ -657,25 +695,22 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
}
class1 = exception_class(prev_nr);
class2 = exception_class(nr);
- if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
- || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
+ if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY) ||
+ (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
/*
- * Generate double fault per SDM Table 5-5. Set
- * exception.pending = true so that the double fault
- * can trigger a nested vmexit.
+ * Synthesize #DF. Clear the previously injected or pending
+ * exception so as not to incorrectly trigger shutdown.
*/
- vcpu->arch.exception.pending = true;
vcpu->arch.exception.injected = false;
- vcpu->arch.exception.has_error_code = true;
- vcpu->arch.exception.nr = DF_VECTOR;
- vcpu->arch.exception.error_code = 0;
- vcpu->arch.exception.has_payload = false;
- vcpu->arch.exception.payload = 0;
- } else
+ vcpu->arch.exception.pending = false;
+
+ kvm_queue_exception_e(vcpu, DF_VECTOR, 0);
+ } else {
/* replace previous exception with a new one in a hope
that instruction re-execution will regenerate lost
exception */
goto queue;
+ }
}
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
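The #DF escalation above follows the SDM's benign/contributory/page-fault classes, which KVM encodes in exception_class(). A stand-alone model of the rule exactly as written in this hunk (user-space C, illustrative enum names):

/* User-space model of the #DF escalation rule above:
 * contributory + contributory, or #PF + anything non-benign, -> #DF. */
#include <stdbool.h>
#include <stdio.h>

enum excpt_class { BENIGN, CONTRIBUTORY, PAGE_FAULT };

static bool escalates_to_df(enum excpt_class first, enum excpt_class second)
{
	return (first == CONTRIBUTORY && second == CONTRIBUTORY) ||
	       (first == PAGE_FAULT && second != BENIGN);
}

int main(void)
{
	/* #GP while delivering #PF -> #DF; #PF while delivering #DB -> no. */
	printf("%d %d\n", escalates_to_df(PAGE_FAULT, CONTRIBUTORY),
	       escalates_to_df(BENIGN, PAGE_FAULT));	/* 1 0 */
	return 0;
}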
@@ -729,20 +764,22 @@ static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
++vcpu->stat.pf_guest;
- vcpu->arch.exception.nested_apf =
- is_guest_mode(vcpu) && fault->async_page_fault;
- if (vcpu->arch.exception.nested_apf) {
- vcpu->arch.apf.nested_apf_token = fault->address;
- kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
- } else {
+
+ /*
+ * Async #PF in L2 is always forwarded to L1 as a VM-Exit regardless of
+ * whether or not L1 wants to intercept "regular" #PF.
+ */
+ if (is_guest_mode(vcpu) && fault->async_page_fault)
+ kvm_queue_exception_vmexit(vcpu, PF_VECTOR,
+ true, fault->error_code,
+ true, fault->address);
+ else
kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
fault->address);
- }
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
-/* Returns true if the page fault was immediately morphed into a VM-Exit. */
-bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
+void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
struct kvm_mmu *fault_mmu;
@@ -760,26 +797,7 @@ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
fault_mmu->root.hpa);
- /*
- * A workaround for KVM's bad exception handling. If KVM injected an
- * exception into L2, and L2 encountered a #PF while vectoring the
- * injected exception, manually check to see if L1 wants to intercept
- * #PF, otherwise queuing the #PF will lead to #DF or a lost exception.
- * In all other cases, defer the check to nested_ops->check_events(),
- * which will correctly handle priority (this does not). Note, other
- * exceptions, e.g. #GP, are theoretically affected, #PF is simply the
- * most problematic, e.g. when L0 and L1 are both intercepting #PF for
- * shadow paging.
- *
- * TODO: Rewrite exception handling to track injected and pending
- * (VM-Exit) exceptions separately.
- */
- if (unlikely(vcpu->arch.exception.injected && is_guest_mode(vcpu)) &&
- kvm_x86_ops.nested_ops->handle_page_fault_workaround(vcpu, fault))
- return true;
-
fault_mmu->inject_page_fault(vcpu, fault);
- return false;
}
EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);
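The workaround deleted above is obsolete because kvm_multiple_exception() now decides at queue time whether a newly raised exception becomes an L1 VM-Exit. A toy model of that routing decision (user-space C; names are illustrative, not KVM identifiers):

/* Toy model of the queue-time routing added in kvm_multiple_exception():
 * a newly raised (not reinjected) exception goes to the VM-Exit queue
 * when L1 intercepts it, else to the ordinary injection queue. */
#include <stdbool.h>
#include <stdio.h>

enum dest { INJECT_TO_L2, VMEXIT_TO_L1 };

static enum dest route(bool reinject, bool in_guest_mode, bool l1_intercepts)
{
	if (!reinject && in_guest_mode && l1_intercepts)
		return VMEXIT_TO_L1;
	return INJECT_TO_L2;
}

int main(void)
{
	/* Reinjected events bypass the intercept check (L1 injected them). */
	printf("%d %d\n", route(false, true, true), route(true, true, true));
	return 0;
}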
@@ -1011,15 +1029,10 @@ void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_load_host_xsave_state);
-static inline u64 kvm_guest_supported_xcr0(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.guest_fpu.fpstate->user_xfeatures;
-}
-
#ifdef CONFIG_X86_64
static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu)
{
- return kvm_guest_supported_xcr0(vcpu) & XFEATURE_MASK_USER_DYNAMIC;
+ return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC;
}
#endif
@@ -1042,7 +1055,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
* saving. However, xcr0 bit 0 is always set, even if the
* emulated CPU does not support XSAVE (see kvm_vcpu_reset()).
*/
- valid_bits = kvm_guest_supported_xcr0(vcpu) | XFEATURE_MASK_FP;
+ valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
if (xcr0 & ~valid_bits)
return 1;
@@ -1070,6 +1083,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu)
{
+ /* Note, #UD due to CR4.OSXSAVE=0 has priority over the intercept. */
if (static_call(kvm_x86_get_cpl)(vcpu) != 0 ||
__kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) {
kvm_inject_gp(vcpu, 0);
@@ -1557,12 +1571,32 @@ static const u32 msr_based_features_all[] = {
static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
static unsigned int num_msr_based_features;
+/*
+ * Some IA32_ARCH_CAPABILITIES bits have dependencies on MSRs that KVM
+ * does not yet virtualize. These include:
+ * 10 - MISC_PACKAGE_CTRLS
+ * 11 - ENERGY_FILTERING_CTL
+ * 12 - DOITM
+ * 18 - FB_CLEAR_CTRL
+ * 21 - XAPIC_DISABLE_STATUS
+ * 23 - OVERCLOCKING_STATUS
+ */
+
+#define KVM_SUPPORTED_ARCH_CAP \
+ (ARCH_CAP_RDCL_NO | ARCH_CAP_IBRS_ALL | ARCH_CAP_RSBA | \
+ ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \
+ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
+ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
+ ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO)
+
static u64 kvm_get_arch_capabilities(void)
{
u64 data = 0;
- if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
+ if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, data);
+ data &= KVM_SUPPORTED_ARCH_CAP;
+ }
/*
* If nx_huge_pages is enabled, KVM's shadow paging will ensure that
@@ -1610,9 +1644,6 @@ static u64 kvm_get_arch_capabilities(void)
*/
}
- /* Guests don't need to know "Fill buffer clear control" exists */
- data &= ~ARCH_CAP_FB_CLEAR_CTRL;
-
return data;
}
@@ -4828,7 +4859,7 @@ static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu)
return (kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_accept_dm_intr(vcpu) &&
!kvm_event_needs_reinjection(vcpu) &&
- !vcpu->arch.exception.pending);
+ !kvm_is_exception_pending(vcpu));
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
@@ -5003,25 +5034,38 @@ static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
+ struct kvm_queued_exception *ex;
+
process_nmi(vcpu);
if (kvm_check_request(KVM_REQ_SMI, vcpu))
process_smi(vcpu);
/*
- * In guest mode, payload delivery should be deferred,
- * so that the L1 hypervisor can intercept #PF before
- * CR2 is modified (or intercept #DB before DR6 is
- * modified under nVMX). Unless the per-VM capability,
- * KVM_CAP_EXCEPTION_PAYLOAD, is set, we may not defer the delivery of
- * an exception payload and handle after a KVM_GET_VCPU_EVENTS. Since we
- * opportunistically defer the exception payload, deliver it if the
- * capability hasn't been requested before processing a
- * KVM_GET_VCPU_EVENTS.
+ * KVM's ABI only allows for one exception to be migrated. Luckily,
+ * the only time there can be two queued exceptions is if there's a
+ * non-exiting _injected_ exception, and a pending exiting exception.
+ * In that case, ignore the VM-Exiting exception as it's an extension
+ * of the injected exception.
+ */
+ if (vcpu->arch.exception_vmexit.pending &&
+ !vcpu->arch.exception.pending &&
+ !vcpu->arch.exception.injected)
+ ex = &vcpu->arch.exception_vmexit;
+ else
+ ex = &vcpu->arch.exception;
+
+ /*
+ * In guest mode, payload delivery should be deferred if the exception
+ * will be intercepted by L1, e.g. KVM should not modify CR2 if L1
+ * intercepts #PF, ditto for DR6 and #DBs. If the per-VM capability,
+ * KVM_CAP_EXCEPTION_PAYLOAD, is not set, userspace may or may not
+ * propagate the payload and so it cannot be safely deferred. Deliver
+ * the payload if the capability hasn't been requested.
*/
if (!vcpu->kvm->arch.exception_payload_enabled &&
- vcpu->arch.exception.pending && vcpu->arch.exception.has_payload)
- kvm_deliver_exception_payload(vcpu);
+ ex->pending && ex->has_payload)
+ kvm_deliver_exception_payload(vcpu, ex);
/*
* The API doesn't provide the instruction length for software
@@ -5029,26 +5073,25 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
* isn't advanced, we should expect to encounter the exception
* again.
*/
- if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
+ if (kvm_exception_is_soft(ex->vector)) {
events->exception.injected = 0;
events->exception.pending = 0;
} else {
- events->exception.injected = vcpu->arch.exception.injected;
- events->exception.pending = vcpu->arch.exception.pending;
+ events->exception.injected = ex->injected;
+ events->exception.pending = ex->pending;
/*
* For ABI compatibility, deliberately conflate
* pending and injected exceptions when
* KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
*/
if (!vcpu->kvm->arch.exception_payload_enabled)
- events->exception.injected |=
- vcpu->arch.exception.pending;
+ events->exception.injected |= ex->pending;
}
- events->exception.nr = vcpu->arch.exception.nr;
- events->exception.has_error_code = vcpu->arch.exception.has_error_code;
- events->exception.error_code = vcpu->arch.exception.error_code;
- events->exception_has_payload = vcpu->arch.exception.has_payload;
- events->exception_payload = vcpu->arch.exception.payload;
+ events->exception.nr = ex->vector;
+ events->exception.has_error_code = ex->has_error_code;
+ events->exception.error_code = ex->error_code;
+ events->exception_has_payload = ex->has_payload;
+ events->exception_payload = ex->payload;
events->interrupt.injected =
vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
@@ -5118,9 +5161,22 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return -EINVAL;
process_nmi(vcpu);
+
+ /*
+ * Flag that userspace is stuffing an exception, the next KVM_RUN will
+ * morph the exception to a VM-Exit if appropriate. Do this only for
+ * pending exceptions; already-injected exceptions are not subject to
+ * interception. Note, userspace that conflates pending and injected
+ * is hosed, and will incorrectly convert an injected exception into a
+ * pending exception, which in turn may cause a spurious VM-Exit.
+ */
+ vcpu->arch.exception_from_userspace = events->exception.pending;
+
+ vcpu->arch.exception_vmexit.pending = false;
+
vcpu->arch.exception.injected = events->exception.injected;
vcpu->arch.exception.pending = events->exception.pending;
- vcpu->arch.exception.nr = events->exception.nr;
+ vcpu->arch.exception.vector = events->exception.nr;
vcpu->arch.exception.has_error_code = events->exception.has_error_code;
vcpu->arch.exception.error_code = events->exception.error_code;
vcpu->arch.exception.has_payload = events->exception_has_payload;
@@ -7244,6 +7300,7 @@ static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
int handle_ud(struct kvm_vcpu *vcpu)
{
static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
+ int fep_flags = READ_ONCE(force_emulation_prefix);
int emul_type = EMULTYPE_TRAP_UD;
char sig[5]; /* ud2; .ascii "kvm" */
struct x86_exception e;
@@ -7251,10 +7308,12 @@ int handle_ud(struct kvm_vcpu *vcpu)
if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))
return 1;
- if (force_emulation_prefix &&
+ if (fep_flags &&
kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu),
sig, sizeof(sig), &e) == 0 &&
memcmp(sig, kvm_emulate_prefix, sizeof(sig)) == 0) {
+ if (fep_flags & KVM_FEP_CLEAR_RFLAGS_RF)
+ kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF);
kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig));
emul_type = EMULTYPE_TRAP_UD_FORCED;
}
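
The five signature bytes checked above are ud2 (0x0f 0x0b) followed by the ASCII string "kvm". A hedged guest-side sketch of how a test might route a single instruction through the emulator when the force_emulation_prefix parameter is enabled; the wrapper below is modeled on KVM's selftests and is an assumption, not part of this patch:

/*
 * Sketch: execute an instruction through KVM's emulator by prepending
 * the forced-emulation prefix (ud2 + "kvm"). Assumes the host enabled
 * force_emulation_prefix; otherwise the guest takes a real #UD.
 */
#define KVM_FEP ".byte 0x0f, 0x0b, 0x6b, 0x76, 0x6d; " /* ud2; "kvm" */

static inline unsigned long long emulated_rdmsr_sketch(unsigned int msr)
{
        unsigned int lo, hi;

        asm volatile(KVM_FEP "rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
        return ((unsigned long long)hi << 32) | lo;
}

With the new KVM_FEP_CLEAR_RFLAGS_RF flag set in fep_flags, KVM additionally clears RFLAGS.RF before emulating, so code breakpoint #DBs can be exercised on the emulated instruction.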
@@ -7920,14 +7979,20 @@ static int emulator_get_msr_with_filter(struct x86_emulate_ctxt *ctxt,
int r;
r = kvm_get_msr_with_filter(vcpu, msr_index, pdata);
+ if (r < 0)
+ return X86EMUL_UNHANDLEABLE;
- if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
- complete_emulated_rdmsr, r)) {
- /* Bounce to user space */
- return X86EMUL_IO_NEEDED;
+ if (r) {
+ if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
+ complete_emulated_rdmsr, r))
+ return X86EMUL_IO_NEEDED;
+
+ trace_kvm_msr_read_ex(msr_index);
+ return X86EMUL_PROPAGATE_FAULT;
}
- return r;
+ trace_kvm_msr_read(msr_index, *pdata);
+ return X86EMUL_CONTINUE;
}
static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
@@ -7937,14 +8002,20 @@ static int emulator_set_msr_with_filter(struct x86_emulate_ctxt *ctxt,
int r;
r = kvm_set_msr_with_filter(vcpu, msr_index, data);
+ if (r < 0)
+ return X86EMUL_UNHANDLEABLE;
- if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
- complete_emulated_msr_access, r)) {
- /* Bounce to user space */
- return X86EMUL_IO_NEEDED;
+ if (r) {
+ if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
+ complete_emulated_msr_access, r))
+ return X86EMUL_IO_NEEDED;
+
+ trace_kvm_msr_write_ex(msr_index, data);
+ return X86EMUL_PROPAGATE_FAULT;
}
- return r;
+ trace_kvm_msr_write(msr_index, data);
+ return X86EMUL_CONTINUE;
}
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
@@ -8148,18 +8219,17 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
}
}
-static bool inject_emulated_exception(struct kvm_vcpu *vcpu)
+static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt;
- if (ctxt->exception.vector == PF_VECTOR)
- return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
- if (ctxt->exception.error_code_valid)
+ if (ctxt->exception.vector == PF_VECTOR)
+ kvm_inject_emulated_page_fault(vcpu, &ctxt->exception);
+ else if (ctxt->exception.error_code_valid)
kvm_queue_exception_e(vcpu, ctxt->exception.vector,
ctxt->exception.error_code);
else
kvm_queue_exception(vcpu, ctxt->exception.vector);
- return false;
}
static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu)
@@ -8535,8 +8605,46 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_skip_emulated_instruction);
-static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
+static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu)
+{
+ u32 shadow;
+
+ if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF)
+ return true;
+
+ /*
+ * Intel CPUs inhibit code #DBs when MOV/POP SS blocking is active,
+ * but AMD CPUs do not. MOV/POP SS blocking is rare, check that first
+ * to avoid the relatively expensive CPUID lookup.
+ */
+ shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
+ return (shadow & KVM_X86_SHADOW_INT_MOV_SS) &&
+ guest_cpuid_is_intel(vcpu);
+}
+
+static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu,
+ int emulation_type, int *r)
{
+ WARN_ON_ONCE(emulation_type & EMULTYPE_NO_DECODE);
+
+ /*
+ * Do not check for code breakpoints if hardware has already done the
+ * checks, as inferred from the emulation type. On NO_DECODE and SKIP,
+ * the instruction has passed all exception checks, and all intercepted
+ * exceptions that trigger emulation have lower priority than code
+ * breakpoints, i.e. the fact that the intercepted exception occurred
+ * means any code breakpoints have already been serviced.
+ *
+ * Note, KVM needs to check for code #DBs on EMULTYPE_TRAP_UD_FORCED as
+ * hardware has checked the RIP of the magic prefix, but not the RIP of
+ * the instruction being emulated. The intent of forced emulation is
+ * to behave as if KVM intercepted the instruction without an exception
+ * and without a prefix.
+ */
+ if (emulation_type & (EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
+ EMULTYPE_TRAP_UD | EMULTYPE_VMWARE_GP | EMULTYPE_PF))
+ return false;
+
if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
(vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
struct kvm_run *kvm_run = vcpu->run;
@@ -8556,7 +8664,7 @@ static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r)
}
if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
- !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
+ !kvm_is_code_breakpoint_inhibited(vcpu)) {
unsigned long eip = kvm_get_linear_rip(vcpu);
u32 dr6 = kvm_vcpu_check_hw_bp(eip, 0,
vcpu->arch.dr7,
@@ -8658,8 +8766,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
* are fault-like and are higher priority than any faults on
* the code fetch itself.
*/
- if (!(emulation_type & EMULTYPE_SKIP) &&
- kvm_vcpu_check_code_breakpoint(vcpu, &r))
+ if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r))
return r;
r = x86_decode_emulated_instruction(vcpu, emulation_type,
@@ -8757,8 +8864,7 @@ restart:
if (ctxt->have_exception) {
r = 1;
- if (inject_emulated_exception(vcpu))
- return r;
+ inject_emulated_exception(vcpu);
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in) {
/* FIXME: return into emulator if single-stepping. */
@@ -8788,6 +8894,12 @@ writeback:
unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
+
+ /*
+ * Note, EXCPT_DB is assumed to be fault-like as the emulator
+ * only supports code breakpoints and general detect #DB, both
+ * of which are fault-like.
+ */
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
@@ -9649,74 +9761,155 @@ int kvm_check_nested_events(struct kvm_vcpu *vcpu)
static void kvm_inject_exception(struct kvm_vcpu *vcpu)
{
- trace_kvm_inj_exception(vcpu->arch.exception.nr,
+ trace_kvm_inj_exception(vcpu->arch.exception.vector,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code,
vcpu->arch.exception.injected);
if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
vcpu->arch.exception.error_code = false;
- static_call(kvm_x86_queue_exception)(vcpu);
+ static_call(kvm_x86_inject_exception)(vcpu);
}
-static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
+/*
+ * Check for any event (interrupt or exception) that is ready to be injected,
+ * and if there is at least one event, inject the event with the highest
+ * priority. This handles both "pending" events, i.e. events that have never
+ * been injected into the guest, and "injected" events, i.e. events that were
+ * injected as part of a previous VM-Enter, but weren't successfully delivered
+ * and need to be re-injected.
+ *
+ * Note, this is not guaranteed to be invoked on a guest instruction boundary,
+ * i.e. doesn't guarantee that there's an event window in the guest. KVM must
+ * be able to inject exceptions in the "middle" of an instruction, and so must
+ * also be able to re-inject NMIs and IRQs in the middle of an instruction.
+ * I.e. for exceptions and re-injected events, NOT invoking this on instruction
+ * boundaries is necessary and correct.
+ *
+ * For simplicity, KVM uses a single path to inject all events (except events
+ * that are injected directly from L1 to L2) and doesn't explicitly track
+ * instruction boundaries for asynchronous events. However, because VM-Exits
+ * that can occur during instruction execution typically result in KVM skipping
+ * the instruction or injecting an exception, e.g. instruction and exception
+ * intercepts, and because pending exceptions have higher priority than pending
+ * interrupts, KVM still honors instruction boundaries in most scenarios.
+ *
+ * But, if a VM-Exit occurs during instruction execution, and KVM does NOT skip
+ * the instruction or inject an exception, then KVM can incorrectly inject a new
+ * asynchronous event if the event became pending after the CPU fetched the
+ * instruction (in the guest). E.g. if a page fault (#PF, #NPF, EPT violation)
+ * occurs and is resolved by KVM, a coincident NMI, SMI, IRQ, etc... can be
+ * injected on the restarted instruction instead of being deferred until the
+ * instruction completes.
+ *
+ * In practice, this virtualization hole is unlikely to be observed by the
+ * guest, and even less likely to cause functional problems. To detect the
+ * hole, the guest would have to trigger an event on a side effect of an early
+ * phase of instruction execution, e.g. on the instruction fetch from memory.
+ * And for it to be a functional problem, the guest would need to depend on the
+ * ordering between that side effect, the instruction completing, _and_ the
+ * delivery of the asynchronous event.
+ */
+static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
+ bool *req_immediate_exit)
{
+ bool can_inject;
int r;
- bool can_inject = true;
- /* try to reinject previous events if any */
+ /*
+ * Process nested events first, as nested VM-Exit supersedes event
+ * re-injection. If there's an event queued for re-injection, it will
+ * be saved into the appropriate vmc{b,s}12 fields on nested VM-Exit.
+ */
+ if (is_guest_mode(vcpu))
+ r = kvm_check_nested_events(vcpu);
+ else
+ r = 0;
- if (vcpu->arch.exception.injected) {
- kvm_inject_exception(vcpu);
- can_inject = false;
- }
/*
- * Do not inject an NMI or interrupt if there is a pending
- * exception. Exceptions and interrupts are recognized at
- * instruction boundaries, i.e. the start of an instruction.
- * Trap-like exceptions, e.g. #DB, have higher priority than
- * NMIs and interrupts, i.e. traps are recognized before an
- * NMI/interrupt that's pending on the same instruction.
- * Fault-like exceptions, e.g. #GP and #PF, are the lowest
- * priority, but are only generated (pended) during instruction
- * execution, i.e. a pending fault-like exception means the
- * fault occurred on the *previous* instruction and must be
- * serviced prior to recognizing any new events in order to
- * fully complete the previous instruction.
+ * Re-inject exceptions and events *especially* if immediate entry+exit
+ * to/from L2 is needed, as any event that has already been injected
+ * into L2 needs to complete its lifecycle before injecting a new event.
+ *
+ * Don't re-inject an NMI or interrupt if there is a pending exception.
+ * This collision arises if an exception occurred while vectoring the
+ * injected event, KVM intercepted said exception, and KVM ultimately
+ * determined the fault belongs to the guest and queues the exception
+ * for injection back into the guest.
+ *
+ * "Injected" interrupts can also collide with pending exceptions if
+ * userspace ignores the "ready for injection" flag and blindly queues
+ * an interrupt. In that case, prioritizing the exception is correct,
+ * as the exception "occurred" before the exit to userspace. Trap-like
+ * exceptions, e.g. most #DBs, have higher priority than interrupts.
+ * And while fault-like exceptions, e.g. #GP and #PF, are the lowest
+ * priority, they're only generated (pended) during instruction
+ * execution, and interrupts are recognized at instruction boundaries.
+ * Thus a pending fault-like exception means the fault occurred on the
+ * *previous* instruction and must be serviced prior to recognizing any
+ * new events in order to fully complete the previous instruction.
*/
- else if (!vcpu->arch.exception.pending) {
- if (vcpu->arch.nmi_injected) {
- static_call(kvm_x86_inject_nmi)(vcpu);
- can_inject = false;
- } else if (vcpu->arch.interrupt.injected) {
- static_call(kvm_x86_inject_irq)(vcpu, true);
- can_inject = false;
- }
- }
+ if (vcpu->arch.exception.injected)
+ kvm_inject_exception(vcpu);
+ else if (kvm_is_exception_pending(vcpu))
+ ; /* see above */
+ else if (vcpu->arch.nmi_injected)
+ static_call(kvm_x86_inject_nmi)(vcpu);
+ else if (vcpu->arch.interrupt.injected)
+ static_call(kvm_x86_inject_irq)(vcpu, true);
+ /*
+ * Exceptions that morph to VM-Exits are handled above, and pending
+ * exceptions on top of injected exceptions that do not VM-Exit should
+ * either morph to #DF or, sadly, override the injected exception.
+ */
WARN_ON_ONCE(vcpu->arch.exception.injected &&
vcpu->arch.exception.pending);
/*
- * Call check_nested_events() even if we reinjected a previous event
- * in order for caller to determine if it should require immediate-exit
- * from L2 to L1 due to pending L1 events which require exit
- * from L2 to L1.
+ * Bail if immediate entry+exit to/from the guest is needed to complete
+ * nested VM-Enter or event re-injection so that a different pending
+ * event can be serviced (or if KVM needs to exit to userspace).
+ *
+ * Otherwise, continue processing events even if VM-Exit occurred. The
+ * VM-Exit will have cleared exceptions that were meant for L2, but
+ * there may now be events that can be injected into L1.
*/
- if (is_guest_mode(vcpu)) {
- r = kvm_check_nested_events(vcpu);
- if (r < 0)
- goto out;
- }
+ if (r < 0)
+ goto out;
+
+ /*
+ * A pending exception VM-Exit should either result in nested VM-Exit
+ * or force an immediate re-entry and exit to/from L2, and exception
+ * VM-Exits cannot be injected (flag should _never_ be set).
+ */
+ WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected ||
+ vcpu->arch.exception_vmexit.pending);
+
+ /*
+ * New events, other than exceptions, cannot be injected if KVM needs
+ * to re-inject a previous event. See above comments on re-injecting
+ * for why pending exceptions get priority.
+ */
+ can_inject = !kvm_event_needs_reinjection(vcpu);
- /* try to inject new event if pending */
if (vcpu->arch.exception.pending) {
- if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
+ /*
+ * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS
+ * value pushed on the stack. Trap-like exceptions and all #DBs
+ * leave RF as-is (KVM follows Intel's behavior in this regard;
+ * AMD states that code breakpoint #DBs explicitly clear RF=0).
+ *
+ * Note, most versions of Intel's SDM and AMD's APM incorrectly
+ * describe the behavior of General Detect #DBs, which are
+ * fault-like. They do _not_ set RF, a la code breakpoints.
+ */
+ if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT)
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
X86_EFLAGS_RF);
- if (vcpu->arch.exception.nr == DB_VECTOR) {
- kvm_deliver_exception_payload(vcpu);
+ if (vcpu->arch.exception.vector == DB_VECTOR) {
+ kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception);
if (vcpu->arch.dr7 & DR7_GD) {
vcpu->arch.dr7 &= ~DR7_GD;
kvm_update_dr7(vcpu);
@@ -9788,11 +9981,11 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit)
}
if (is_guest_mode(vcpu) &&
- kvm_x86_ops.nested_ops->hv_timer_pending &&
- kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+ kvm_x86_ops.nested_ops->has_events &&
+ kvm_x86_ops.nested_ops->has_events(vcpu))
*req_immediate_exit = true;
- WARN_ON(vcpu->arch.exception.pending);
+ WARN_ON(kvm_is_exception_pending(vcpu));
return 0;
out:
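
The long comment above boils down to a small priority ladder: finish any event whose delivery already started, and let a pending exception block re-injection of NMIs/IRQs. A simplified model of that ladder, with the KVM internals stubbed out as booleans (field names abbreviated; this sketches the flow, not the function itself):

/* Simplified model of the re-injection rules described above. */
struct ev_state {
        int exc_injected, exc_pending;
        int nmi_injected, irq_injected;
};

static const char *reinject_step(const struct ev_state *s)
{
        if (s->exc_injected)
                return "re-inject the exception";
        if (s->exc_pending)
                return "hold NMI/IRQ re-injection: pending exception first";
        if (s->nmi_injected)
                return "re-inject the NMI";
        if (s->irq_injected)
                return "re-inject the IRQ";
        return "nothing to re-inject; new events may be considered";
}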
@@ -10097,7 +10290,7 @@ void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu)
* When APICv gets disabled, we may still have injected interrupts
* pending. At the same time, KVM_REQ_EVENT may not be set as APICv was
* still active when the interrupt got accepted. Make sure
- * inject_pending_event() is called to check for that.
+ * kvm_check_and_inject_events() is called to check for that.
*/
if (!apic->apicv_active)
kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -10394,7 +10587,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto out;
}
- r = inject_pending_event(vcpu, &req_immediate_exit);
+ r = kvm_check_and_inject_events(vcpu, &req_immediate_exit);
if (r < 0) {
r = 0;
goto out;
@@ -10633,10 +10826,26 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
if (hv_timer)
kvm_lapic_switch_to_hv_timer(vcpu);
- if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
+ /*
+ * If the vCPU is not runnable, a signal or another host event
+ * of some kind is pending; service it without changing the
+ * vCPU's activity state.
+ */
+ if (!kvm_arch_vcpu_runnable(vcpu))
return 1;
}
+ /*
+ * Evaluate nested events before exiting the halted state. This allows
+ * the halt state to be recorded properly in the VMCS12's activity
+ * state field (AMD does not have a similar field and a VM-Exit always
+ * causes a spurious wakeup from HLT).
+ */
+ if (is_guest_mode(vcpu)) {
+ if (kvm_check_nested_events(vcpu) < 0)
+ return 0;
+ }
+
if (kvm_apic_accept_events(vcpu) < 0)
return 0;
switch(vcpu->arch.mp_state) {
@@ -10652,16 +10861,14 @@ static inline int vcpu_block(struct kvm_vcpu *vcpu)
case KVM_MP_STATE_INIT_RECEIVED:
break;
default:
- return -EINTR;
+ WARN_ON_ONCE(1);
+ break;
}
return 1;
}
static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
{
- if (is_guest_mode(vcpu))
- kvm_check_nested_events(vcpu);
-
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted);
}
@@ -10810,6 +11017,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
+ struct kvm_queued_exception *ex = &vcpu->arch.exception;
struct kvm_run *kvm_run = vcpu->run;
int r;
@@ -10838,7 +11046,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
r = 0;
goto out;
}
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
r = -EAGAIN;
if (signal_pending(current)) {
r = -EINTR;
@@ -10868,6 +11075,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
}
+ /*
+ * If userspace set a pending exception and L2 is active, convert it to
+ * a pending VM-Exit if L1 wants to intercept the exception.
+ */
+ if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) &&
+ kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector,
+ ex->error_code)) {
+ kvm_queue_exception_vmexit(vcpu, ex->vector,
+ ex->has_error_code, ex->error_code,
+ ex->has_payload, ex->payload);
+ ex->injected = false;
+ ex->pending = false;
+ }
+ vcpu->arch.exception_from_userspace = false;
+
if (unlikely(vcpu->arch.complete_userspace_io)) {
int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
vcpu->arch.complete_userspace_io = NULL;
@@ -10974,6 +11196,7 @@ static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
vcpu->arch.exception.pending = false;
+ vcpu->arch.exception_vmexit.pending = false;
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
@@ -11093,16 +11316,30 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
vcpu_load(vcpu);
- if (!lapic_in_kernel(vcpu) &&
- mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
+ switch (mp_state->mp_state) {
+ case KVM_MP_STATE_UNINITIALIZED:
+ case KVM_MP_STATE_HALTED:
+ case KVM_MP_STATE_AP_RESET_HOLD:
+ case KVM_MP_STATE_INIT_RECEIVED:
+ case KVM_MP_STATE_SIPI_RECEIVED:
+ if (!lapic_in_kernel(vcpu))
+ goto out;
+ break;
+
+ case KVM_MP_STATE_RUNNABLE:
+ break;
+
+ default:
goto out;
+ }
/*
- * KVM_MP_STATE_INIT_RECEIVED means the processor is in
- * INIT state; latched init should be reported using
- * KVM_SET_VCPU_EVENTS, so reject it here.
+ * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow
+ * forcing the guest into INIT/SIPI if those events are supposed to be
+ * blocked. KVM prioritizes SMI over INIT, so reject INIT/SIPI state
+ * if an SMI is pending as well.
*/
- if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) &&
+ if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) &&
(mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
goto out;
@@ -11341,7 +11578,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
r = -EBUSY;
- if (vcpu->arch.exception.pending)
+ if (kvm_is_exception_pending(vcpu))
goto out;
if (dbg->control & KVM_GUESTDBG_INJECT_DB)
kvm_queue_exception(vcpu, DB_VECTOR);
@@ -11563,7 +11800,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64),
GFP_KERNEL_ACCOUNT);
if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks)
- goto fail_free_pio_data;
+ goto fail_free_mce_banks;
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask,
@@ -11617,7 +11854,6 @@ free_wbinvd_dirty_mask:
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
kfree(vcpu->arch.mci_ctl2_banks);
-fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
fail_free_lapic:
kvm_free_lapic(vcpu);
@@ -11724,8 +11960,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate;
/*
- * To avoid have the INIT path from kvm_apic_has_events() that be
- * called with loaded FPU and does not let userspace fix the state.
+ * All paths that lead to INIT are required to load the guest's
+ * FPU state (because most paths are buried in KVM_RUN).
*/
if (init_event)
kvm_put_guest_fpu(vcpu);
@@ -12054,6 +12290,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
goto out_page_track;
+ ret = static_call(kvm_x86_vm_init)(kvm);
+ if (ret)
+ goto out_uninit_mmu;
+
INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
atomic_set(&kvm->arch.noncoherent_dma_count, 0);
@@ -12089,8 +12329,10 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
kvm_hv_init_vm(kvm);
kvm_xen_init_vm(kvm);
- return static_call(kvm_x86_vm_init)(kvm);
+ return 0;
+out_uninit_mmu:
+ kvm_mmu_uninit_vm(kvm);
out_page_track:
kvm_page_track_cleanup(kvm);
out:
@@ -12473,6 +12715,50 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
} else {
kvm_mmu_slot_remove_write_access(kvm, new, PG_LEVEL_4K);
}
+
+ /*
+ * Unconditionally flush the TLBs after enabling dirty logging.
+ * A flush is almost always going to be necessary (see below),
+ * and unconditionally flushing allows the helpers to omit
+ * the subtly complex checks when removing write access.
+ *
+ * Do the flush outside of mmu_lock to reduce the amount of
+ * time mmu_lock is held. Flushing after dropping mmu_lock is
+ * safe as KVM only needs to guarantee the slot is fully
+ * write-protected before returning to userspace, i.e. before
+ * userspace can consume the dirty status.
+ *
+ * Flushing outside of mmu_lock requires KVM to be careful when
+ * making decisions based on the writable status of an SPTE, e.g. a
+ * !writable SPTE doesn't guarantee a CPU can't perform writes.
+ *
+ * Specifically, KVM also write-protects guest page tables to
+ * monitor changes when using shadow paging, and must guarantee
+ * no CPUs can write to those page tables before mmu_lock is dropped.
+ * Because CPUs may have stale TLB entries at this point, a
+ * !writable SPTE doesn't guarantee CPUs can't perform writes.
+ *
+ * KVM also allows making SPTES writable outside of mmu_lock,
+ * e.g. to allow dirty logging without taking mmu_lock.
+ *
+ * To handle these scenarios, KVM uses a separate software-only
+ * bit (MMU-writable) to track if a SPTE is !writable due to
+ * a guest page table being write-protected (KVM clears the
+ * MMU-writable flag when write-protecting for shadow paging).
+ *
+ * The use of MMU-writable is also the primary motivation for
+ * the unconditional flush. Because KVM must guarantee that a
+ * CPU doesn't contain stale, writable TLB entries for a
+ * !MMU-writable SPTE, KVM must flush if it encounters any
+ * MMU-writable SPTE regardless of whether the actual hardware
+ * writable bit was set. I.e. KVM is almost guaranteed to need
+ * to flush, while unconditionally flushing allows the "remove
+ * write access" helpers to ignore MMU-writable entirely.
+ *
+ * See is_writable_pte() for more details (the case involving
+ * access-tracked SPTEs is particularly relevant).
+ */
+ kvm_arch_flush_remote_tlbs_memslot(kvm, new);
}
}
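
A compact model of the two writability bits discussed in the comment above; the bit positions and names are stand-ins (the real masks live in KVM's spte.h, outside this hunk):

/* Model only: one hardware bit, one software-only bit per SPTE. */
#define SPTE_HW_WRITABLE   (1ULL << 1)   /* illustrative position */
#define SPTE_MMU_WRITABLE  (1ULL << 57)  /* illustrative position */

static int may_have_stale_writable_tlb(unsigned long long spte)
{
        /*
         * The hardware-writable bit alone is not authoritative: an
         * MMU-writable SPTE may still be cached writable in some TLB
         * even if the hardware bit is clear, which is why the patch
         * flushes unconditionally after write-protecting the slot.
         */
        return !!(spte & SPTE_MMU_WRITABLE);
}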
@@ -12519,13 +12805,14 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
if (!list_empty_careful(&vcpu->async_pf.done))
return true;
- if (kvm_apic_has_events(vcpu))
+ if (kvm_apic_has_pending_init_or_sipi(vcpu) &&
+ kvm_apic_init_sipi_allowed(vcpu))
return true;
if (vcpu->arch.pv.pv_unhalted)
return true;
- if (vcpu->arch.exception.pending)
+ if (kvm_is_exception_pending(vcpu))
return true;
if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
@@ -12547,16 +12834,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return true;
if (is_guest_mode(vcpu) &&
- kvm_x86_ops.nested_ops->hv_timer_pending &&
- kvm_x86_ops.nested_ops->hv_timer_pending(vcpu))
+ kvm_x86_ops.nested_ops->has_events &&
+ kvm_x86_ops.nested_ops->has_events(vcpu))
return true;
if (kvm_xen_has_pending_events(vcpu))
return true;
- if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu))
- return true;
-
return false;
}
@@ -12780,7 +13064,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
if (unlikely(!lapic_in_kernel(vcpu) ||
kvm_event_needs_reinjection(vcpu) ||
- vcpu->arch.exception.pending))
+ kvm_is_exception_pending(vcpu)))
return false;
if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu))
@@ -13331,7 +13615,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
-EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmenter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 1926d2cb8e79..829d3134c1eb 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -82,10 +82,18 @@ static inline unsigned int __shrink_ple_window(unsigned int val,
void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu);
int kvm_check_nested_events(struct kvm_vcpu *vcpu);
+static inline bool kvm_is_exception_pending(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.exception.pending ||
+ vcpu->arch.exception_vmexit.pending ||
+ kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+}
+
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
vcpu->arch.exception.pending = false;
vcpu->arch.exception.injected = false;
+ vcpu->arch.exception_vmexit.pending = false;
}
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
@@ -267,11 +275,6 @@ static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
return !(kvm->arch.disabled_quirks & quirk);
}
-static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
-{
- return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
-}
-
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
u64 get_kvmclock_ns(struct kvm *kvm);
@@ -286,7 +289,8 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
int handle_ud(struct kvm_vcpu *vcpu);
-void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu,
+ struct kvm_queued_exception *ex);
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index 280cb5dc7341..93c628d3e3a9 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -1065,7 +1065,6 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
del_timer(&vcpu->arch.xen.poll_timer);
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
vcpu->arch.xen.poll_evtchn = 0;
diff --git a/block/blk-core.c b/block/blk-core.c
index a0d1104c5590..651057c4146b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -295,7 +295,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
while (!blk_try_enter_queue(q, pm)) {
if (flags & BLK_MQ_REQ_NOWAIT)
- return -EBUSY;
+ return -EAGAIN;
/*
* read pair of barrier in blk_freeze_queue_start(), we need to
@@ -325,7 +325,7 @@ int __bio_queue_enter(struct request_queue *q, struct bio *bio)
if (test_bit(GD_DEAD, &disk->state))
goto dead;
bio_wouldblock_error(bio);
- return -EBUSY;
+ return -EAGAIN;
}
/*
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 67e6dbc1ae81..e59c3069e835 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -309,6 +309,11 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
struct blk_plug plug;
int ret = 0;
+ /* make sure that "len << SECTOR_SHIFT" doesn't overflow */
+ if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
+ max_sectors = UINT_MAX >> SECTOR_SHIFT;
+ max_sectors &= ~bs_mask;
+
if (max_sectors == 0)
return -EOPNOTSUPP;
if ((sector | nr_sects) & bs_mask)
@@ -322,10 +327,10 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
bio->bi_iter.bi_sector = sector;
- bio->bi_iter.bi_size = len;
+ bio->bi_iter.bi_size = len << SECTOR_SHIFT;
- sector += len << SECTOR_SHIFT;
- nr_sects -= len << SECTOR_SHIFT;
+ sector += len;
+ nr_sects -= len;
if (!nr_sects) {
ret = submit_bio_wait(bio);
bio_put(bio);
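
The clamp added above exists because bi_iter.bi_size is a 32-bit byte count while len is in 512-byte sectors, so len << SECTOR_SHIFT must fit in a u32. A quick worked check of the bound, assuming SECTOR_SHIFT == 9:

#include <assert.h>
#include <stdint.h>

#define SECTOR_SHIFT 9

int main(void)
{
        /* Largest sector count whose byte size still fits in a u32. */
        uint32_t max_sectors = UINT32_MAX >> SECTOR_SHIFT; /* 8388607 */

        /* 8388607 << 9 = 4294966784 bytes, just under 4 GiB: no overflow. */
        assert(((uint64_t)max_sectors << SECTOR_SHIFT) <= UINT32_MAX);

        /* One more sector would wrap a 32-bit byte count. */
        assert(((uint64_t)(max_sectors + 1) << SECTOR_SHIFT) > UINT32_MAX);
        return 0;
}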
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 8559cea7f300..dee789f2f98f 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -283,7 +283,9 @@ static const char *const rqf_name[] = {
RQF_NAME(SPECIAL_PAYLOAD),
RQF_NAME(ZONE_WRITE_LOCKED),
RQF_NAME(MQ_POLL_SLEPT),
+ RQF_NAME(TIMED_OUT),
RQF_NAME(ELV),
+ RQF_NAME(RESV),
};
#undef RQF_NAME
diff --git a/block/partitions/core.c b/block/partitions/core.c
index fc1d70384825..b8112f52d388 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -596,6 +596,9 @@ static int blk_add_partitions(struct gendisk *disk)
if (disk->flags & GENHD_FL_NO_PART)
return 0;
+ if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state))
+ return 0;
+
state = check_partition(disk);
if (!state)
return 0;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 32b0e0b930c1..110a535648d2 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -209,6 +209,7 @@ static int amba_match(struct device *dev, struct device_driver *drv)
struct amba_device *pcdev = to_amba_device(dev);
struct amba_driver *pcdrv = to_amba_driver(drv);
+ mutex_lock(&pcdev->periphid_lock);
if (!pcdev->periphid) {
int ret = amba_read_periphid(pcdev);
@@ -218,11 +219,14 @@ static int amba_match(struct device *dev, struct device_driver *drv)
* permanent failure in reading pid and cid, simply map it to
* -EPROBE_DEFER.
*/
- if (ret)
+ if (ret) {
+ mutex_unlock(&pcdev->periphid_lock);
return -EPROBE_DEFER;
+ }
dev_set_uevent_suppress(dev, false);
kobject_uevent(&dev->kobj, KOBJ_ADD);
}
+ mutex_unlock(&pcdev->periphid_lock);
/* When driver_override is set, only bind to the matching driver */
if (pcdev->driver_override)
@@ -532,6 +536,7 @@ static void amba_device_release(struct device *dev)
if (d->res.parent)
release_resource(&d->res);
+ mutex_destroy(&d->periphid_lock);
kfree(d);
}
@@ -584,6 +589,7 @@ static void amba_device_initialize(struct amba_device *dev, const char *name)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
dev->dev.dma_parms = &dev->dma_parms;
dev->res.name = dev_name(&dev->dev);
+ mutex_init(&dev->periphid_lock);
}
/**
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c964d7c8c384..6428f6be69e3 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1385,6 +1385,18 @@ static int binder_inc_ref_for_node(struct binder_proc *proc,
}
ret = binder_inc_ref_olocked(ref, strong, target_list);
*rdata = ref->data;
+ if (ret && ref == new_ref) {
+ /*
+ * Cleanup the failed reference here as the target
+ * could now be dead and have already released its
+ * references by now. Calling on the new reference
+ * with strong=0 and a tmp_refs will not decrement
+ * the node. The new_ref gets kfree'd below.
+ */
+ binder_cleanup_ref_olocked(new_ref);
+ ref = NULL;
+ }
+
binder_proc_unlock(proc);
if (new_ref && ref != new_ref)
/*
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 51f4e1c5cd01..9b1778c00610 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -322,7 +322,6 @@ static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
*/
if (vma) {
vm_start = vma->vm_start;
- alloc->vma_vm_mm = vma->vm_mm;
mmap_assert_write_locked(alloc->vma_vm_mm);
} else {
mmap_assert_locked(alloc->vma_vm_mm);
@@ -795,7 +794,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
binder_insert_free_buffer(alloc, buffer);
alloc->free_async_space = alloc->buffer_size / 2;
binder_alloc_set_vma(alloc, vma);
- mmgrab(alloc->vma_vm_mm);
return 0;
@@ -1091,6 +1089,8 @@ static struct shrinker binder_shrinker = {
void binder_alloc_init(struct binder_alloc *alloc)
{
alloc->pid = current->group_leader->pid;
+ alloc->vma_vm_mm = current->mm;
+ mmgrab(alloc->vma_vm_mm);
mutex_init(&alloc->mutex);
INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 0424b59b695e..46cbe4471e78 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -724,7 +724,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu)
*/
if (cpumask_subset(cpu_coregroup_mask(cpu),
&cpu_topology[cpu].cluster_sibling))
- return get_cpu_mask(cpu);
+ return topology_sibling_cpumask(cpu);
return &cpu_topology[cpu].cluster_sibling;
}
@@ -735,7 +735,7 @@ void update_siblings_masks(unsigned int cpuid)
int cpu, ret;
ret = detect_cache_attributes(cpuid);
- if (ret)
+ if (ret && ret != -ENOENT)
pr_info("Early cacheinfo failed, ret = %d\n", ret);
/* update core and thread sibling masks */
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 70f79fc71539..ec69b43f926a 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -274,12 +274,42 @@ static int __init deferred_probe_timeout_setup(char *str)
}
__setup("deferred_probe_timeout=", deferred_probe_timeout_setup);
+/**
+ * driver_deferred_probe_check_state() - Check deferred probe state
+ * @dev: device to check
+ *
+ * Return:
+ * * -ENODEV if initcalls have completed and modules are disabled.
+ * * -ETIMEDOUT if the deferred probe timeout was set and has expired
+ * and modules are enabled.
+ * * -EPROBE_DEFER in other cases.
+ *
+ * Drivers or subsystems can opt-in to calling this function instead of directly
+ * returning -EPROBE_DEFER.
+ */
+int driver_deferred_probe_check_state(struct device *dev)
+{
+ if (!IS_ENABLED(CONFIG_MODULES) && initcalls_done) {
+ dev_warn(dev, "ignoring dependency for device, assuming no driver\n");
+ return -ENODEV;
+ }
+
+ if (!driver_deferred_probe_timeout && initcalls_done) {
+ dev_warn(dev, "deferred probe timeout, ignoring dependency\n");
+ return -ETIMEDOUT;
+ }
+
+ return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL_GPL(driver_deferred_probe_check_state);
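+
A hedged sketch of the intended call site: a hypothetical probe routine that depends on another device and lets the helper decide between deferral and giving up. The driver name and the clock dependency are invented for illustration; only the helper call reflects this patch.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct clk *clk = devm_clk_get(&pdev->dev, NULL);

        if (IS_ERR(clk)) {
                if (PTR_ERR(clk) == -EPROBE_DEFER)
                        /* becomes -ENODEV, -ETIMEDOUT or -EPROBE_DEFER */
                        return driver_deferred_probe_check_state(&pdev->dev);
                return PTR_ERR(clk);
        }
        return 0;
}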
+
static void deferred_probe_timeout_work_func(struct work_struct *work)
{
struct device_private *p;
fw_devlink_drivers_done();
+ driver_deferred_probe_timeout = 0;
driver_deferred_probe_trigger();
flush_work(&deferred_probe_work);
@@ -881,6 +911,11 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Device can't match with a driver right now, so don't attempt
+ * to match or bind with other drivers on the bus.
+ */
+ return ret;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
@@ -1120,6 +1155,11 @@ static int __driver_attach(struct device *dev, void *data)
dev_dbg(dev, "Device match requests probe deferral\n");
dev->can_match = true;
driver_deferred_probe_add(dev);
+ /*
+ * Driver could not match with device, but may match with
+ * another device on the bus.
+ */
+ return 0;
} else if (ret < 0) {
dev_dbg(dev, "Bus failed to match device: %d\n", ret);
return ret;
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 15a75afe6b84..676b6275d5b5 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -63,6 +63,12 @@ int driver_set_override(struct device *dev, const char **override,
if (len >= (PAGE_SIZE - 1))
return -EINVAL;
+ /*
+ * Compute the real length of the string in case userspace sends us a
+ * bunch of \0 characters like Python likes to do.
+ */
+ len = strlen(s);
+
if (!len) {
/* Empty string passed - clear override */
device_lock(dev);
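
Why the added strlen(): a sysfs write of "foo\0\0\0" arrives with len == 6, but the override should be the three visible characters. A tiny user-space style check of the same semantics:

#include <assert.h>
#include <string.h>

int main(void)
{
        /* What a Python f.write("foo\0\0\0") delivers to the store op. */
        const char buf[] = "foo\0\0\0";
        size_t len = sizeof(buf) - 1;   /* 6 bytes as counted by sysfs */

        assert(len == 6);
        assert(strlen(buf) == 3);       /* the length the patch now uses */
        return 0;
}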
diff --git a/drivers/base/firmware_loader/sysfs.c b/drivers/base/firmware_loader/sysfs.c
index 77bad32c481a..5b66b3d1fa16 100644
--- a/drivers/base/firmware_loader/sysfs.c
+++ b/drivers/base/firmware_loader/sysfs.c
@@ -93,10 +93,9 @@ static void fw_dev_release(struct device *dev)
{
struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
- if (fw_sysfs->fw_upload_priv) {
- free_fw_priv(fw_sysfs->fw_priv);
- kfree(fw_sysfs->fw_upload_priv);
- }
+ if (fw_sysfs->fw_upload_priv)
+ fw_upload_free(fw_sysfs);
+
kfree(fw_sysfs);
}
diff --git a/drivers/base/firmware_loader/sysfs.h b/drivers/base/firmware_loader/sysfs.h
index 5d8ff1675c79..df1d5add698f 100644
--- a/drivers/base/firmware_loader/sysfs.h
+++ b/drivers/base/firmware_loader/sysfs.h
@@ -106,12 +106,17 @@ extern struct device_attribute dev_attr_cancel;
extern struct device_attribute dev_attr_remaining_size;
int fw_upload_start(struct fw_sysfs *fw_sysfs);
+void fw_upload_free(struct fw_sysfs *fw_sysfs);
umode_t fw_upload_is_visible(struct kobject *kobj, struct attribute *attr, int n);
#else
static inline int fw_upload_start(struct fw_sysfs *fw_sysfs)
{
return 0;
}
+
+static inline void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+}
#endif
#endif /* __FIRMWARE_SYSFS_H */
diff --git a/drivers/base/firmware_loader/sysfs_upload.c b/drivers/base/firmware_loader/sysfs_upload.c
index 87044d52322a..a0af8f5f13d8 100644
--- a/drivers/base/firmware_loader/sysfs_upload.c
+++ b/drivers/base/firmware_loader/sysfs_upload.c
@@ -264,6 +264,15 @@ int fw_upload_start(struct fw_sysfs *fw_sysfs)
return 0;
}
+void fw_upload_free(struct fw_sysfs *fw_sysfs)
+{
+ struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+
+ free_fw_priv(fw_sysfs->fw_priv);
+ kfree(fw_upload_priv->fw_upload);
+ kfree(fw_upload_priv);
+}
+
/**
* firmware_upload_register() - register for the firmware upload sysfs API
* @module: kernel module of this device
@@ -377,6 +386,7 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
{
struct fw_sysfs *fw_sysfs = fw_upload->priv;
struct fw_upload_priv *fw_upload_priv = fw_sysfs->fw_upload_priv;
+ struct module *module = fw_upload_priv->module;
mutex_lock(&fw_upload_priv->lock);
if (fw_upload_priv->progress == FW_UPLOAD_PROG_IDLE) {
@@ -392,6 +402,6 @@ void firmware_upload_unregister(struct fw_upload *fw_upload)
unregister:
device_unregister(&fw_sysfs->dev);
- module_put(fw_upload_priv->module);
+ module_put(module);
}
EXPORT_SYMBOL_GPL(firmware_upload_unregister);
diff --git a/drivers/base/node.c b/drivers/base/node.c
index eb0f43784c2b..432d40a5f910 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -433,6 +433,7 @@ static ssize_t node_read_meminfo(struct device *dev,
"Node %d ShadowCallStack:%8lu kB\n"
#endif
"Node %d PageTables: %8lu kB\n"
+ "Node %d SecPageTables: %8lu kB\n"
"Node %d NFS_Unstable: %8lu kB\n"
"Node %d Bounce: %8lu kB\n"
"Node %d WritebackTmp: %8lu kB\n"
@@ -459,6 +460,7 @@ static ssize_t node_read_meminfo(struct device *dev,
nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
nid, K(node_page_state(pgdat, NR_PAGETABLE)),
+ nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
nid, 0UL,
nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 5a2e0232862e..55a10e6d4e2a 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2733,7 +2733,7 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device *base_dev,
mutex_unlock(&gpd_list_lock);
dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
__func__, PTR_ERR(pd));
- return -ENODEV;
+ return driver_deferred_probe_check_state(base_dev);
}
dev_dbg(dev, "adding to PM domain %s\n", pd->name);
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index 719323bc6c7f..37ab23a9d034 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -113,6 +113,7 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
const struct regmap_config *config)
{
size_t max_size = spi_max_transfer_size(spi);
+ size_t max_msg_size, reg_reserve_size;
struct regmap_bus *bus;
if (max_size != SIZE_MAX) {
@@ -120,9 +121,16 @@ static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
if (!bus)
return ERR_PTR(-ENOMEM);
+ max_msg_size = spi_max_message_size(spi);
+ reg_reserve_size = config->reg_bits / BITS_PER_BYTE
+ + config->pad_bits / BITS_PER_BYTE;
+ if (max_size + reg_reserve_size > max_msg_size)
+ max_size -= reg_reserve_size;
+
bus->free_on_exit = true;
bus->max_raw_read = max_size;
bus->max_raw_write = max_size;
+
return bus;
}
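
The reservation math above in worked form, assuming an 8-bit register address, no pad bits, and a controller whose per-message limit equals its per-transfer limit:

#define BITS_PER_BYTE 8

/* Sketch with assumed values: reg_bits = 8, pad_bits = 0. */
static unsigned long adjusted_max_raw(unsigned long max_size,
                                      unsigned long max_msg_size,
                                      unsigned int reg_bits,
                                      unsigned int pad_bits)
{
        unsigned long reg_reserve_size =
                reg_bits / BITS_PER_BYTE + pad_bits / BITS_PER_BYTE;

        /*
         * If a full-size transfer plus the register/padding bytes would
         * exceed the controller's per-message limit, shrink the raw
         * read/write limit so the address bytes always fit.
         */
        if (max_size + reg_reserve_size > max_msg_size)
                max_size -= reg_reserve_size;
        return max_size;
}

With max_size == max_msg_size == 256, reg_bits == 8 and pad_bits == 0, the raw limit becomes 255, leaving one byte of the message for the register address.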
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index bda5c815e441..a28473470e66 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -226,6 +226,9 @@ struct xen_vbd {
sector_t size;
unsigned int flush_support:1;
unsigned int discard_secure:1;
+ /* Connect-time cached feature_persistent parameter value */
+ unsigned int feature_gnt_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_gnt_persistent:1;
unsigned int overflow_max_grants:1;
};
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index ee7ad2fb432d..c0227dfa4688 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -907,7 +907,7 @@ again:
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- be->blkif->vbd.feature_gnt_persistent);
+ be->blkif->vbd.feature_gnt_persistent_parm);
if (err) {
xenbus_dev_fatal(dev, err, "writing %s/feature-persistent",
dev->nodename);
@@ -1085,7 +1085,9 @@ static int connect_ring(struct backend_info *be)
return -ENOSYS;
}
- blkif->vbd.feature_gnt_persistent = feature_persistent &&
+ blkif->vbd.feature_gnt_persistent_parm = feature_persistent;
+ blkif->vbd.feature_gnt_persistent =
+ blkif->vbd.feature_gnt_persistent_parm &&
xenbus_read_unsigned(dev->otherend, "feature-persistent", 0);
blkif->vbd.overflow_max_grants = 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8e56e69fb4c4..35b9bcad9db9 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -213,6 +213,9 @@ struct blkfront_info
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ /* Connect-time cached feature_persistent parameter */
+ unsigned int feature_persistent_parm:1;
+ /* Persistent grants feature negotiation result */
unsigned int feature_persistent:1;
unsigned int bounce:1;
unsigned int discard_granularity;
@@ -1756,6 +1759,12 @@ abort_transaction:
return err;
}
+/* Enable the persistent grants feature. */
+static bool feature_persistent = true;
+module_param(feature_persistent, bool, 0644);
+MODULE_PARM_DESC(feature_persistent,
+ "Enables the persistent grants feature");
+
/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
struct blkfront_info *info)
@@ -1847,8 +1856,9 @@ again:
message = "writing protocol";
goto abort_transaction;
}
+ info->feature_persistent_parm = feature_persistent;
err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
- info->feature_persistent);
+ info->feature_persistent_parm);
if (err)
dev_warn(&dev->dev,
"writing persistent grants feature to xenbus");
@@ -1916,12 +1926,6 @@ static int negotiate_mq(struct blkfront_info *info)
return 0;
}
-/* Enable the persistent grants feature. */
-static bool feature_persistent = true;
-module_param(feature_persistent, bool, 0644);
-MODULE_PARM_DESC(feature_persistent,
- "Enables the persistent grants feature");
-
/*
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -2281,7 +2285,7 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
blkfront_setup_discard(info);
- if (feature_persistent)
+ if (info->feature_persistent_parm)
info->feature_persistent =
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index f3aef77a6a4a..df0fbfee7b78 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -430,12 +430,25 @@ irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
struct mhi_event *mhi_event = dev;
struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
- struct mhi_event_ctxt *er_ctxt =
- &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_event_ctxt *er_ctxt;
struct mhi_ring *ev_ring = &mhi_event->ring;
- dma_addr_t ptr = le64_to_cpu(er_ctxt->rp);
+ dma_addr_t ptr;
void *dev_rp;
+ /*
+ * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq()
+ * and by that time mhi_ctxt would already have been freed. So check for the
+ * existence of mhi_ctxt before handling the IRQ.
+ */
+ if (!mhi_cntrl->mhi_ctxt) {
+ dev_dbg(&mhi_cntrl->mhi_dev->dev,
+ "mhi_ctxt has been freed\n");
+ return IRQ_HANDLED;
+ }
+
+ er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ ptr = le64_to_cpu(er_ctxt->rp);
+
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev,
"Event ring rp points outside of the event ring\n");
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 84ca98ed1dad..32a932a065a6 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -480,6 +480,11 @@ static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
+static int uring_cmd_null(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
+{
+ return 0;
+}
+
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
size_t written = 0;
@@ -663,6 +668,7 @@ static const struct file_operations null_fops = {
.read_iter = read_iter_null,
.write_iter = write_iter_null,
.splice_write = splice_write_null,
+ .uring_cmd = uring_cmd_null,
};
static const struct file_operations __maybe_unused port_fops = {
diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c
index 73518009a0f2..876b37b8683c 100644
--- a/drivers/clk/bcm/clk-raspberrypi.c
+++ b/drivers/clk/bcm/clk-raspberrypi.c
@@ -203,7 +203,7 @@ static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_GET_CLOCK_RATE, &val);
if (ret)
- return ret;
+ return 0;
return val;
}
@@ -220,7 +220,7 @@ static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate,
ret = raspberrypi_clock_property(rpi->firmware, data,
RPI_FIRMWARE_SET_CLOCK_RATE, &_rate);
if (ret)
- dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d",
+ dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d\n",
clk_hw_get_name(hw), ret);
return ret;
@@ -288,7 +288,7 @@ static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi,
RPI_FIRMWARE_GET_MIN_CLOCK_RATE,
&min_rate);
if (ret) {
- dev_err(rpi->dev, "Failed to get clock %d min freq: %d",
+ dev_err(rpi->dev, "Failed to get clock %d min freq: %d\n",
id, ret);
return ERR_PTR(ret);
}
@@ -344,8 +344,13 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct rpi_firmware_get_clocks_response *clks;
int ret;
+ /*
+ * The firmware doesn't guarantee that the last element of
+ * RPI_FIRMWARE_GET_CLOCKS is zeroed. So allocate an additional
+ * zero element as a sentinel.
+ */
clks = devm_kcalloc(rpi->dev,
- RPI_FIRMWARE_NUM_CLK_ID, sizeof(*clks),
+ RPI_FIRMWARE_NUM_CLK_ID + 1, sizeof(*clks),
GFP_KERNEL);
if (!clks)
return -ENOMEM;
@@ -360,7 +365,8 @@ static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi,
struct raspberrypi_clk_variant *variant;
if (clks->id > RPI_FIRMWARE_NUM_CLK_ID) {
- dev_err(rpi->dev, "Unknown clock id: %u", clks->id);
+ dev_err(rpi->dev, "Unknown clock id: %u (max: %u)\n",
+ clks->id, RPI_FIRMWARE_NUM_CLK_ID);
return -EINVAL;
}
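
The extra zeroed element lets the consumer walk the firmware response without a count, stopping at the first all-zero entry. A hedged sketch of such a sentinel walk; the struct here is a stand-in, and the real loop is outside this hunk:

/* Hypothetical shape of one response entry, for illustration only. */
struct clk_desc { unsigned int id; unsigned int flags; };

static unsigned int count_clocks(const struct clk_desc *clks)
{
        unsigned int n = 0;

        /* The devm_kcalloc() above guarantees a zeroed final element. */
        while (clks[n].id != 0)
                n++;
        return n;
}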
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 7fc191c15507..bd0b35cac83e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -840,10 +840,9 @@ static void clk_core_unprepare(struct clk_core *core)
if (core->ops->unprepare)
core->ops->unprepare(core->hw);
- clk_pm_runtime_put(core);
-
trace_clk_unprepare_complete(core);
clk_core_unprepare(core->parent);
+ clk_pm_runtime_put(core);
}
static void clk_core_unprepare_lock(struct clk_core *core)
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index ef2a445c63a3..373e9438b57a 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -135,6 +135,7 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
continue;
if (!strncmp(n, tmp, strlen(tmp))) {
+ of_node_get(np);
found = true;
break;
}
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 205acb2c744d..e3885c90a3ac 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -295,7 +295,8 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
enum dma_resv_usage old_usage;
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
- if ((old->context == fence->context && old_usage >= usage) ||
+ if ((old->context == fence->context && old_usage >= usage &&
+ dma_fence_is_later(fence, old)) ||
dma_fence_is_signaled(old)) {
dma_resv_list_set(fobj, i, fence, usage);
dma_fence_put(old);
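
The added dma_fence_is_later() check matters because two fences from the same context are totally ordered by sequence number; replacing a later fence with an earlier one would silently shorten the wait. A small model of the replacement rule, with usage compatibility reduced to a boolean:

/* Model: same-context fences are ordered by sequence number. */
struct fence_model { unsigned int context; unsigned long long seqno; };

static int model_is_later(const struct fence_model *a,
                          const struct fence_model *b)
{
        return a->seqno > b->seqno;
}

static int may_replace(const struct fence_model *old,
                       const struct fence_model *incoming,
                       int old_usage_covers_new)
{
        /* Only replace with a later fence from the same context. */
        return old->context == incoming->context &&
               old_usage_covers_new && model_is_later(incoming, old);
}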
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
index 4dde8edd53b6..3e8d4b51a814 100644
--- a/drivers/firmware/efi/capsule-loader.c
+++ b/drivers/firmware/efi/capsule-loader.c
@@ -243,29 +243,6 @@ failed:
}
/**
- * efi_capsule_flush - called by file close or file flush
- * @file: file pointer
- * @id: not used
- *
- * If a capsule is being partially uploaded then calling this function
- * will be treated as upload termination and will free those completed
- * buffer pages and -ECANCELED will be returned.
- **/
-static int efi_capsule_flush(struct file *file, fl_owner_t id)
-{
- int ret = 0;
- struct capsule_info *cap_info = file->private_data;
-
- if (cap_info->index > 0) {
- pr_err("capsule upload not complete\n");
- efi_free_all_buff_pages(cap_info);
- ret = -ECANCELED;
- }
-
- return ret;
-}
-
-/**
* efi_capsule_release - called by file close
* @inode: not used
* @file: file pointer
@@ -277,6 +254,13 @@ static int efi_capsule_release(struct inode *inode, struct file *file)
{
struct capsule_info *cap_info = file->private_data;
+ if (cap_info->index > 0 &&
+ (cap_info->header.headersize == 0 ||
+ cap_info->count < cap_info->total_size)) {
+ pr_err("capsule upload not complete\n");
+ efi_free_all_buff_pages(cap_info);
+ }
+
kfree(cap_info->pages);
kfree(cap_info->phys);
kfree(file->private_data);
@@ -324,7 +308,6 @@ static const struct file_operations efi_capsule_fops = {
.owner = THIS_MODULE,
.open = efi_capsule_open,
.write = efi_capsule_write,
- .flush = efi_capsule_flush,
.release = efi_capsule_release,
.llseek = no_llseek,
};
diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile
index d0537573501e..2c67f71f2375 100644
--- a/drivers/firmware/efi/libstub/Makefile
+++ b/drivers/firmware/efi/libstub/Makefile
@@ -37,6 +37,13 @@ KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \
$(call cc-option,-fno-addrsig) \
-D__DISABLE_EXPORTS
+#
+# struct randomization only makes sense for Linux internal types, which the EFI
+# stub code never touches, so let's turn off struct randomization for the stub
+# altogether
+#
+KBUILD_CFLAGS := $(filter-out $(RANDSTRUCT_CFLAGS), $(KBUILD_CFLAGS))
+
# remove SCS flags from all objects in this directory
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_SCS), $(KBUILD_CFLAGS))
# disable LTO
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 05ae8bcc9d67..43ca665af610 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -220,7 +220,6 @@ adjust_memory_range_protection(unsigned long start, unsigned long size)
unsigned long end, next;
unsigned long rounded_start, rounded_end;
unsigned long unprotect_start, unprotect_size;
- int has_system_memory = 0;
if (efi_dxe_table == NULL)
return;
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c
index a41551870759..74cc71bb3984 100644
--- a/drivers/gpio/gpio-104-dio-48e.c
+++ b/drivers/gpio/gpio-104-dio-48e.c
@@ -164,6 +164,7 @@ static void dio48e_irq_mask(struct irq_data *data)
dio48egpio->irq_mask &= ~BIT(0);
else
dio48egpio->irq_mask &= ~BIT(1);
+ gpiochip_disable_irq(chip, offset);
if (!dio48egpio->irq_mask)
/* disable interrupts */
@@ -191,6 +192,7 @@ static void dio48e_irq_unmask(struct irq_data *data)
iowrite8(0x00, &dio48egpio->reg->enable_interrupt);
}
+ gpiochip_enable_irq(chip, offset);
if (offset == 19)
dio48egpio->irq_mask |= BIT(0);
else
@@ -213,12 +215,14 @@ static int dio48e_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip dio48e_irqchip = {
+static const struct irq_chip dio48e_irqchip = {
.name = "104-dio-48e",
.irq_ack = dio48e_irq_ack,
.irq_mask = dio48e_irq_mask,
.irq_unmask = dio48e_irq_unmask,
- .irq_set_type = dio48e_irq_set_type
+ .irq_set_type = dio48e_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t dio48e_irq_handler(int irq, void *dev_id)
@@ -322,7 +326,7 @@ static int dio48e_probe(struct device *dev, unsigned int id)
dio48egpio->chip.set_multiple = dio48e_gpio_set_multiple;
girq = &dio48egpio->chip.irq;
- girq->chip = &dio48e_irqchip;
+ gpio_irq_chip_set_chip(girq, &dio48e_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
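This hunk and the five GPIO conversions that follow (104-idi-48, 104-idio-16, ixp4xx, mt7621, ws16c48) apply the same immutable-irqchip recipe: the irq_chip becomes const with IRQCHIP_IMMUTABLE and GPIOCHIP_IRQ_RESOURCE_HELPERS, mask/unmask bracket the hardware access with gpiochip_disable_irq()/gpiochip_enable_irq(), and probe installs the chip via gpio_irq_chip_set_chip(). A minimal template sketch, with my_hw_mask()/my_hw_unmask() standing in for the driver-specific register writes:

static void my_irq_mask(struct irq_data *data)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);

	my_hw_mask(chip, irqd_to_hwirq(data));		/* hypothetical */
	gpiochip_disable_irq(chip, irqd_to_hwirq(data));
}

static void my_irq_unmask(struct irq_data *data)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(data);

	gpiochip_enable_irq(chip, irqd_to_hwirq(data));
	my_hw_unmask(chip, irqd_to_hwirq(data));	/* hypothetical */
}

static const struct irq_chip my_irqchip = {
	.name		= "my-gpio",
	.irq_mask	= my_irq_mask,
	.irq_unmask	= my_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* in probe: gpio_irq_chip_set_chip(&chip->irq, &my_irqchip); */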
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 40be76efeed7..3286b914a2cf 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -113,6 +113,7 @@ static void idi_48_irq_mask(struct irq_data *data)
spin_lock_irqsave(&idi48gpio->lock, flags);
idi48gpio->irq_mask[boundary] &= ~mask;
+ gpiochip_disable_irq(chip, offset);
/* Exit early if there are still input lines with IRQ unmasked */
if (idi48gpio->irq_mask[boundary])
@@ -140,6 +141,7 @@ static void idi_48_irq_unmask(struct irq_data *data)
prev_irq_mask = idi48gpio->irq_mask[boundary];
+ gpiochip_enable_irq(chip, offset);
idi48gpio->irq_mask[boundary] |= mask;
/* Exit early if IRQ was already unmasked for this boundary */
@@ -164,12 +166,14 @@ static int idi_48_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idi_48_irqchip = {
+static const struct irq_chip idi_48_irqchip = {
.name = "104-idi-48",
.irq_ack = idi_48_irq_ack,
.irq_mask = idi_48_irq_mask,
.irq_unmask = idi_48_irq_unmask,
- .irq_set_type = idi_48_irq_set_type
+ .irq_set_type = idi_48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idi_48_irq_handler(int irq, void *dev_id)
@@ -267,7 +271,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
idi48gpio->chip.get_multiple = idi_48_gpio_get_multiple;
girq = &idi48gpio->chip.irq;
- girq->chip = &idi_48_irqchip;
+ gpio_irq_chip_set_chip(girq, &idi_48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-104-idio-16.c b/drivers/gpio/gpio-104-idio-16.c
index 65a5f581d981..4756e583f223 100644
--- a/drivers/gpio/gpio-104-idio-16.c
+++ b/drivers/gpio/gpio-104-idio-16.c
@@ -174,10 +174,11 @@ static void idio_16_irq_mask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
unsigned long flags;
- idio16gpio->irq_mask &= ~mask;
+ idio16gpio->irq_mask &= ~BIT(offset);
+ gpiochip_disable_irq(chip, offset);
if (!idio16gpio->irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -192,11 +193,12 @@ static void idio_16_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
struct idio_16_gpio *const idio16gpio = gpiochip_get_data(chip);
- const unsigned long mask = BIT(irqd_to_hwirq(data));
+ const unsigned long offset = irqd_to_hwirq(data);
const unsigned long prev_irq_mask = idio16gpio->irq_mask;
unsigned long flags;
- idio16gpio->irq_mask |= mask;
+ gpiochip_enable_irq(chip, offset);
+ idio16gpio->irq_mask |= BIT(offset);
if (!prev_irq_mask) {
raw_spin_lock_irqsave(&idio16gpio->lock, flags);
@@ -217,12 +219,14 @@ static int idio_16_irq_set_type(struct irq_data *data, unsigned int flow_type)
return 0;
}
-static struct irq_chip idio_16_irqchip = {
+static const struct irq_chip idio_16_irqchip = {
.name = "104-idio-16",
.irq_ack = idio_16_irq_ack,
.irq_mask = idio_16_irq_mask,
.irq_unmask = idio_16_irq_unmask,
- .irq_set_type = idio_16_irq_set_type
+ .irq_set_type = idio_16_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t idio_16_irq_handler(int irq, void *dev_id)
@@ -299,7 +303,7 @@ static int idio_16_probe(struct device *dev, unsigned int id)
idio16gpio->out_state = 0xFFFF;
girq = &idio16gpio->chip.irq;
- girq->chip = &idio_16_irqchip;
+ gpio_irq_chip_set_chip(girq, &idio_16_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-ixp4xx.c b/drivers/gpio/gpio-ixp4xx.c
index 312309be0287..56656fb519f8 100644
--- a/drivers/gpio/gpio-ixp4xx.c
+++ b/drivers/gpio/gpio-ixp4xx.c
@@ -63,6 +63,14 @@ static void ixp4xx_gpio_irq_ack(struct irq_data *d)
__raw_writel(BIT(d->hwirq), g->base + IXP4XX_REG_GPIS);
}
+static void ixp4xx_gpio_mask_irq(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+ irq_chip_mask_parent(d);
+ gpiochip_disable_irq(gc, d->hwirq);
+}
+
static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -72,6 +80,7 @@ static void ixp4xx_gpio_irq_unmask(struct irq_data *d)
if (!(g->irq_edge & BIT(d->hwirq)))
ixp4xx_gpio_irq_ack(d);
+ gpiochip_enable_irq(gc, d->hwirq);
irq_chip_unmask_parent(d);
}
@@ -149,12 +158,14 @@ static int ixp4xx_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}
-static struct irq_chip ixp4xx_gpio_irqchip = {
+static const struct irq_chip ixp4xx_gpio_irqchip = {
.name = "IXP4GPIO",
.irq_ack = ixp4xx_gpio_irq_ack,
- .irq_mask = irq_chip_mask_parent,
+ .irq_mask = ixp4xx_gpio_mask_irq,
.irq_unmask = ixp4xx_gpio_irq_unmask,
.irq_set_type = ixp4xx_gpio_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static int ixp4xx_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -263,7 +274,7 @@ static int ixp4xx_gpio_probe(struct platform_device *pdev)
g->gc.owner = THIS_MODULE;
girq = &g->gc.irq;
- girq->chip = &ixp4xx_gpio_irqchip;
+ gpio_irq_chip_set_chip(girq, &ixp4xx_gpio_irqchip);
girq->fwnode = g->fwnode;
girq->parent_domain = parent;
girq->child_to_parent_hwirq = ixp4xx_gpio_child_to_parent_hwirq;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 8943cea92764..a2e505a7545c 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -373,6 +373,13 @@ static void gpio_mockup_debugfs_setup(struct device *dev,
}
}
+static void gpio_mockup_debugfs_cleanup(void *data)
+{
+ struct gpio_mockup_chip *chip = data;
+
+ debugfs_remove_recursive(chip->dbg_dir);
+}
+
static void gpio_mockup_dispose_mappings(void *data)
{
struct gpio_mockup_chip *chip = data;
@@ -455,7 +462,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
gpio_mockup_debugfs_setup(dev, chip);
- return 0;
+ return devm_add_action_or_reset(dev, gpio_mockup_debugfs_cleanup, chip);
}
static const struct of_device_id gpio_mockup_of_match[] = {
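Returning devm_add_action_or_reset() from probe ties the debugfs teardown to the device's lifetime: the callback runs on driver detach, and if registration itself fails it is invoked immediately, so no manual unwinding is needed. A sketch of the pattern:

static void my_debugfs_cleanup(void *data)
{
	debugfs_remove_recursive(data);		/* data is the debugfs dentry */
}

static int my_register_cleanup(struct device *dev, struct dentry *dbg_dir)
{
	return devm_add_action_or_reset(dev, my_debugfs_cleanup, dbg_dir);
}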
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 15049822937a..3eb08cd1fdc0 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -169,6 +169,7 @@ static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type) {
case IRQ_TYPE_EDGE_FALLING:
+ case IRQ_TYPE_LEVEL_LOW:
raw_spin_lock_irqsave(&mpc8xxx_gc->lock, flags);
gc->write_reg(mpc8xxx_gc->regs + GPIO_ICR,
gc->read_reg(mpc8xxx_gc->regs + GPIO_ICR)
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d8a26e503ca5..f163f5ca857b 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -112,6 +112,8 @@ mediatek_gpio_irq_unmask(struct irq_data *d)
unsigned long flags;
u32 rise, fall, high, low;
+ gpiochip_enable_irq(gc, d->hwirq);
+
spin_lock_irqsave(&rg->lock, flags);
rise = mtk_gpio_r32(rg, GPIO_REG_REDGE);
fall = mtk_gpio_r32(rg, GPIO_REG_FEDGE);
@@ -143,6 +145,8 @@ mediatek_gpio_irq_mask(struct irq_data *d)
mtk_gpio_w32(rg, GPIO_REG_HLVL, high & ~BIT(pin));
mtk_gpio_w32(rg, GPIO_REG_LLVL, low & ~BIT(pin));
spin_unlock_irqrestore(&rg->lock, flags);
+
+ gpiochip_disable_irq(gc, d->hwirq);
}
static int
@@ -204,6 +208,16 @@ mediatek_gpio_xlate(struct gpio_chip *chip,
return gpio % MTK_BANK_WIDTH;
}
+static const struct irq_chip mt7621_irq_chip = {
+ .name = "mt7621-gpio",
+ .irq_mask_ack = mediatek_gpio_irq_mask,
+ .irq_mask = mediatek_gpio_irq_mask,
+ .irq_unmask = mediatek_gpio_irq_unmask,
+ .irq_set_type = mediatek_gpio_irq_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
static int
mediatek_gpio_bank_probe(struct device *dev, int bank)
{
@@ -238,11 +252,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
return -ENOMEM;
rg->chip.offset = bank * MTK_BANK_WIDTH;
- rg->irq_chip.name = dev_name(dev);
- rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
- rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
- rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
if (mtk->gpio_irq) {
struct gpio_irq_chip *girq;
@@ -262,7 +271,7 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
}
girq = &rg->chip.irq;
- girq->chip = &rg->irq_chip;
+ gpio_irq_chip_set_chip(girq, &mt7621_irq_chip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index ecd7d169470b..2925f4d8cef3 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -1175,7 +1175,9 @@ static int pca953x_suspend(struct device *dev)
{
struct pca953x_chip *chip = dev_get_drvdata(dev);
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, true);
+ mutex_unlock(&chip->i2c_lock);
if (atomic_read(&chip->wakeup_path))
device_set_wakeup_path(dev);
@@ -1198,13 +1200,17 @@ static int pca953x_resume(struct device *dev)
}
}
+ mutex_lock(&chip->i2c_lock);
regcache_cache_only(chip->regmap, false);
regcache_mark_dirty(chip->regmap);
ret = pca953x_regcache_sync(dev);
- if (ret)
+ if (ret) {
+ mutex_unlock(&chip->i2c_lock);
return ret;
+ }
ret = regcache_sync(chip->regmap);
+ mutex_unlock(&chip->i2c_lock);
if (ret) {
dev_err(dev, "Failed to restore register map: %d\n", ret);
return ret;
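Both paths now hold chip->i2c_lock across the regcache transitions, so a concurrent I2C access cannot hit the regmap while it is being switched in or out of cache-only mode. A sketch of the resume-side rule, assuming a driver struct with the same regmap-plus-mutex layout (my_chip is hypothetical):

static int my_resume_sync(struct my_chip *chip)
{
	int ret;

	mutex_lock(&chip->i2c_lock);
	regcache_cache_only(chip->regmap, false);	/* go live again */
	regcache_mark_dirty(chip->regmap);
	ret = regcache_sync(chip->regmap);		/* replay cached writes */
	mutex_unlock(&chip->i2c_lock);

	return ret;
}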
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index c7fbfa3ae43b..1198ab0305d0 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -661,24 +661,17 @@ static int pxa_gpio_probe(struct platform_device *pdev)
if (IS_ERR(gpio_reg_base))
return PTR_ERR(gpio_reg_base);
- clk = clk_get(&pdev->dev, NULL);
+ clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Error %ld to get gpio clock\n",
PTR_ERR(clk));
return PTR_ERR(clk);
}
- ret = clk_prepare_enable(clk);
- if (ret) {
- clk_put(clk);
- return ret;
- }
/* Initialize GPIO chips */
ret = pxa_init_gpio_chip(pchip, pxa_last_gpio + 1, gpio_reg_base);
- if (ret) {
- clk_put(clk);
+ if (ret)
return ret;
- }
/* clear all GPIO edge detects */
for_each_gpio_bank(gpio, c, pchip) {
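devm_clk_get_enabled() collapses clk_get() plus clk_prepare_enable() plus every clk_put()/clk_disable_unprepare() on the error and remove paths into one managed call. A sketch of the simplified probe shape:

static int my_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get gpio clock\n");

	/* no clk_put()/clk_disable_unprepare() needed on any exit path */
	return 0;
}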
diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c
index 63dcf42f7c20..d6418f89d3f6 100644
--- a/drivers/gpio/gpio-realtek-otto.c
+++ b/drivers/gpio/gpio-realtek-otto.c
@@ -46,10 +46,20 @@
* @lock: Lock for accessing the IRQ registers and values
* @intr_mask: Mask for interrupt lines
* @intr_type: Interrupt type selection
+ * @bank_read: Read a bank setting as a single 32-bit value
+ * @bank_write: Write a bank setting as a single 32-bit value
+ * @line_imr_pos: Bit shift of an IRQ line's IMR value.
+ *
+ * The DIR, DATA, and ISR registers consist of four 8-bit port values, packed
+ * into a single 32-bit register. Use @bank_read (@bank_write) to get (assign)
+ * a value from (to) these registers. The IMR register consists of four 16-bit
+ * port values, packed into two 32-bit registers. Use @line_imr_pos to get the
+ * bit shift of the 2-bit field for a line's IMR settings. Shifts larger than
+ * 32 overflow into the second register.
*
* Because the interrupt mask register (IMR) combines the function of IRQ type
* selection and masking, two extra values are stored. @intr_mask is used to
- * mask/unmask the interrupts for a GPIO port, and @intr_type is used to store
+ * mask/unmask the interrupts for a GPIO line, and @intr_type is used to store
* the selected interrupt types. The logical AND of these values is written to
* IMR on changes.
*/
@@ -59,10 +69,11 @@ struct realtek_gpio_ctrl {
void __iomem *cpumask_base;
struct cpumask cpu_irq_maskable;
raw_spinlock_t lock;
- u16 intr_mask[REALTEK_GPIO_PORTS_PER_BANK];
- u16 intr_type[REALTEK_GPIO_PORTS_PER_BANK];
- unsigned int (*port_offset_u8)(unsigned int port);
- unsigned int (*port_offset_u16)(unsigned int port);
+ u8 intr_mask[REALTEK_GPIO_MAX];
+ u8 intr_type[REALTEK_GPIO_MAX];
+ u32 (*bank_read)(void __iomem *reg);
+ void (*bank_write)(void __iomem *reg, u32 value);
+ unsigned int (*line_imr_pos)(unsigned int line);
};
/* Expand with more flags as devices with other quirks are added */
@@ -101,14 +112,22 @@ static struct realtek_gpio_ctrl *irq_data_to_ctrl(struct irq_data *data)
* port. The two interrupt mask registers store two bits per GPIO, so use u16
* values.
*/
-static unsigned int realtek_gpio_port_offset_u8(unsigned int port)
+static u32 realtek_gpio_bank_read_swapped(void __iomem *reg)
{
- return port;
+ return ioread32be(reg);
}
-static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
+static void realtek_gpio_bank_write_swapped(void __iomem *reg, u32 value)
{
- return 2 * port;
+ iowrite32be(value, reg);
+}
+
+static unsigned int realtek_gpio_line_imr_pos_swapped(unsigned int line)
+{
+ unsigned int port_pin = line % 8;
+ unsigned int port = line / 8;
+
+ return 2 * (8 * (port ^ 1) + port_pin);
}
/*
@@ -119,66 +138,67 @@ static unsigned int realtek_gpio_port_offset_u16(unsigned int port)
* per GPIO, so use u16 values. The first register contains ports 1 and 0, the
* second ports 3 and 2.
*/
-static unsigned int realtek_gpio_port_offset_u8_rev(unsigned int port)
+static u32 realtek_gpio_bank_read(void __iomem *reg)
{
- return 3 - port;
+ return ioread32(reg);
}
-static unsigned int realtek_gpio_port_offset_u16_rev(unsigned int port)
+static void realtek_gpio_bank_write(void __iomem *reg, u32 value)
{
- return 2 * (port ^ 1);
+ iowrite32(value, reg);
}
-static void realtek_gpio_write_imr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u16 irq_type, u16 irq_mask)
+static unsigned int realtek_gpio_line_imr_pos(unsigned int line)
{
- iowrite16(irq_type & irq_mask,
- ctrl->base + REALTEK_GPIO_REG_IMR + ctrl->port_offset_u16(port));
+ return 2 * line;
}
-static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, u8 mask)
+static void realtek_gpio_clear_isr(struct realtek_gpio_ctrl *ctrl, u32 mask)
{
- iowrite8(mask, ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ ctrl->bank_write(ctrl->base + REALTEK_GPIO_REG_ISR, mask);
}
-static u8 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl, unsigned int port)
+static u32 realtek_gpio_read_isr(struct realtek_gpio_ctrl *ctrl)
{
- return ioread8(ctrl->base + REALTEK_GPIO_REG_ISR + ctrl->port_offset_u8(port));
+ return ctrl->bank_read(ctrl->base + REALTEK_GPIO_REG_ISR);
}
-/* Set the rising and falling edge mask bits for a GPIO port pin */
-static u16 realtek_gpio_imr_bits(unsigned int pin, u16 value)
+/* Set the rising and falling edge mask bits for a GPIO pin */
+static void realtek_gpio_update_line_imr(struct realtek_gpio_ctrl *ctrl, unsigned int line)
{
- return (value & REALTEK_GPIO_IMR_LINE_MASK) << 2 * pin;
+ void __iomem *reg = ctrl->base + REALTEK_GPIO_REG_IMR;
+ unsigned int line_shift = ctrl->line_imr_pos(line);
+ unsigned int shift = line_shift % 32;
+ u32 irq_type = ctrl->intr_type[line];
+ u32 irq_mask = ctrl->intr_mask[line];
+ u32 reg_val;
+
+ reg += 4 * (line_shift / 32);
+ reg_val = ioread32(reg);
+ reg_val &= ~(REALTEK_GPIO_IMR_LINE_MASK << shift);
+ reg_val |= (irq_type & irq_mask & REALTEK_GPIO_IMR_LINE_MASK) << shift;
+ iowrite32(reg_val, reg);
}
static void realtek_gpio_irq_ack(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
irq_hw_number_t line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
- realtek_gpio_clear_isr(ctrl, port, BIT(port_pin));
+ realtek_gpio_clear_isr(ctrl, BIT(line));
}
static void realtek_gpio_irq_unmask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
gpiochip_enable_irq(&ctrl->gc, line);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = REALTEK_GPIO_IMR_LINE_MASK;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
}
@@ -186,16 +206,11 @@ static void realtek_gpio_irq_mask(struct irq_data *data)
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 m;
raw_spin_lock_irqsave(&ctrl->lock, flags);
- m = ctrl->intr_mask[port];
- m &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- ctrl->intr_mask[port] = m;
- realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m);
+ ctrl->intr_mask[line] = 0;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
gpiochip_disable_irq(&ctrl->gc, line);
@@ -205,10 +220,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
unsigned long flags;
- u16 type, t;
+ u8 type;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
@@ -227,11 +240,8 @@ static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_ty
irq_set_handler_locked(data, handle_edge_irq);
raw_spin_lock_irqsave(&ctrl->lock, flags);
- t = ctrl->intr_type[port];
- t &= ~realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK);
- t |= realtek_gpio_imr_bits(port_pin, type);
- ctrl->intr_type[port] = t;
- realtek_gpio_write_imr(ctrl, port, t, ctrl->intr_mask[port]);
+ ctrl->intr_type[line] = type;
+ realtek_gpio_update_line_imr(ctrl, line);
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
@@ -242,28 +252,21 @@ static void realtek_gpio_irq_handler(struct irq_desc *desc)
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
- unsigned int lines_done;
- unsigned int port_pin_count;
unsigned long status;
int offset;
chained_irq_enter(irq_chip, desc);
- for (lines_done = 0; lines_done < gc->ngpio; lines_done += 8) {
- status = realtek_gpio_read_isr(ctrl, lines_done / 8);
- port_pin_count = min(gc->ngpio - lines_done, 8U);
- for_each_set_bit(offset, &status, port_pin_count)
- generic_handle_domain_irq(gc->irq.domain, offset + lines_done);
- }
+ status = realtek_gpio_read_isr(ctrl);
+ for_each_set_bit(offset, &status, gc->ngpio)
+ generic_handle_domain_irq(gc->irq.domain, offset);
chained_irq_exit(irq_chip, desc);
}
-static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl,
- unsigned int port, int cpu)
+static inline void __iomem *realtek_gpio_irq_cpu_mask(struct realtek_gpio_ctrl *ctrl, int cpu)
{
- return ctrl->cpumask_base + ctrl->port_offset_u8(port) +
- REALTEK_GPIO_PORTS_PER_BANK * cpu;
+ return ctrl->cpumask_base + REALTEK_GPIO_PORTS_PER_BANK * cpu;
}
static int realtek_gpio_irq_set_affinity(struct irq_data *data,
@@ -271,12 +274,10 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
{
struct realtek_gpio_ctrl *ctrl = irq_data_to_ctrl(data);
unsigned int line = irqd_to_hwirq(data);
- unsigned int port = line / 8;
- unsigned int port_pin = line % 8;
void __iomem *irq_cpu_mask;
unsigned long flags;
int cpu;
- u8 v;
+ u32 v;
if (!ctrl->cpumask_base)
return -ENXIO;
@@ -284,15 +285,15 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
raw_spin_lock_irqsave(&ctrl->lock, flags);
for_each_cpu(cpu, &ctrl->cpu_irq_maskable) {
- irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, port, cpu);
- v = ioread8(irq_cpu_mask);
+ irq_cpu_mask = realtek_gpio_irq_cpu_mask(ctrl, cpu);
+ v = ctrl->bank_read(irq_cpu_mask);
if (cpumask_test_cpu(cpu, dest))
- v |= BIT(port_pin);
+ v |= BIT(line);
else
- v &= ~BIT(port_pin);
+ v &= ~BIT(line);
- iowrite8(v, irq_cpu_mask);
+ ctrl->bank_write(irq_cpu_mask, v);
}
raw_spin_unlock_irqrestore(&ctrl->lock, flags);
@@ -305,16 +306,17 @@ static int realtek_gpio_irq_set_affinity(struct irq_data *data,
static int realtek_gpio_irq_init(struct gpio_chip *gc)
{
struct realtek_gpio_ctrl *ctrl = gpiochip_get_data(gc);
- unsigned int port;
+ u32 mask_all = GENMASK(gc->ngpio - 1, 0);
+ unsigned int line;
int cpu;
- for (port = 0; (port * 8) < gc->ngpio; port++) {
- realtek_gpio_write_imr(ctrl, port, 0, 0);
- realtek_gpio_clear_isr(ctrl, port, GENMASK(7, 0));
+ for (line = 0; line < gc->ngpio; line++)
+ realtek_gpio_update_line_imr(ctrl, line);
- for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
- iowrite8(GENMASK(7, 0), realtek_gpio_irq_cpu_mask(ctrl, port, cpu));
- }
+ realtek_gpio_clear_isr(ctrl, mask_all);
+
+ for_each_cpu(cpu, &ctrl->cpu_irq_maskable)
+ ctrl->bank_write(realtek_gpio_irq_cpu_mask(ctrl, cpu), mask_all);
return 0;
}
@@ -387,12 +389,14 @@ static int realtek_gpio_probe(struct platform_device *pdev)
if (dev_flags & GPIO_PORTS_REVERSED) {
bgpio_flags = 0;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8_rev;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16_rev;
+ ctrl->bank_read = realtek_gpio_bank_read;
+ ctrl->bank_write = realtek_gpio_bank_write;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos;
} else {
bgpio_flags = BGPIOF_BIG_ENDIAN_BYTE_ORDER;
- ctrl->port_offset_u8 = realtek_gpio_port_offset_u8;
- ctrl->port_offset_u16 = realtek_gpio_port_offset_u16;
+ ctrl->bank_read = realtek_gpio_bank_read_swapped;
+ ctrl->bank_write = realtek_gpio_bank_write_swapped;
+ ctrl->line_imr_pos = realtek_gpio_line_imr_pos_swapped;
}
err = bgpio_init(&ctrl->gc, dev, 4,
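A worked example of the IMR indexing introduced above: every GPIO line owns a 2-bit IMR field, so in the non-swapped layout line 21 sits at bit position 42, which lands in the second 32-bit IMR register:

unsigned int line = 21;
unsigned int pos = 2 * line;		/* realtek_gpio_line_imr_pos(): 42 */
unsigned int reg_index = pos / 32;	/* 1: second IMR register */
unsigned int shift = pos % 32;		/* 10 */
u32 field_mask = REALTEK_GPIO_IMR_LINE_MASK << shift;	/* 0x3 << 10 */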
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index f91e876fd969..bb50335239ac 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -419,11 +419,11 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
goto out;
} else {
bank->toggle_edge_mode |= mask;
- level |= mask;
+ level &= ~mask;
/*
* Determine the gpio state. If 1, the next interrupt should be
- * falling otherwise rising.
+ * low otherwise high.
*/
data = readl(bank->reg_base + bank->gpio_regs->ext_port);
if (data & mask)
diff --git a/drivers/gpio/gpio-ws16c48.c b/drivers/gpio/gpio-ws16c48.c
index b098f2dc196b..59fb10641598 100644
--- a/drivers/gpio/gpio-ws16c48.c
+++ b/drivers/gpio/gpio-ws16c48.c
@@ -265,6 +265,7 @@ static void ws16c48_irq_mask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
ws16c48gpio->irq_mask &= ~mask;
+ gpiochip_disable_irq(chip, offset);
port_state = ws16c48gpio->irq_mask >> (8 * port);
/* Select Register Page 2; Unlock all I/O ports */
@@ -295,6 +296,7 @@ static void ws16c48_irq_unmask(struct irq_data *data)
raw_spin_lock_irqsave(&ws16c48gpio->lock, flags);
+ gpiochip_enable_irq(chip, offset);
ws16c48gpio->irq_mask |= mask;
port_state = ws16c48gpio->irq_mask >> (8 * port);
@@ -356,12 +358,14 @@ static int ws16c48_irq_set_type(struct irq_data *data, unsigned flow_type)
return 0;
}
-static struct irq_chip ws16c48_irqchip = {
+static const struct irq_chip ws16c48_irqchip = {
.name = "ws16c48",
.irq_ack = ws16c48_irq_ack,
.irq_mask = ws16c48_irq_mask,
.irq_unmask = ws16c48_irq_unmask,
- .irq_set_type = ws16c48_irq_set_type
+ .irq_set_type = ws16c48_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
};
static irqreturn_t ws16c48_irq_handler(int irq, void *dev_id)
@@ -463,7 +467,7 @@ static int ws16c48_probe(struct device *dev, unsigned int id)
ws16c48gpio->chip.set_multiple = ws16c48_gpio_set_multiple;
girq = &ws16c48gpio->chip.irq;
- girq->chip = &ws16c48_irqchip;
+ gpio_irq_chip_set_chip(girq, &ws16c48_irqchip);
/* This will let us handle the parent IRQ in the driver */
girq->parent_handler = NULL;
girq->num_parents = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index cbd593f7d553..2170db83e41d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1728,7 +1728,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, user_addr);
if (user_addr) {
- pr_debug("creating userptr BO for user_addr = %llu\n", user_addr);
+ pr_debug("creating userptr BO for user_addr = %llx\n", user_addr);
ret = init_user_pages(*mem, user_addr, criu_resume);
if (ret)
goto allocate_init_user_pages_failed;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index f095a2513aff..be7aff2d4a57 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2365,8 +2365,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.sw = true;
- /* need to do gmc hw init early so we can allocate gpu mem */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ /* need to do common hw init early so everything is set up for gmc */
+ r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
+ if (r) {
+ DRM_ERROR("hw_init %d failed %d\n", i, r);
+ goto init_failed;
+ }
+ adev->ip_blocks[i].status.hw = true;
+ } else if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
+ /* need to do gmc hw init early so we can allocate gpu mem */
/* Try to reserve bad pages early */
if (amdgpu_sriov_vf(adev))
amdgpu_virt_exchange_data(adev);
@@ -3052,8 +3060,8 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
int i, r;
static enum amd_ip_block_type ip_order[] = {
- AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_COMMON,
+ AMD_IP_BLOCK_TYPE_GMC,
AMD_IP_BLOCK_TYPE_PSP,
AMD_IP_BLOCK_TYPE_IH,
};
@@ -5524,7 +5532,8 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
resource_size_t aper_limit =
adev->gmc.aper_base + adev->gmc.aper_size - 1;
- bool p2p_access = !(pci_p2pdma_distance_many(adev->pdev,
+ bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
+ !(pci_p2pdma_distance_many(adev->pdev,
&peer_adev->dev, 1, true) < 0);
return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index c20922a5af9f..5b09c8f4fe95 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -38,6 +38,7 @@
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_fb_helper.h>
@@ -496,6 +497,7 @@ bool amdgpu_display_ddc_probe(struct amdgpu_connector *amdgpu_connector,
static const struct drm_framebuffer_funcs amdgpu_fb_funcs = {
.destroy = drm_gem_fb_destroy,
.create_handle = drm_gem_fb_create_handle,
+ .dirty = drm_atomic_helper_dirtyfb,
};
uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
index ecada5eadfe3..e325150879df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -66,10 +66,15 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
return true;
case CHIP_SIENNA_CICHLID:
if (strnstr(atom_ctx->vbios_version, "D603",
+ sizeof(atom_ctx->vbios_version))) {
+ if (strnstr(atom_ctx->vbios_version, "D603GLXE",
sizeof(atom_ctx->vbios_version)))
- return true;
- else
+ return false;
+ else
+ return true;
+ } else {
return false;
+ }
default:
return false;
}
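The nested strnstr() checks implement "a D603 VBIOS, except the D603GLXE variant". An equivalent, flattened form of the same predicate (a sketch, not the committed code, assuming the atom_context layout used above):

static bool sienna_cichlid_fru_supported(struct atom_context *atom_ctx)
{
	return strnstr(atom_ctx->vbios_version, "D603",
		       sizeof(atom_ctx->vbios_version)) &&
	       !strnstr(atom_ctx->vbios_version, "D603GLXE",
			sizeof(atom_ctx->vbios_version));
}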
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index b1099ee79c50..c2fd6f3076a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -159,7 +159,10 @@ void amdgpu_job_free(struct amdgpu_job *job)
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
- dma_fence_put(&job->hw_fence);
+ if (!job->hw_fence.ops)
+ kfree(job);
+ else
+ dma_fence_put(&job->hw_fence);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
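The hw fence is embedded in the job, so once the fence has been initialised its release path owns the job's memory; a job whose fence was never initialised (fence->ops still NULL) must be freed directly. A sketch of that ownership rule, with my_job as a hypothetical struct embedding a dma_fence:

static void my_job_free(struct my_job *job)
{
	if (!job->hw_fence.ops)
		kfree(job);			/* fence never initialised */
	else
		dma_fence_put(&job->hw_fence);	/* fence release frees the job */
}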
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 1036446abc30..c9dec2434f37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -486,11 +486,14 @@ static int psp_sw_fini(void *handle)
release_firmware(psp->ta_fw);
psp->ta_fw = NULL;
}
- if (adev->psp.cap_fw) {
+ if (psp->cap_fw) {
release_firmware(psp->cap_fw);
psp->cap_fw = NULL;
}
-
+ if (psp->toc_fw) {
+ release_firmware(psp->toc_fw);
+ psp->toc_fw = NULL;
+ }
if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
@@ -753,7 +756,7 @@ static int psp_tmr_init(struct psp_context *psp)
}
pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_SIZE(psp->adev),
+ ret = amdgpu_bo_create_kernel(psp->adev, tmr_size, PSP_TMR_ALIGNMENT,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
@@ -2401,7 +2404,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
static bool fw_load_skip_check(struct psp_context *psp,
struct amdgpu_firmware_info *ucode)
{
- if (!ucode->fw)
+ if (!ucode->fw || !ucode->ucode_size)
return true;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index c32b74bd970f..e593e8c2a54d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -36,6 +36,7 @@
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
+#define PSP_TMR_ALIGNMENT 0x100000
#define PSP_FW_NAME_LEN 0x24
enum psp_shared_mem_size {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ff5361f5c2d4..12c6f97945a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1811,7 +1811,8 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
amdgpu_ras_query_error_status(adev, &info);
if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) &&
- adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) {
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4) &&
+ adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 0)) {
if (amdgpu_ras_reset_error_status(adev, info.head.block))
dev_warn(adev->dev, "Failed to reset error counter and error status");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index ebed3f5226db..96b6cf4c4d54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -390,6 +390,7 @@ union amdgpu_firmware_header {
struct rlc_firmware_header_v2_1 rlc_v2_1;
struct rlc_firmware_header_v2_2 rlc_v2_2;
struct rlc_firmware_header_v2_3 rlc_v2_3;
+ struct rlc_firmware_header_v2_4 rlc_v2_4;
struct sdma_firmware_header_v1_0 sdma;
struct sdma_firmware_header_v1_1 sdma_v1_1;
struct sdma_firmware_header_v2_0 sdma_v2_0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index a2a4dc1844c0..a3cd5c1e8529 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -4274,35 +4274,45 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
}
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.global_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.global_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se0_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE0_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE0_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se0_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se1_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE1_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE1_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se1_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se2_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE2_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE2_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se2_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
- info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
- info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
- info->fw = adev->gfx.rlc_fw;
- adev->firmware.fw_size +=
- ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ if (adev->gfx.rlc.se3_tap_delays_ucode_size_bytes) {
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SE3_TAP_DELAYS];
+ info->ucode_id = AMDGPU_UCODE_ID_SE3_TAP_DELAYS;
+ info->fw = adev->gfx.rlc_fw;
+ adev->firmware.fw_size +=
+ ALIGN(adev->gfx.rlc.se3_tap_delays_ucode_size_bytes, PAGE_SIZE);
+ }
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
info->ucode_id = AMDGPU_UCODE_ID_CP_MEC1;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 120ea294abef..cc3fdbbcd314 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -183,6 +183,7 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes,
mes_add_queue_pkt.trap_handler_addr = input->tba_addr;
mes_add_queue_pkt.tma_addr = input->tma_addr;
mes_add_queue_pkt.is_kfd_process = input->is_kfd_process;
+ mes_add_queue_pkt.trap_en = 1;
return mes_v11_0_submit_pkt_and_poll_completion(mes,
&mes_add_queue_pkt, sizeof(mes_add_queue_pkt),
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b465baa26762..aa761ff3a5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -380,6 +380,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
WREG32_PCIE(smnPCIE_LC_CNTL, data);
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -401,9 +402,11 @@ static void nbio_v2_3_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -459,7 +462,10 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v2_3_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * along the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v2_3_program_ltr(adev);
def = data = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -483,6 +489,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
static void nbio_v2_3_apply_lc_spc_mode_wa(struct amdgpu_device *adev)
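The same two guards recur in the nbio_v6_1 and nbio_v7_4 hunks below: the LTR helper is compiled only under CONFIG_PCIEASPM, and it is called only when the PCI core marked the device's path as LTR-enabled. A sketch of the shared shape, with my_program_ltr() as a hypothetical helper:

static void my_program_aspm(struct amdgpu_device *adev)
{
#ifdef CONFIG_PCIEASPM
	/* ... ASPM register programming elided ... */
	if (adev->pdev->ltr_path)	/* set by the PCI core during enumeration */
		my_program_ltr(adev);
#endif
}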
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index f7f6ddebd3e4..37615a77287b 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -282,6 +282,7 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
+#ifdef CONFIG_PCIEASPM
static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -303,9 +304,11 @@ static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
@@ -361,7 +364,10 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v6_1_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * along the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v6_1_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -385,6 +391,7 @@ static void nbio_v6_1_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 11848d1e238b..19455a725939 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -673,6 +673,7 @@ struct amdgpu_nbio_ras nbio_v7_4_ras = {
};
+#ifdef CONFIG_PCIEASPM
static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -694,9 +695,11 @@ static void nbio_v7_4_program_ltr(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data);
}
+#endif
static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
+#ifdef CONFIG_PCIEASPM
uint32_t def, data;
if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
@@ -755,7 +758,10 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL6, data);
- nbio_v7_4_program_ltr(adev);
+ /* Don't program LTR if it is not enabled
+ * along the PCIe path */
+ if (adev->pdev->ltr_path)
+ nbio_v7_4_program_ltr(adev);
def = data = RREG32_PCIE(smnRCC_BIF_STRAP3);
data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT;
@@ -779,6 +785,7 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK;
if (def != data)
WREG32_PCIE(smnPCIE_LC_CNTL3, data);
+#endif
}
const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
index 1dc95ef21da6..def89379b51a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_7.c
@@ -28,6 +28,14 @@
#include "nbio/nbio_7_7_0_sh_mask.h"
#include <uapi/linux/kfd_ioctl.h>
+static void nbio_v7_7_remap_hdp_registers(struct amdgpu_device *adev)
+{
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
+ WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL,
+ adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
+}
+
static u32 nbio_v7_7_get_rev_id(struct amdgpu_device *adev)
{
u32 tmp;
@@ -68,12 +76,6 @@ static void nbio_v7_7_sdma_doorbell_range(struct amdgpu_device *adev, int instan
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_CSDMA_DOORBELL_RANGE,
SIZE, doorbell_size);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- OFFSET, doorbell_index);
- doorbell_range = REG_SET_FIELD(doorbell_range,
- GDC0_BIF_SDMA0_DOORBELL_RANGE,
- SIZE, doorbell_size);
} else {
doorbell_range = REG_SET_FIELD(doorbell_range,
GDC0_BIF_SDMA0_DOORBELL_RANGE,
@@ -342,4 +344,5 @@ const struct amdgpu_nbio_funcs nbio_v7_7_funcs = {
.get_clockgating_state = nbio_v7_7_get_clockgating_state,
.ih_control = nbio_v7_7_ih_control,
.init_registers = nbio_v7_7_init_registers,
+ .remap_hdp_registers = nbio_v7_7_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 65181efba50e..56424f75dd2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1504,6 +1504,11 @@ static int sdma_v4_0_start(struct amdgpu_device *adev)
WREG32_SDMA(i, mmSDMA0_CNTL, temp);
if (!amdgpu_sriov_vf(adev)) {
+ ring = &adev->sdma.instance[i].ring;
+ adev->nbio.funcs->sdma_doorbell_range(adev, i,
+ ring->use_doorbell, ring->doorbell_index,
+ adev->doorbell_index.sdma_doorbell_range);
+
/* unhalt engine */
temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL);
temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index fde6154f2009..183024d7c184 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1211,25 +1211,6 @@ static int soc15_common_sw_fini(void *handle)
return 0;
}
-static void soc15_doorbell_range_init(struct amdgpu_device *adev)
-{
- int i;
- struct amdgpu_ring *ring;
-
- /* sdma/ih doorbell range are programed by hypervisor */
- if (!amdgpu_sriov_vf(adev)) {
- for (i = 0; i < adev->sdma.num_instances; i++) {
- ring = &adev->sdma.instance[i].ring;
- adev->nbio.funcs->sdma_doorbell_range(adev, i,
- ring->use_doorbell, ring->doorbell_index,
- adev->doorbell_index.sdma_doorbell_range);
- }
-
- adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
- adev->irq.ih.doorbell_index);
- }
-}
-
static int soc15_common_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1249,12 +1230,6 @@ static int soc15_common_hw_init(void *handle)
/* enable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, true);
- /* HW doorbell routing policy: doorbell writing not
- * in SDMA/IH/MM/ACV range will be routed to CP. So
- * we need to init SDMA/IH/MM/ACV doorbell range prior
- * to CP ip block init and ring test.
- */
- soc15_doorbell_range_init(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c
index 55284b24f113..2e50db3b761e 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc21.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc21.c
@@ -421,6 +421,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev)
{
switch (adev->ip_versions[GC_HWIP][0]) {
case IP_VERSION(11, 0, 0):
+ return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC);
case IP_VERSION(11, 0, 2):
return false;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 03b7066471f9..1e83db0c5438 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -289,6 +289,10 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 2022ffbb8dba..59dfca093155 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -340,6 +340,10 @@ static int vega20_ih_irq_init(struct amdgpu_device *adev)
}
}
+ if (!amdgpu_sriov_vf(adev))
+ adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
+ adev->irq.ih.doorbell_index);
+
pci_set_master(adev->pdev);
/* enable interrupts */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0e48824f55e3..ee242d9d8b06 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -3288,6 +3288,7 @@ void crtc_debugfs_init(struct drm_crtc *crtc)
&crc_win_y_end_fops);
debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc,
&crc_win_update_fops);
+ dput(dir);
#endif
debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry,
crtc, &amdgpu_current_bpc_fops);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
index beb025cd3dc2..9781a8dbc238 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
@@ -670,6 +670,8 @@ static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *cl
}
ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
bw_params->vram_type = bios_info->memory_type;
+
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
for (i = 0; i < WM_SET_COUNT; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index aeecca68dea7..fb22c3d70528 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1094,7 +1094,8 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
dc->current_state->stream_count != context->stream_count)
should_disable = true;
- if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe) {
+ if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+ !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *old_pipe, *new_pipe;
old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f62d50901d92..0c85ab5933b4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -329,7 +329,7 @@ bool dc_stream_set_cursor_attributes(
dc = stream->ctx->dc;
- if (attributes->height * attributes->width * 4 > 16384)
+ if (dc->debug.allow_sw_cursor_fallback && attributes->height * attributes->width * 4 > 16384)
if (stream->mall_stream_config.type == SUBVP_MAIN)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5908b60db313..dbf8158b832e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -745,6 +745,7 @@ struct dc_debug_options {
bool disable_fixed_vs_aux_timeout_wa;
bool force_disable_subvp;
bool force_subvp_mclk_switch;
+ bool allow_sw_cursor_fallback;
bool force_usr_allow;
/* uses value at boot and disables switch */
bool disable_dtb_ref_clk_switch;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 09b304507bad..52a61b3e5a8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -417,44 +417,42 @@ static void populate_subvp_cmd_drr_info(struct dc *dc,
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
- int16_t drr_frame_us = 0;
- int16_t min_drr_supported_us = 0;
- int16_t max_drr_supported_us = 0;
- int16_t max_drr_vblank_us = 0;
- int16_t max_drr_mallregion_us = 0;
- int16_t mall_region_us = 0;
- int16_t prefetch_us = 0;
- int16_t subvp_active_us = 0;
- int16_t drr_active_us = 0;
- int16_t min_vtotal_supported = 0;
- int16_t max_vtotal_supported = 0;
+ uint16_t drr_frame_us = 0;
+ uint16_t min_drr_supported_us = 0;
+ uint16_t max_drr_supported_us = 0;
+ uint16_t max_drr_vblank_us = 0;
+ uint16_t max_drr_mallregion_us = 0;
+ uint16_t mall_region_us = 0;
+ uint16_t prefetch_us = 0;
+ uint16_t subvp_active_us = 0;
+ uint16_t drr_active_us = 0;
+ uint16_t min_vtotal_supported = 0;
+ uint16_t max_vtotal_supported = 0;
pipe_data->pipe_config.vblank_data.drr_info.drr_in_use = true;
pipe_data->pipe_config.vblank_data.drr_info.use_ramping = false; // for now don't use ramping
pipe_data->pipe_config.vblank_data.drr_info.drr_window_size_ms = 4; // hardcode 4ms DRR window for now
- drr_frame_us = div64_s64(drr_timing->v_total * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
+ drr_frame_us = div64_u64(((uint64_t)drr_timing->v_total * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
// P-State allow width and FW delays already included phantom_timing->v_addressable
- mall_region_us = div64_s64(phantom_timing->v_addressable * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000);
+ mall_region_us = div64_u64(((uint64_t)phantom_timing->v_addressable * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100)));
min_drr_supported_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- min_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 *
- (div64_s64((int64_t)min_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
-
- prefetch_us = div64_s64((phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total,
- (int64_t)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp_active_us = div64_s64(main_timing->v_addressable * main_timing->h_total,
- (int64_t)(main_timing->pix_clk_100hz * 100) * 1000000);
- drr_active_us = div64_s64(drr_timing->v_addressable * drr_timing->h_total,
- (int64_t)(drr_timing->pix_clk_100hz * 100) * 1000000);
- max_drr_vblank_us = div64_s64((int64_t)(subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
+ min_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * min_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
+
+ prefetch_us = div64_u64(((uint64_t)(phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total * 1000000),
+ (((uint64_t)phantom_timing->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp_active_us = div64_u64(((uint64_t)main_timing->v_addressable * main_timing->h_total * 1000000),
+ (((uint64_t)main_timing->pix_clk_100hz * 100)));
+ drr_active_us = div64_u64(((uint64_t)drr_timing->v_addressable * drr_timing->h_total * 1000000),
+ (((uint64_t)drr_timing->pix_clk_100hz * 100)));
+ max_drr_vblank_us = div64_u64((subvp_active_us - prefetch_us - drr_active_us), 2) + drr_active_us;
max_drr_mallregion_us = subvp_active_us - prefetch_us - mall_region_us;
max_drr_supported_us = max_drr_vblank_us > max_drr_mallregion_us ? max_drr_vblank_us : max_drr_mallregion_us;
- max_vtotal_supported = div64_s64(drr_timing->pix_clk_100hz * 100 * (div64_s64((int64_t)max_drr_supported_us, 1000000)),
- (int64_t)drr_timing->h_total);
+ max_vtotal_supported = div64_u64(((uint64_t)drr_timing->pix_clk_100hz * 100 * max_drr_supported_us),
+ (((uint64_t)drr_timing->h_total * 1000000)));
pipe_data->pipe_config.vblank_data.drr_info.min_vtotal_supported = min_vtotal_supported;
pipe_data->pipe_config.vblank_data.drr_info.max_vtotal_supported = max_vtotal_supported;
@@ -548,10 +546,12 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
- subvp0_prefetch_us = div64_s64((phantom_timing0->v_total - phantom_timing0->v_front_porch) * phantom_timing0->h_total,
- (int64_t)(phantom_timing0->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
- subvp1_prefetch_us = div64_s64((phantom_timing1->v_total - phantom_timing1->v_front_porch) * phantom_timing1->h_total,
- (int64_t)(phantom_timing1->pix_clk_100hz * 100) * 1000000 + dc->caps.subvp_prefetch_end_to_mall_start_us);
+ subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
+ (uint64_t)phantom_timing0->h_total * 1000000),
+ (((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
+ subvp1_prefetch_us = div64_u64(((uint64_t)(phantom_timing1->v_total - phantom_timing1->v_front_porch) *
+ (uint64_t)phantom_timing1->h_total * 1000000),
+ (((uint64_t)phantom_timing1->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
// Whichever SubVP PIPE has the smaller prefetch (including the prefetch end to mall start time)
// should increase its prefetch time to match the other
@@ -559,16 +559,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[1];
prefetch_delta_us = subvp0_prefetch_us - subvp1_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing1->pix_clk_100hz * 100) + phantom_timing1->h_total - 1),
- (int64_t)phantom_timing1->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing1->pix_clk_100hz * 100) + ((uint64_t)phantom_timing1->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing1->h_total * 1000000));
+
} else if (subvp1_prefetch_us > subvp0_prefetch_us) {
pipe_data = &cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[0];
prefetch_delta_us = subvp1_prefetch_us - subvp0_prefetch_us;
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us), 1000000)) *
- (phantom_timing0->pix_clk_100hz * 100) + phantom_timing0->h_total - 1),
- (int64_t)phantom_timing0->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_prefetch_end_to_mall_start_us + prefetch_delta_us) *
+ ((uint64_t)phantom_timing0->pix_clk_100hz * 100) + ((uint64_t)phantom_timing0->h_total * 1000000 - 1)),
+ ((uint64_t)phantom_timing0->h_total * 1000000));
}
}
@@ -630,13 +631,11 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
// Round up
pipe_data->pipe_config.subvp_data.prefetch_to_mall_start_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_prefetch_end_to_mall_start_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)dc->caps.subvp_prefetch_end_to_mall_start_us * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
pipe_data->pipe_config.subvp_data.processing_delay_lines =
- div64_s64(((div64_s64((int64_t)dc->caps.subvp_fw_processing_delay_us, 1000000)) *
- (phantom_timing->pix_clk_100hz * 100) + phantom_timing->h_total - 1),
- (int64_t)phantom_timing->h_total);
+ div64_u64(((uint64_t)(dc->caps.subvp_fw_processing_delay_us) * ((uint64_t)phantom_timing->pix_clk_100hz * 100) +
+ ((uint64_t)phantom_timing->h_total * 1000000 - 1)), ((uint64_t)phantom_timing->h_total * 1000000));
// Find phantom pipe index based on phantom stream
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index a788d160953b..ab70ebd8f223 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -104,6 +104,9 @@ static bool has_query_dp_alt(struct link_encoder *enc)
{
struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+ if (enc->ctx->dce_version >= DCN_VERSION_3_15)
+ return true;
+
/* Supports development firmware and firmware >= 4.0.11 */
return dc_dmub_srv &&
!(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
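The early return added above treats the DP-alt query as always supported from DCN 3.15 onward; for earlier ASICs the firmware-version window still decides. A standalone restatement of that window check, with a locally defined packing macro (illustrative only, not the kernel's DMUB_FW_VERSION):

#include <stdbool.h>
#include <stdint.h>

#define FW_VER(maj, min, rev) \
	(((uint32_t)(maj) << 24) | ((uint32_t)(min) << 16) | (uint32_t)(rev))

/* Supported on development firmware and on releases outside the
 * [4.0.0, 4.0.10] window, i.e. on 4.0.11 and newer.
 */
static bool fw_has_query_dp_alt(uint32_t fw_version)
{
	return !(fw_version >= FW_VER(4, 0, 0) &&
			fw_version <= FW_VER(4, 0, 10));
}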
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
index b384f30395d3..06d8638db696 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c
@@ -67,8 +67,7 @@ static void enc314_disable_fifo(struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- REG_UPDATE_2(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0,
- DIG_FIFO_READ_START_LEVEL, 0);
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
}
static void enc314_dp_set_odm_combine(
@@ -317,6 +316,7 @@ static void enc314_stream_encoder_dp_unblank(
/* switch DP encoder to CRTC data, but reset the fifo first. It may happen
* that it overflows during mode transition, and sometimes doesn't recover.
*/
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, 1);
udelay(10);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
index 0c7980266b85..38aa28ec6b13 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
@@ -98,7 +98,8 @@ static void optc314_set_odm_combine(struct timing_generator *optc, int *opp_id,
REG_UPDATE(OPTC_WIDTH_CONTROL,
OPTC_SEGMENT_WIDTH, mpcc_hactive);
- REG_SET(OTG_H_TIMING_CNTL, 0, OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
+ REG_UPDATE(OTG_H_TIMING_CNTL,
+ OTG_H_TIMING_DIV_MODE, opp_cnt - 1);
optc1->opp_count = opp_cnt;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
index 3a9e3870b3a9..2a2a4a9cc117 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
@@ -454,6 +454,7 @@ static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs
hpo_dp_stream_encoder_reg_list(0),
hpo_dp_stream_encoder_reg_list(1),
hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3)
};
static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
index a31c64b50410..0d5e8a441512 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
@@ -225,19 +225,19 @@ void dccg32_set_dpstreamclk(
case 0:
REG_UPDATE_2(DPSTREAMCLK_CNTL,
DPSTREAMCLK0_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, 0);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK0_SRC_SEL, otg_inst);
break;
case 1:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK1_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, 1);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK1_SRC_SEL, otg_inst);
break;
case 2:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK2_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, 2);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK2_SRC_SEL, otg_inst);
break;
case 3:
REG_UPDATE_2(DPSTREAMCLK_CNTL, DPSTREAMCLK3_EN,
- (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, 3);
+ (src == REFCLK) ? 0 : 1, DPSTREAMCLK3_SRC_SEL, otg_inst);
break;
default:
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
index 26648ce772da..38a48983f663 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_stream_encoder.c
@@ -310,6 +310,11 @@ static void enc32_stream_encoder_dp_unblank(
// TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON
REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000);
+ /* A read start level of 0 causes underflow/overflow and DIG_FIFO_ERROR = 1,
+ * so set it to 1/2 full (7) before the reset, as suggested by the hardware team.
+ */
+ REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
+
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1);
REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000);
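The DCN3.14 hunk above and this DCN3.2 hunk enforce the same ordering around the FIFO reset. A condensed sketch of that ordering; reg_write() stands in for the driver's REG_UPDATE/REG_WAIT macros and is purely illustrative:

/* Illustrative only -- the driver uses the REG_UPDATE/REG_WAIT macros. */
static void dig_fifo_reset_sketch(void (*reg_write)(const char *field, int val))
{
	/* half-full read start level first: resetting while the level
	 * is 0 is what raised DIG_FIFO_ERROR via underflow/overflow */
	reg_write("DIG_FIFO_READ_START_LEVEL", 0x7);
	reg_write("DIG_FIFO_RESET", 1);
	/* the real code then polls DIG_FIFO_RESET_DONE before unblanking */
}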
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
index 6ec1c52535b9..2038cbda33f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hubp.c
@@ -103,6 +103,11 @@ void hubp32_cursor_set_attributes(
enum cursor_lines_per_chunk lpc = hubp2_get_lines_per_chunk(
attr->width, attr->color_format);
+ // Round cursor width up to the next multiple of 64
+ uint32_t cursor_width = ((attr->width + 63) / 64) * 64;
+ uint32_t cursor_height = attr->height;
+ uint32_t cursor_size = cursor_width * cursor_height;
+
hubp->curs_attr = *attr;
REG_UPDATE(CURSOR_SURFACE_ADDRESS_HIGH,
@@ -126,7 +131,24 @@ void hubp32_cursor_set_attributes(
/* used to shift the cursor chunk request deadline */
CURSOR0_CHUNK_HDL_ADJUST, 3);
- if (attr->width * attr->height * 4 > 16384)
+ switch (attr->color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, true);
else
REG_UPDATE(DCHUBP_MALL_CONFIG, USE_MALL_FOR_CURSOR, false);
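This bytes-per-format switch now appears three times in the series (here, in dcn32_calculate_cab_allocation, and in dcn32_update_mall_sel). A hypothetical helper condensing it; the enum constants are stand-ins for the driver's cursor color formats, and the helper itself is not part of the driver:

#include <stdint.h>

enum cursor_fmt { FMT_MONO, FMT_32BPP, FMT_64BPP_FP };	/* stand-ins */

static uint32_t cursor_bytes(uint32_t width, uint32_t height, enum cursor_fmt f)
{
	uint32_t aligned_w = (width + 63) / 64 * 64;	/* next multiple of 64 */
	uint32_t px = aligned_w * height;

	switch (f) {
	case FMT_MONO:
		return px / 2;	/* matches the /= 2 case above */
	case FMT_32BPP:
		return px * 4;	/* the 1-bit-AND and (un)premultiplied ARGB cases */
	case FMT_64BPP_FP:
	default:
		return px * 8;	/* 64-bit floating-point formats */
	}
}

A 64x64 cursor at 4 bytes per pixel is exactly 16384 bytes, so only cursors larger than that trip the USE_MALL_FOR_CURSOR path above.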
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
index ebd3945c71f1..344fe7535df5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
@@ -295,24 +295,38 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
}
// Include cursor size for CAB allocation
- if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
- cursor_size = dc->caps.max_cursor_size * dc->caps.max_cursor_size;
- switch (stream->cursor_attributes.color_format) {
- case CURSOR_MODE_MONO:
- cursor_size /= 2;
- break;
- case CURSOR_MODE_COLOR_1BIT_AND:
- case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
- case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
- cursor_size *= 4;
- break;
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe = &ctx->res_ctx.pipe_ctx[j];
+ struct hubp *hubp = pipe->plane_res.hubp;
- case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
- case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
- cursor_size *= 8;
- break;
- }
- cache_lines_used += dcn32_cache_lines_for_surface(dc, surface_size,
+ if (pipe->stream && pipe->plane_state && hubp)
+ /* Find the cursor plane and use the exact size instead of
+ * using the max for the calculation
+ */
+ if (hubp->curs_attr.width > 0) {
+ cursor_size = hubp->curs_attr.width * hubp->curs_attr.height;
+ break;
+ }
+ }
+
+ switch (stream->cursor_attributes.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (stream->cursor_position.enable && plane->address.grph.cursor_cache_addr.quad_part) {
+ cache_lines_used += dcn32_cache_lines_for_surface(dc, cursor_size,
plane->address.grph.cursor_cache_addr.quad_part);
}
}
@@ -325,6 +339,26 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c
if (cache_lines_used % lines_per_way > 0)
num_ways++;
+ for (i = 0; i < ctx->stream_count; i++) {
+ stream = ctx->streams[i];
+ for (j = 0; j < ctx->stream_status[i].plane_count; j++) {
+ plane = ctx->stream_status[i].plane_states[j];
+
+ if (stream->cursor_position.enable && plane &&
+ !plane->address.grph.cursor_cache_addr.quad_part &&
+ cursor_size > 16384) {
+ /* Cursor caching is not supported since it won't be on the same line.
+ * So we need an extra line to accommodate it. With large cursors and a single 4k monitor
+ * this case triggers corruption. If we're at the edge, then don't trigger display refresh
+ * from MALL. We only need to cache the cursor if it is greater than 64x64 at 4 bpp.
+ */
+ num_ways++;
+ /* We only expect one cursor plane */
+ break;
+ }
+ }
+ }
+
return num_ways;
}
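A minimal sketch of the accounting this function ends with: cache lines round up to whole ways, and the new loop above reserves one extra way when a large cursor cannot be cached alongside the surface. The lines_per_way parameter is illustrative:

#include <stdint.h>

static uint32_t ways_needed(uint32_t cache_lines_used, uint32_t lines_per_way,
		int has_uncacheable_large_cursor)
{
	uint32_t num_ways = cache_lines_used / lines_per_way;

	if (cache_lines_used % lines_per_way > 0)
		num_ways++;	/* a partially used way still costs a way */
	if (has_uncacheable_large_cursor)
		num_ways++;	/* the > 16384-byte cursor case above */
	return num_ways;
}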
@@ -707,7 +741,29 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
struct hubp *hubp = pipe->plane_res.hubp;
if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) {
- if (hubp->curs_attr.width * hubp->curs_attr.height * 4 > 16384)
+ // Round cursor width up to the next multiple of 64
+ int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64;
+ int cursor_height = hubp->curs_attr.height;
+ int cursor_size = cursor_width * cursor_height;
+
+ switch (hubp->curs_attr.color_format) {
+ case CURSOR_MODE_MONO:
+ cursor_size /= 2;
+ break;
+ case CURSOR_MODE_COLOR_1BIT_AND:
+ case CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA:
+ case CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA:
+ cursor_size *= 4;
+ break;
+
+ case CURSOR_MODE_COLOR_64BIT_FP_PRE_MULTIPLIED:
+ case CURSOR_MODE_COLOR_64BIT_FP_UN_PRE_MULTIPLIED:
+ default:
+ cursor_size *= 8;
+ break;
+ }
+
+ if (cursor_size > 16384)
cache_cursor = true;
if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
index 8b887b552f2c..c3b783cea8a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
@@ -871,6 +871,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -2039,7 +2040,8 @@ static bool dcn32_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
index 1e7e6201c880..cf15d0e5e9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
@@ -30,6 +30,9 @@
#define DCN3_2_DET_SEG_SIZE 64
#define DCN3_2_MALL_MBLK_SIZE_BYTES 65536 // 64 * 1024
+#define DCN3_2_MBLK_WIDTH 128
+#define DCN3_2_MBLK_HEIGHT_4BPE 128
+#define DCN3_2_MBLK_HEIGHT_8BPE 64
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
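The three new constants encode a single invariant: an MBLK is one 64 KiB MALL allocation unit, so width x height x bytes-per-element must equal DCN3_2_MALL_MBLK_SIZE_BYTES at both pixel depths. A two-assert standalone sanity check:

#include <assert.h>

int main(void)
{
	assert(128 * 128 * 4 == 65536);	/* MBLK_WIDTH * MBLK_HEIGHT_4BPE * 4 bpe */
	assert(128 * 64 * 8 == 65536);	/* MBLK_WIDTH * MBLK_HEIGHT_8BPE * 8 bpe */
	return 0;
}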
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 955f52e6064d..1f195c5b3377 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -46,7 +46,6 @@
uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_state *context)
{
uint32_t num_ways = 0;
- uint32_t mall_region_pixels = 0;
uint32_t bytes_per_pixel = 0;
uint32_t cache_lines_used = 0;
uint32_t lines_per_way = 0;
@@ -54,20 +53,64 @@ uint32_t dcn32_helper_calculate_num_ways_for_subvp(struct dc *dc, struct dc_stat
uint32_t bytes_in_mall = 0;
uint32_t num_mblks = 0;
uint32_t cache_lines_per_plane = 0;
- uint32_t i = 0;
+ uint32_t i = 0, j = 0;
+ uint32_t mblk_width = 0;
+ uint32_t mblk_height = 0;
+ uint32_t full_vp_width_blk_aligned = 0;
+ uint32_t full_vp_height_blk_aligned = 0;
+ uint32_t mall_alloc_width_blk_aligned = 0;
+ uint32_t mall_alloc_height_blk_aligned = 0;
+ uint32_t full_vp_height = 0;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Find the phantom pipes
- if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
+ if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
- mall_region_pixels = pipe->plane_state->plane_size.surface_pitch * pipe->stream->timing.v_addressable;
+ struct pipe_ctx *main_pipe = NULL;
- // For bytes required in MALL, calculate based on number of MBlks required
- num_mblks = (mall_region_pixels * bytes_per_pixel +
- DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES;
+ /* Get full viewport height from main pipe (required for MBLK calculation) */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ main_pipe = &context->res_ctx.pipe_ctx[j];
+ if (main_pipe->stream == pipe->stream->mall_stream_config.paired_stream) {
+ full_vp_height = main_pipe->plane_res.scl_data.viewport.height;
+ break;
+ }
+ }
+
+ bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
+ mblk_width = DCN3_2_MBLK_WIDTH;
+ mblk_height = bytes_per_pixel == 4 ? DCN3_2_MBLK_HEIGHT_4BPE : DCN3_2_MBLK_HEIGHT_8BPE;
+
+ /* full_vp_width_blk_aligned = FLOOR(vp_x_start + full_vp_width + blk_width - 1, blk_width) -
+ * FLOOR(vp_x_start, blk_width)
+ */
+ full_vp_width_blk_aligned = ((pipe->plane_res.scl_data.viewport.x +
+ pipe->plane_res.scl_data.viewport.width + mblk_width - 1) / mblk_width * mblk_width) -
+ (pipe->plane_res.scl_data.viewport.x / mblk_width * mblk_width);
+
+ /* full_vp_height_blk_aligned = FLOOR(vp_y_start + full_vp_height + blk_height - 1, blk_height) -
+ * FLOOR(vp_y_start, blk_height)
+ */
+ full_vp_height_blk_aligned = ((pipe->plane_res.scl_data.viewport.y +
+ full_vp_height + mblk_height - 1) / mblk_height * mblk_height) -
+ (pipe->plane_res.scl_data.viewport.y / mblk_height * mblk_height);
+
+ /* mall_alloc_width_blk_aligned_l/c = full_vp_width_blk_aligned_l/c */
+ mall_alloc_width_blk_aligned = full_vp_width_blk_aligned;
+
+ /* mall_alloc_height_blk_aligned_l/c = CEILING(sub_vp_height_l/c - 1, blk_height_l/c) + blk_height_l/c */
+ mall_alloc_height_blk_aligned = (pipe->stream->timing.v_addressable - 1 + mblk_height - 1) /
+ mblk_height * mblk_height + mblk_height;
+
+ /* full_mblk_width_ub_l/c = mall_alloc_width_blk_aligned_l/c;
+ * full_mblk_height_ub_l/c = mall_alloc_height_blk_aligned_l/c;
+ * num_mblk_l/c = (full_mblk_width_ub_l/c / mblk_width_l/c) * (full_mblk_height_ub_l/c / mblk_height_l/c);
+ * (Should be divisible, but round up if not)
+ */
+ num_mblks = ((mall_alloc_width_blk_aligned + mblk_width - 1) / mblk_width) *
+ ((mall_alloc_height_blk_aligned + mblk_height - 1) / mblk_height);
bytes_in_mall = num_mblks * DCN3_2_MALL_MBLK_SIZE_BYTES;
// cache lines used is total bytes / cache_line size. Add +2 for worst case alignment
// (MALL is 64-byte aligned)
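The viewport terms above are each the difference between a rounded-up end and a rounded-down start, per the FLOOR comments. Restated in isolation with illustrative numbers:

#include <stdint.h>

/* MBLK-aligned span of a viewport: round the end up and the start
 * down to block boundaries, then subtract.
 */
static uint32_t blk_aligned_span(uint32_t start, uint32_t size, uint32_t blk)
{
	uint32_t end_up = (start + size + blk - 1) / blk * blk;
	uint32_t start_down = start / blk * blk;

	return end_up - start_down;
}

For example, blk_aligned_span(100, 1920, 128) is 2048: a 1920-wide viewport starting at x = 100 straddles 16 MBLK columns of 128 pixels each.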
@@ -144,7 +187,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->stream)
- continue;
+ return false;
if (!pipe->plane_state)
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
index c8b7d6ff38f4..7309eed33a61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
@@ -872,6 +872,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.exit_idle_opt_for_cursor_updates = true,
.enable_single_display_2to1_odm_policy = true,
.enable_dp_dig_pixel_rate_div_policy = 1,
+ .allow_sw_cursor_fallback = false,
};
static const struct dc_debug_options debug_defaults_diags = {
@@ -1651,7 +1652,8 @@ static bool dcn321_resource_construct(
dc->caps.max_downscale_ratio = 600;
dc->caps.i2c_speed_in_khz = 100;
dc->caps.i2c_speed_in_khz_hdcp = 100; /*1.4 w/a applied by default*/
- dc->caps.max_cursor_size = 256;
+ /* TODO: Bring max_cursor_size back to 256 after subvp cursor corruption is fixed */
+ dc->caps.max_cursor_size = 64;
dc->caps.min_horizontal_blanking_period = 80;
dc->caps.dmdata_alloc_size = 2048;
dc->caps.mall_size_per_mem_channel = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 86a3b5bfd699..cb81ed2fbd53 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_ccflags) $(frame_warn_flag)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn314/dcn314_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags)
@@ -123,6 +125,7 @@ DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/dcn30_fpu.o dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn314/display_mode_vba_314.o dcn314/display_rq_dlg_calc_314.o
DML += dcn32/display_mode_vba_32.o dcn32/display_rq_dlg_calc_32.o dcn32/display_mode_vba_util_32.o
DML += dcn31/dcn31_fpu.o
DML += dcn32/dcn32_fpu.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 876b321b30ca..1cb858dd6ea0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -6610,8 +6610,7 @@ static double CalculateUrgentLatency(
return ret;
}
-
-static void UseMinimumDCFCLK(
+static noinline_for_stack void UseMinimumDCFCLK(
struct display_mode_lib *mode_lib,
int MaxInterDCNTileRepeaters,
int MaxPrefetchMode,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index d63b4209b14c..8ca66f1644dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -251,33 +251,13 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe);
+ double DPTEBytesPerRow);
static double CalculateWriteBackDelay(
enum source_format_class WritebackPixelFormat,
double WritebackHRatio,
@@ -311,64 +291,28 @@ static void CalculateVupdateAndDynamicMetadataParameters(
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported);
+ double *Z8StutterEnterPlusExitWatermark);
static void CalculateDCFCLKDeepSleep(
struct display_mode_lib *mode_lib,
@@ -2904,33 +2848,13 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->UrgentExtraLatency,
v->UrgentLatency,
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
- v->PixelPTEBytesPerRow[k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->PixelPTEBytesPerRow[k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
@@ -3017,64 +2941,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
PrefetchMode,
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLK,
v->ReturnBW,
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgentLatency,
v->UrgentExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLK,
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->DCFCLKDeepSleep,
v->DETBufferSizeY,
v->DETBufferSizeC,
v->SwathHeightY,
v->SwathHeightC,
- v->LBBitPerPixel,
v->SwathWidthY,
v->SwathWidthC,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
&DRAMClockChangeSupport,
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
&v->StutterExitWatermark,
&v->StutterEnterPlusExitWatermark,
&v->Z8StutterExitWatermark,
- &v->Z8StutterEnterPlusExitWatermark,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &v->Z8StutterEnterPlusExitWatermark);
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->WritebackEnable[k] == true) {
@@ -3598,61 +3486,43 @@ static void CalculateRowBandwidth(
static void CalculateFlipSchedule(
struct display_mode_lib *mode_lib,
+ unsigned int k,
double HostVMInefficiencyFactor,
double UrgentExtraLatency,
double UrgentLatency,
- unsigned int GPUVMMaxPageTableLevels,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- bool GPUVMEnable,
- double HostVMMinPageSize,
double PDEAndMetaPTEBytesPerFrame,
double MetaRowBytes,
- double DPTEBytesPerRow,
- double BandwidthAvailableForImmediateFlip,
- unsigned int TotImmediateFlipBytes,
- enum source_format_class SourcePixelFormat,
- double LineTime,
- double VRatio,
- double VRatioChroma,
- double Tno_bw,
- bool DCCEnable,
- unsigned int dpte_row_height,
- unsigned int meta_row_height,
- unsigned int dpte_row_height_chroma,
- unsigned int meta_row_height_chroma,
- double *DestinationLinesToRequestVMInImmediateFlip,
- double *DestinationLinesToRequestRowInImmediateFlip,
- double *final_flip_bw,
- bool *ImmediateFlipSupportedForPipe)
+ double DPTEBytesPerRow)
{
+ struct vba_vars_st *v = &mode_lib->vba;
double min_row_time = 0.0;
unsigned int HostVMDynamicLevelsTrips;
double TimeForFetchingMetaPTEImmediateFlip;
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
+ double LineTime = v->HTotal[k] / v->PixelClock[k];
- if (GPUVMEnable == true && HostVMEnable == true) {
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true) {
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
} else {
HostVMDynamicLevelsTrips = 0;
}
- if (GPUVMEnable == true || DCCEnable == true) {
- ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * BandwidthAvailableForImmediateFlip / TotImmediateFlipBytes;
+ if (v->GPUVMEnable == true || v->DCCEnable[k] == true) {
+ ImmediateFlipBW = (PDEAndMetaPTEBytesPerFrame + MetaRowBytes + DPTEBytesPerRow) * v->BandwidthAvailableForImmediateFlip / v->TotImmediateFlipBytes;
}
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
TimeForFetchingMetaPTEImmediateFlip = dml_max3(
- Tno_bw + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
- UrgentExtraLatency + UrgentLatency * (GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
+ v->Tno_bw[k] + PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / ImmediateFlipBW,
+ UrgentExtraLatency + UrgentLatency * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1),
LineTime / 4.0);
} else {
TimeForFetchingMetaPTEImmediateFlip = 0;
}
- *DestinationLinesToRequestVMInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
- if ((GPUVMEnable == true || DCCEnable == true)) {
+ v->DestinationLinesToRequestVMInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingMetaPTEImmediateFlip / LineTime), 1) / 4.0;
+ if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
TimeForFetchingRowInVBlankImmediateFlip = dml_max3(
(MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / ImmediateFlipBW,
UrgentLatency * (HostVMDynamicLevelsTrips + 1),
@@ -3661,54 +3531,54 @@ static void CalculateFlipSchedule(
TimeForFetchingRowInVBlankImmediateFlip = 0;
}
- *DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
+ v->DestinationLinesToRequestRowInImmediateFlip[k] = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
- if (GPUVMEnable == true) {
- *final_flip_bw = dml_max(
- PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime),
- (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
- } else if ((GPUVMEnable == true || DCCEnable == true)) {
- *final_flip_bw = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (*DestinationLinesToRequestRowInImmediateFlip * LineTime);
+ if (v->GPUVMEnable == true) {
+ v->final_flip_bw[k] = dml_max(
+ PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (v->DestinationLinesToRequestVMInImmediateFlip[k] * LineTime),
+ (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime));
+ } else if ((v->GPUVMEnable == true || v->DCCEnable[k] == true)) {
+ v->final_flip_bw[k] = (MetaRowBytes + DPTEBytesPerRow * HostVMInefficiencyFactor) / (v->DestinationLinesToRequestRowInImmediateFlip[k] * LineTime);
} else {
- *final_flip_bw = 0;
+ v->final_flip_bw[k] = 0;
}
- if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10 || SourcePixelFormat == dm_rgbe_alpha) {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, dpte_row_height_chroma * LineTime / VRatioChroma);
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = dml_min(meta_row_height * LineTime / VRatio, meta_row_height_chroma * LineTime / VRatioChroma);
+ if (v->SourcePixelFormat[k] == dm_420_8 || v->SourcePixelFormat[k] == dm_420_10 || v->SourcePixelFormat[k] == dm_rgbe_alpha) {
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = dml_min(v->meta_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / VRatioChroma,
- meta_row_height_chroma * LineTime / VRatioChroma);
+ v->dpte_row_height[k] * LineTime / v->VRatio[k],
+ v->meta_row_height[k] * LineTime / v->VRatio[k],
+ v->dpte_row_height_chroma[k] * LineTime / v->VRatioChroma[k],
+ v->meta_row_height_chroma[k] * LineTime / v->VRatioChroma[k]);
}
} else {
- if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
- } else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ if (v->GPUVMEnable == true && v->DCCEnable[k] != true) {
+ min_row_time = v->dpte_row_height[k] * LineTime / v->VRatio[k];
+ } else if (v->GPUVMEnable != true && v->DCCEnable[k] == true) {
+ min_row_time = v->meta_row_height[k] * LineTime / v->VRatio[k];
} else {
- min_row_time = dml_min(dpte_row_height * LineTime / VRatio, meta_row_height * LineTime / VRatio);
+ min_row_time = dml_min(v->dpte_row_height[k] * LineTime / v->VRatio[k], v->meta_row_height[k] * LineTime / v->VRatio[k]);
}
}
- if (*DestinationLinesToRequestVMInImmediateFlip >= 32 || *DestinationLinesToRequestRowInImmediateFlip >= 16
+ if (v->DestinationLinesToRequestVMInImmediateFlip[k] >= 32 || v->DestinationLinesToRequestRowInImmediateFlip[k] >= 16
|| TimeForFetchingMetaPTEImmediateFlip + 2 * TimeForFetchingRowInVBlankImmediateFlip > min_row_time) {
- *ImmediateFlipSupportedForPipe = false;
+ v->ImmediateFlipSupportedForPipe[k] = false;
} else {
- *ImmediateFlipSupportedForPipe = true;
+ v->ImmediateFlipSupportedForPipe[k] = true;
}
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestVMInImmediateFlip);
- dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, *DestinationLinesToRequestRowInImmediateFlip);
+ dml_print("DML::%s: DestinationLinesToRequestVMInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestVMInImmediateFlip[k]);
+ dml_print("DML::%s: DestinationLinesToRequestRowInImmediateFlip = %f\n", __func__, v->DestinationLinesToRequestRowInImmediateFlip[k]);
dml_print("DML::%s: TimeForFetchingMetaPTEImmediateFlip = %f\n", __func__, TimeForFetchingMetaPTEImmediateFlip);
dml_print("DML::%s: TimeForFetchingRowInVBlankImmediateFlip = %f\n", __func__, TimeForFetchingRowInVBlankImmediateFlip);
dml_print("DML::%s: min_row_time = %f\n", __func__, min_row_time);
- dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, *ImmediateFlipSupportedForPipe);
+ dml_print("DML::%s: ImmediateFlipSupportedForPipe = %d\n", __func__, v->ImmediateFlipSupportedForPipe[k]);
#endif
}
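The shape of this refactor, passing the shared vba state plus a plane index k instead of two dozen per-plane scalars and out-pointers, is the same one applied to CalculateWatermarksAndDRAMSpeedChangeSupport in the rest of this file, and a refactor of this shape reduces argument copying and stack pressure in these very large DML callers. A reduced sketch of the pattern (types and names are illustrative, not the driver's):

struct vba_sketch {
	double HTotal[8];
	double PixelClock[8];
	double final_flip_bw[8];	/* former out-pointer, now stored in place */
};

static void flip_schedule_sketch(struct vba_sketch *v, unsigned int k,
		double bytes, double lines)
{
	double line_time = v->HTotal[k] / v->PixelClock[k];	/* was a LineTime argument */

	v->final_flip_bw[k] = bytes / (lines * line_time);
}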
@@ -5300,33 +5170,13 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
for (k = 0; k < v->NumberOfActivePlanes; k++) {
CalculateFlipSchedule(
mode_lib,
+ k,
HostVMInefficiencyFactor,
v->ExtraLatency,
v->UrgLatency[i],
- v->GPUVMMaxPageTableLevels,
- v->HostVMEnable,
- v->HostVMMaxNonCachedPageTableLevels,
- v->GPUVMEnable,
- v->HostVMMinPageSize,
v->PDEAndMetaPTEBytesPerFrame[i][j][k],
v->MetaRowBytes[i][j][k],
- v->DPTEBytesPerRow[i][j][k],
- v->BandwidthAvailableForImmediateFlip,
- v->TotImmediateFlipBytes,
- v->SourcePixelFormat[k],
- v->HTotal[k] / v->PixelClock[k],
- v->VRatio[k],
- v->VRatioChroma[k],
- v->Tno_bw[k],
- v->DCCEnable[k],
- v->dpte_row_height[k],
- v->meta_row_height[k],
- v->dpte_row_height_chroma[k],
- v->meta_row_height_chroma[k],
- &v->DestinationLinesToRequestVMInImmediateFlip[k],
- &v->DestinationLinesToRequestRowInImmediateFlip[k],
- &v->final_flip_bw[k],
- &v->ImmediateFlipSupportedForPipe[k]);
+ v->DPTEBytesPerRow[i][j][k]);
}
v->total_dcn_read_bw_with_flip = 0.0;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
@@ -5384,64 +5234,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
CalculateWatermarksAndDRAMSpeedChangeSupport(
mode_lib,
v->PrefetchModePerState[i][j],
- v->NumberOfActivePlanes,
- v->MaxLineBufferLines,
- v->LineBufferSize,
- v->WritebackInterfaceBufferSize,
v->DCFCLKState[i][j],
v->ReturnBWPerState[i][j],
- v->SynchronizedVBlank,
- v->dpte_group_bytes,
- v->MetaChunkSize,
v->UrgLatency[i],
v->ExtraLatency,
- v->WritebackLatency,
- v->WritebackChunkSize,
v->SOCCLKPerState[i],
- v->DRAMClockChangeLatency,
- v->SRExitTime,
- v->SREnterPlusExitTime,
- v->SRExitZ8Time,
- v->SREnterPlusExitZ8Time,
v->ProjectedDCFCLKDeepSleep[i][j],
v->DETBufferSizeYThisState,
v->DETBufferSizeCThisState,
v->SwathHeightYThisState,
v->SwathHeightCThisState,
- v->LBBitPerPixel,
v->SwathWidthYThisState,
v->SwathWidthCThisState,
- v->HRatio,
- v->HRatioChroma,
- v->vtaps,
- v->VTAPsChroma,
- v->VRatio,
- v->VRatioChroma,
- v->HTotal,
- v->PixelClock,
- v->BlendingAndTiming,
v->NoOfDPPThisState,
v->BytePerPixelInDETY,
v->BytePerPixelInDETC,
- v->DSTXAfterScaler,
- v->DSTYAfterScaler,
- v->WritebackEnable,
- v->WritebackPixelFormat,
- v->WritebackDestinationWidth,
- v->WritebackDestinationHeight,
- v->WritebackSourceHeight,
UnboundedRequestEnabledThisState,
CompressedBufferSizeInkByteThisState,
&v->DRAMClockChangeSupport[i][j],
- &v->UrgentWatermark,
- &v->WritebackUrgentWatermark,
- &v->DRAMClockChangeWatermark,
- &v->WritebackDRAMClockChangeWatermark,
- &dummy,
&dummy,
&dummy,
&dummy,
- &v->MinActiveDRAMClockChangeLatencySupported);
+ &dummy);
}
}
@@ -5566,64 +5380,28 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
static void CalculateWatermarksAndDRAMSpeedChangeSupport(
struct display_mode_lib *mode_lib,
unsigned int PrefetchMode,
- unsigned int NumberOfActivePlanes,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizedVBlank,
- unsigned int dpte_group_bytes[],
- unsigned int MetaChunkSize,
double UrgentLatency,
double ExtraLatency,
- double WritebackLatency,
- double WritebackChunkSize,
double SOCCLK,
- double DRAMClockChangeLatency,
- double SRExitTime,
- double SREnterPlusExitTime,
- double SRExitZ8Time,
- double SREnterPlusExitZ8Time,
double DCFCLKDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int vtaps[],
- unsigned int VTAPsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerPlane[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
- double DSTXAfterScaler[],
- double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
int unsigned CompressedBufferSizeInkByte,
enum clock_change_support *DRAMClockChangeSupport,
- double *UrgentWatermark,
- double *WritebackUrgentWatermark,
- double *DRAMClockChangeWatermark,
- double *WritebackDRAMClockChangeWatermark,
double *StutterExitWatermark,
double *StutterEnterPlusExitWatermark,
double *Z8StutterExitWatermark,
- double *Z8StutterEnterPlusExitWatermark,
- double *MinActiveDRAMClockChangeLatencySupported)
+ double *Z8StutterEnterPlusExitWatermark)
{
struct vba_vars_st *v = &mode_lib->vba;
double EffectiveLBLatencyHidingY;
@@ -5643,103 +5421,103 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
double TotalPixelBW = 0.0;
int k, j;
- *UrgentWatermark = UrgentLatency + ExtraLatency;
+ v->UrgentWatermark = UrgentLatency + ExtraLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, ExtraLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, *UrgentWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->UrgentWatermark);
#endif
- *DRAMClockChangeWatermark = DRAMClockChangeLatency + *UrgentWatermark;
+ v->DRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->UrgentWatermark;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, DRAMClockChangeLatency);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, *DRAMClockChangeWatermark);
+ dml_print("DML::%s: v->DRAMClockChangeLatency = %f\n", __func__, v->DRAMClockChangeLatency);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->DRAMClockChangeWatermark);
#endif
v->TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (WritebackEnable[k] == true) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->WritebackEnable[k] == true) {
v->TotalActiveWriteback = v->TotalActiveWriteback + 1;
}
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackUrgentWatermark = WritebackLatency;
+ v->WritebackUrgentWatermark = v->WritebackLatency;
} else {
- *WritebackUrgentWatermark = WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackUrgentWatermark = v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
if (v->TotalActiveWriteback <= 1) {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency;
} else {
- *WritebackDRAMClockChangeWatermark = DRAMClockChangeLatency + WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->WritebackDRAMClockChangeWatermark = v->DRAMClockChangeLatency + v->WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
TotalPixelBW = TotalPixelBW
- + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k])
- / (HTotal[k] / PixelClock[k]);
+ + DPPPerPlane[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] + SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k])
+ / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
double EffectiveDETBufferSizeY = DETBufferSizeY[k];
v->LBLatencyHidingSourceLinesY = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (vtaps[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
v->LBLatencyHidingSourceLinesC = dml_min(
- (double) MaxLineBufferLines,
- dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTAPsChroma[k] - 1);
+ (double) v->MaxLineBufferLines,
+ dml_floor(v->LineBufferSize / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
- EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / VRatio[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = v->LBLatencyHidingSourceLinesY / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
- EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingC = v->LBLatencyHidingSourceLinesC / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
- + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ + CompressedBufferSizeInkByte * 1024 * SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
if (BytePerPixelDETC[k] > 0) {
LinesInDETC = v->DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath = dml_floor(LinesInDETC, SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (HTotal[k] / PixelClock[k]) / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath * (v->HTotal[k] / v->PixelClock[k]) / v->VRatioChroma[k];
} else {
LinesInDETC = 0;
FullDETBufferingTimeC = 999999;
}
ActiveDRAMClockChangeLatencyMarginY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginY = ActiveDRAMClockChangeLatencyMarginY
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightY[k] * HTotal[k] / PixelClock[k] / VRatio[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
ActiveDRAMClockChangeLatencyMarginC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - ((double) DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k] - *UrgentWatermark - *DRAMClockChangeWatermark;
+ - ((double) v->DSTXAfterScaler[k] / v->HTotal[k] + v->DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k] - v->UrgentWatermark - v->DRAMClockChangeWatermark;
- if (NumberOfActivePlanes > 1) {
+ if (v->NumberOfActivePlanes > 1) {
ActiveDRAMClockChangeLatencyMarginC = ActiveDRAMClockChangeLatencyMarginC
- - (1 - 1.0 / NumberOfActivePlanes) * SwathHeightC[k] * HTotal[k] / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1.0 / v->NumberOfActivePlanes) * SwathHeightC[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatioChroma[k];
}
v->ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMarginY, ActiveDRAMClockChangeLatencyMarginC);
} else {
v->ActiveDRAMClockChangeLatencyMargin[k] = ActiveDRAMClockChangeLatencyMarginY;
}
- if (WritebackEnable[k] == true) {
- WritebackDRAMClockChangeLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k] / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64) {
+ if (v->WritebackEnable[k] == true) {
+ WritebackDRAMClockChangeLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k] / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64) {
WritebackDRAMClockChangeLatencyHiding = WritebackDRAMClockChangeLatencyHiding / 2;
}
WritebackDRAMClockChangeLatencyMargin = WritebackDRAMClockChangeLatencyHiding - v->WritebackDRAMClockChangeWatermark;
@@ -5749,14 +5527,14 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->MinActiveDRAMClockChangeMargin = 999999;
PlaneWithMinActiveDRAMClockChangeMargin = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->ActiveDRAMClockChangeLatencyMargin[k] < v->MinActiveDRAMClockChangeMargin) {
v->MinActiveDRAMClockChangeMargin = v->ActiveDRAMClockChangeLatencyMargin[k];
- if (BlendingAndTiming[k] == k) {
+ if (v->BlendingAndTiming[k] == k) {
PlaneWithMinActiveDRAMClockChangeMargin = k;
} else {
- for (j = 0; j < NumberOfActivePlanes; ++j) {
- if (BlendingAndTiming[k] == j) {
+ for (j = 0; j < v->NumberOfActivePlanes; ++j) {
+ if (v->BlendingAndTiming[k] == j) {
PlaneWithMinActiveDRAMClockChangeMargin = j;
}
}
@@ -5764,11 +5542,11 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
}
}
- *MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + DRAMClockChangeLatency;
+ v->MinActiveDRAMClockChangeLatencySupported = v->MinActiveDRAMClockChangeMargin + v->DRAMClockChangeLatency;
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (BlendingAndTiming[k] == k)) && !(BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (v->BlendingAndTiming[k] == k)) && !(v->BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
&& v->ActiveDRAMClockChangeLatencyMargin[k] < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = v->ActiveDRAMClockChangeLatencyMargin[k];
}
@@ -5776,25 +5554,25 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
v->TotalNumberOfActiveOTG = 0;
- for (k = 0; k < NumberOfActivePlanes; ++k) {
- if (BlendingAndTiming[k] == k) {
+ for (k = 0; k < v->NumberOfActivePlanes; ++k) {
+ if (v->BlendingAndTiming[k] == k) {
v->TotalNumberOfActiveOTG = v->TotalNumberOfActiveOTG + 1;
}
}
if (v->MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vactive;
- } else if ((SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
+ } else if ((v->SynchronizedVBlank == true || v->TotalNumberOfActiveOTG == 1
|| SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0) {
*DRAMClockChangeSupport = dm_dram_clock_change_vblank;
} else {
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- *StutterExitWatermark = SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
- *StutterEnterPlusExitWatermark = (SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
- *Z8StutterExitWatermark = SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
- *Z8StutterEnterPlusExitWatermark = SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterExitWatermark = v->SRExitTime + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *StutterEnterPlusExitWatermark = (v->SREnterPlusExitTime + ExtraLatency + 10 / DCFCLKDeepSleep);
+ *Z8StutterExitWatermark = v->SRExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
+ *Z8StutterEnterPlusExitWatermark = v->SREnterPlusExitZ8Time + ExtraLatency + 10 / DCFCLKDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, *StutterExitWatermark);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
index 34a5d0f87b5f..4bb3b31ea7e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c
@@ -194,6 +194,9 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
dcn3_14_ip.max_num_otg = dc->res_pool->res_cap->num_timing_generator;
dcn3_14_ip.max_num_dpp = dc->res_pool->pipe_count;
+ if (bw_params->dram_channel_width_bytes > 0)
+ dcn3_14_soc.dram_channel_width_bytes = bw_params->dram_channel_width_bytes;
+
if (bw_params->num_channels > 0)
dcn3_14_soc.num_chans = bw_params->num_channels;
@@ -262,7 +265,7 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p
}
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31);
+ dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN314);
else
dml_init_instance(&dc->dml, &dcn3_14_soc, &dcn3_14_ip, DML_PROJECT_DCN31_FPGA);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
index fc4d7474c111..01f3fad172f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/display_mode_vba_314.c
@@ -61,7 +61,7 @@
// fudge factor for min dcfclk calculation
#define __DML_MIN_DCFCLK_FACTOR__ 1.15
-struct {
+typedef struct {
double DPPCLK;
double DISPCLK;
double PixelClock;
@@ -1599,7 +1599,7 @@ static void CalculateDCCConfiguration(
int segment_order_vert_contiguous_luma;
int segment_order_vert_contiguous_chroma;
- enum {
+ typedef enum {
REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA
} RequestType;
RequestType RequestLuma;
@@ -4071,9 +4071,7 @@ void dml314_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_
v->SourceFormatPixelAndScanSupport = true;
for (k = 0; k < v->NumberOfActivePlanes; k++) {
- if ((v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true))
- || ((v->SurfaceTiling[k] == dm_sw_64kb_d || v->SurfaceTiling[k] == dm_sw_64kb_d_t
- || v->SurfaceTiling[k] == dm_sw_64kb_d_x) && !(v->SourcePixelFormat[k] == dm_444_64))) {
+ if (v->SurfaceTiling[k] == dm_sw_linear && (!(v->SourceScan[k] != dm_vert) || v->DCCEnable[k] == true)) {
v->SourceFormatPixelAndScanSupport = false;
}
}
@@ -7157,12 +7155,13 @@ static double CalculateExtraLatencyBytes(
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 1);
else
HostVMDynamicLevels = dml_max(0, (int) HostVMMaxNonCachedPageTableLevels - 2);
- else
+ } else {
HostVMDynamicLevels = 0;
+ }
ret = ReorderingBytes + (TotalNumberOfActiveDPP * PixelChunkSizeInKByte + TotalNumberOfDCCActiveDPP * MetaChunkSize) * 1024.0;
- if (GPUVMEnable == true)
+ if (GPUVMEnable == true) {
for (k = 0; k < NumberOfActivePlanes; ++k)
ret = ret + NumberOfDPP[k] * dpte_group_bytes[k] * (1 + 8 * HostVMDynamicLevels) * HostVMInefficiencyFactor;
}
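The brace fixes above address the classic dangling-else / stray-brace hazard: without braces, an else binds to the nearest unmatched if, and a second statement silently falls outside the condition. A reduced illustration of the first case, not the DML code itself:

static int pick(int outer, int inner)
{
	int r = 0;

	if (outer)
		if (inner)
			r = 1;
		else		/* binds to "if (inner)", not "if (outer)" */
			r = 2;
	return r;
}

With braces on the outer body, as the fixed code now has, the else pairs with the outer condition instead.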
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index 8118cfc5b405..8e4c9d0887ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -1014,6 +1014,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
+ // Re-initialize the viewport after the pipe merge
+ for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->plane_state || !pipe_ctx->stream)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
while (!found_supported_config && dcn32_enough_pipes_for_subvp(dc, context) &&
dcn32_assign_subvp_pipe(dc, context, &dc_pipe_idx)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index cb2025771646..9a60f27eceaa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -755,30 +755,18 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelY = v->BytePerPixelY[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.BytePerPixelC = v->BytePerPixelC[k];
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe.ProgressiveToInterlaceUnitInOPP = mode_lib->vba.ProgressiveToInterlaceUnitInOPP;
- v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
- &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe, v->DSCDelay[k],
- mode_lib->vba.DPPCLKDelaySubtotal + mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- (unsigned int) (v->SwathWidthY[k] / mode_lib->vba.HRatio[k]),
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
+ v->ErrorResult[k] = dml32_CalculatePrefetchSchedule(
+ v,
+ k,
+ v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.HostVMInefficiencyFactor,
+ &v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.myPipe,
+ v->DSCDelay[k],
+ (unsigned int) (v->SwathWidthY[k] / v->HRatio[k]),
dml_min(v->VStartupLines, v->MaxVStartupLines[k]),
v->MaxVStartupLines[k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable,
- mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
v->UrgentLatency,
v->UrgentExtraLatency,
- mode_lib->vba.TCalc,
+ v->TCalc,
v->PDEAndMetaPTEBytesFrame[k],
v->MetaRowByte[k],
v->PixelPTEBytesPerRow[k],
@@ -792,8 +780,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->MaxNumSwathC[k],
v->swath_width_luma_ub[k],
v->swath_width_chroma_ub[k],
- mode_lib->vba.SwathHeightY[k],
- mode_lib->vba.SwathHeightC[k],
+ v->SwathHeightY[k],
+ v->SwathHeightC[k],
TWait,
/* Output */
&v->DSTXAfterScaler[k],
@@ -1163,58 +1151,28 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters.SMNLatency = mode_lib->vba.SMNLatency;
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLK,
- mode_lib->vba.ReturnBW,
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- v->dpte_group_bytes,
- v->meta_row_height,
- v->meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[v->VoltageLevel][v->maxMpcComb],
+ v->DCFCLK,
+ v->ReturnBW,
v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.mmSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLK,
+ v->SOCCLK,
v->DCFCLKDeepSleep,
- mode_lib->vba.DETBufferSizeY,
- mode_lib->vba.DETBufferSizeC,
- mode_lib->vba.SwathHeightY,
- mode_lib->vba.SwathHeightC,
- mode_lib->vba.LBBitPerPixel,
+ v->DETBufferSizeY,
+ v->DETBufferSizeC,
+ v->SwathHeightY,
+ v->SwathHeightC,
v->SwathWidthY,
v->SwathWidthC,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.DPPPerPlane,
+ v->DPPPerPlane,
v->BytePerPixelDETY,
v->BytePerPixelDETC,
v->DSTXAfterScaler,
v->DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
v->UnboundedRequestEnabled,
v->CompressedBufferSizeInkByte,
/* Output */
- &v->Watermark,
&v->dummy_vars.DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation.dummy_dramchange_support,
v->MaxActiveDRAMClockChangeLatencySupported,
v->SubViewportLinesNeededInMALL,
@@ -1806,10 +1764,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
&mode_lib->vba.Read256BlockHeightC[k],
&mode_lib->vba.Read256BlockWidthY[k],
&mode_lib->vba.Read256BlockWidthC[k],
- &mode_lib->vba.MicroTileHeightY[k],
- &mode_lib->vba.MicroTileHeightC[k],
- &mode_lib->vba.MicroTileWidthY[k],
- &mode_lib->vba.MicroTileWidthC[k]);
+ &mode_lib->vba.MacroTileHeightY[k],
+ &mode_lib->vba.MacroTileHeightC[k],
+ &mode_lib->vba.MacroTileWidthY[k],
+ &mode_lib->vba.MacroTileWidthC[k]);
}
/* Bandwidth Support Check */
@@ -2659,10 +2617,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.Read256BlockWidthC,
mode_lib->vba.Read256BlockHeightY,
mode_lib->vba.Read256BlockHeightC,
- mode_lib->vba.MicroTileWidthY,
- mode_lib->vba.MicroTileWidthC,
- mode_lib->vba.MicroTileHeightY,
- mode_lib->vba.MicroTileHeightC,
+ mode_lib->vba.MacroTileWidthY,
+ mode_lib->vba.MacroTileWidthC,
+ mode_lib->vba.MacroTileHeightY,
+ mode_lib->vba.MacroTileHeightC,
/* Output */
mode_lib->vba.SurfaceSizeInMALL,
@@ -2709,10 +2667,10 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesY = mode_lib->vba.Read256BlockHeightY[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidth256BytesC = mode_lib->vba.Read256BlockWidthC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeight256BytesC = mode_lib->vba.Read256BlockHeightC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MicroTileWidthY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MicroTileHeightY[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MicroTileWidthC[k];
- v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MicroTileHeightC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthY = mode_lib->vba.MacroTileWidthY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightY = mode_lib->vba.MacroTileHeightY[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockWidthC = mode_lib->vba.MacroTileWidthC[k];
+ v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].BlockHeightC = mode_lib->vba.MacroTileHeightC[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].InterlaceEnable = mode_lib->vba.Interlace[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].HTotal = mode_lib->vba.HTotal[k];
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.SurfParameters[k].DCCEnable = mode_lib->vba.DCCEnable[k];
@@ -3258,63 +3216,47 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.NoTimeForPrefetch[i][j][k] =
dml32_CalculatePrefetchSchedule(
+ v,
+ k,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.HostVMInefficiencyFactor,
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe,
- mode_lib->vba.DSCDelayPerState[i][k],
- mode_lib->vba.DPPCLKDelaySubtotal +
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- mode_lib->vba.SwathWidthYThisState[k] /
- mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup,
- mode_lib->vba.MaximumVStartup[i][j][k]),
- mode_lib->vba.MaximumVStartup[i][j][k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable, mode_lib->vba.HostVMEnable,
- mode_lib->vba.HostVMMaxNonCachedPageTableLevels,
- mode_lib->vba.HostVMMinPageSize,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataVMEnabled,
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.UrgLatency[i],
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.PDEAndMetaPTEBytesPerFrame[i][j][k],
- mode_lib->vba.MetaRowBytes[i][j][k],
- mode_lib->vba.DPTEBytesPerRow[i][j][k],
- mode_lib->vba.PrefetchLinesY[i][j][k],
- mode_lib->vba.SwathWidthYThisState[k],
- mode_lib->vba.PrefillY[k],
- mode_lib->vba.MaxNumSwY[k],
- mode_lib->vba.PrefetchLinesC[i][j][k],
- mode_lib->vba.SwathWidthCThisState[k],
- mode_lib->vba.PrefillC[k],
- mode_lib->vba.MaxNumSwC[k],
- mode_lib->vba.swath_width_luma_ub_this_state[k],
- mode_lib->vba.swath_width_chroma_ub_this_state[k],
- mode_lib->vba.SwathHeightYThisState[k],
- mode_lib->vba.SwathHeightCThisState[k], mode_lib->vba.TWait,
+ v->DSCDelayPerState[i][k],
+ v->SwathWidthYThisState[k] / v->HRatio[k],
+ dml_min(v->MaxVStartup, v->MaximumVStartup[i][j][k]),
+ v->MaximumVStartup[i][j][k],
+ v->UrgLatency[i],
+ v->ExtraLatency,
+ v->TimeCalc,
+ v->PDEAndMetaPTEBytesPerFrame[i][j][k],
+ v->MetaRowBytes[i][j][k],
+ v->DPTEBytesPerRow[i][j][k],
+ v->PrefetchLinesY[i][j][k],
+ v->SwathWidthYThisState[k],
+ v->PrefillY[k],
+ v->MaxNumSwY[k],
+ v->PrefetchLinesC[i][j][k],
+ v->SwathWidthCThisState[k],
+ v->PrefillC[k],
+ v->MaxNumSwC[k],
+ v->swath_width_luma_ub_this_state[k],
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k], v->TWait,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler[k],
- &mode_lib->vba.LineTimesForPrefetch[k],
- &mode_lib->vba.PrefetchBW[k],
- &mode_lib->vba.LinesForMetaPTE[k],
- &mode_lib->vba.LinesForMetaAndDPTERow[k],
- &mode_lib->vba.VRatioPreY[i][j][k],
- &mode_lib->vba.VRatioPreC[i][j][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWLuma[0][0][k],
- &mode_lib->vba.RequiredPrefetchPixelDataBWChroma[0][0][k],
- &mode_lib->vba.NoTimeForDynamicMetadata[i][j][k],
- &mode_lib->vba.Tno_bw[k],
- &mode_lib->vba.prefetch_vmrow_bw[k],
+ &v->LineTimesForPrefetch[k],
+ &v->PrefetchBW[k],
+ &v->LinesForMetaPTE[k],
+ &v->LinesForMetaAndDPTERow[k],
+ &v->VRatioPreY[i][j][k],
+ &v->VRatioPreC[i][j][k],
+ &v->RequiredPrefetchPixelDataBWLuma[0][0][k],
+ &v->RequiredPrefetchPixelDataBWChroma[0][0][k],
+ &v->NoTimeForDynamicMetadata[i][j][k],
+ &v->Tno_bw[k],
+ &v->prefetch_vmrow_bw[k],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[0], // double *Tdmdl_vm
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[1], // double *Tdmdl
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single[2], // double *TSetup
@@ -3557,62 +3499,32 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- mode_lib->vba.USRRetrainingRequiredFinal,
- mode_lib->vba.UsesMALLForPStateChange,
- mode_lib->vba.PrefetchModePerState[i][j],
- mode_lib->vba.NumberOfActiveSurfaces,
- mode_lib->vba.MaxLineBufferLines,
- mode_lib->vba.LineBufferSizeFinal,
- mode_lib->vba.WritebackInterfaceBufferSize,
- mode_lib->vba.DCFCLKState[i][j],
- mode_lib->vba.ReturnBWPerState[i][j],
- mode_lib->vba.SynchronizeTimingsFinal,
- mode_lib->vba.SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- mode_lib->vba.DRRDisplay,
- mode_lib->vba.dpte_group_bytes,
- mode_lib->vba.meta_row_height,
- mode_lib->vba.meta_row_height_chroma,
+ v,
+ v->PrefetchModePerState[i][j],
+ v->DCFCLKState[i][j],
+ v->ReturnBWPerState[i][j],
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.mSOCParameters,
- mode_lib->vba.WritebackChunkSize,
- mode_lib->vba.SOCCLKPerState[i],
- mode_lib->vba.ProjectedDCFCLKDeepSleep[i][j],
- mode_lib->vba.DETBufferSizeYThisState,
- mode_lib->vba.DETBufferSizeCThisState,
- mode_lib->vba.SwathHeightYThisState,
- mode_lib->vba.SwathHeightCThisState,
- mode_lib->vba.LBBitPerPixel,
- mode_lib->vba.SwathWidthYThisState, // 24
- mode_lib->vba.SwathWidthCThisState,
- mode_lib->vba.HRatio,
- mode_lib->vba.HRatioChroma,
- mode_lib->vba.vtaps,
- mode_lib->vba.VTAPsChroma,
- mode_lib->vba.VRatio,
- mode_lib->vba.VRatioChroma,
- mode_lib->vba.HTotal,
- mode_lib->vba.VTotal,
- mode_lib->vba.VActive,
- mode_lib->vba.PixelClock,
- mode_lib->vba.BlendingAndTiming,
- mode_lib->vba.NoOfDPPThisState,
- mode_lib->vba.BytePerPixelInDETY,
- mode_lib->vba.BytePerPixelInDETC,
+ v->SOCCLKPerState[i],
+ v->ProjectedDCFCLKDeepSleep[i][j],
+ v->DETBufferSizeYThisState,
+ v->DETBufferSizeCThisState,
+ v->SwathHeightYThisState,
+ v->SwathHeightCThisState,
+ v->SwathWidthYThisState, // 24
+ v->SwathWidthCThisState,
+ v->NoOfDPPThisState,
+ v->BytePerPixelInDETY,
+ v->BytePerPixelInDETC,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler,
v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTYAfterScaler,
- mode_lib->vba.WritebackEnable,
- mode_lib->vba.WritebackPixelFormat,
- mode_lib->vba.WritebackDestinationWidth,
- mode_lib->vba.WritebackDestinationHeight,
- mode_lib->vba.WritebackSourceHeight,
- mode_lib->vba.UnboundedRequestEnabledThisState,
- mode_lib->vba.CompressedBufferSizeInkByteThisState,
+ v->UnboundedRequestEnabledThisState,
+ v->CompressedBufferSizeInkByteThisState,
/* Output */
- &mode_lib->vba.Watermark, // Store the values in vba
- &mode_lib->vba.DRAMClockChangeSupport[i][j],
+ &v->DRAMClockChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[0], // double *MaxActiveDRAMClockChangeLatencySupported
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_integer[0], // Long SubViewportLinesNeededInMALL[]
- &mode_lib->vba.FCLKChangeSupport[i][j],
+ &v->FCLKChangeSupport[i][j],
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.dummy_single2[1], // double *MinActiveFCLKChangeLatencySupported
&mode_lib->vba.USRRetrainingSupport[i][j],
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin);
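
Every conversion in this file follows the same shape: instead of peeling dozens of scalars and arrays out of mode_lib->vba at each call site, the callee receives the vba_vars_st pointer (plus a plane index k where needed) and indexes the per-plane arrays itself. A minimal sketch of that refactor, with hypothetical names:

#define MAX_PLANES 8

/* Stand-in for struct vba_vars_st: the shared state block. */
struct vars {
	unsigned int HTotal[MAX_PLANES];
	double PixelClock[MAX_PLANES];
};

/* Before: every per-plane value is copied into the signature. */
static double line_time_before(unsigned int HTotal, double PixelClock)
{
	return HTotal / PixelClock;
}

/* After: the state block and plane index travel together, keeping
 * call sites short and trimming argument traffic for these very
 * wide DML functions. */
static double line_time_after(const struct vars *v, unsigned int k)
{
	return v->HTotal[k] / v->PixelClock[k];
}

int main(void)
{
	struct vars v = { .HTotal = { 2200 }, .PixelClock = { 148.5 } };

	return line_time_before(v.HTotal[0], v.PixelClock[0]) ==
	       line_time_after(&v, 0) ? 0 : 1;
}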
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 05fc14a47fba..59c2547d01b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3363,28 +3363,14 @@ double dml32_CalculateExtraLatency(
} // CalculateExtraLatency
bool dml32_CalculatePrefetchSchedule(
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -3425,6 +3411,7 @@ bool dml32_CalculatePrefetchSchedule(
double *VUpdateWidthPix,
double *VReadyOffsetPix)
{
+ double DPPCLKDelaySubtotalPlusCNVCFormater = v->DPPCLKDelaySubtotal + v->DPPCLKDelayCNVCFormater;
bool MyError = false;
unsigned int DPPCycles, DISPCLKCycles;
double DSTTotalPixelsAfterScaler;
@@ -3461,27 +3448,27 @@ bool dml32_CalculatePrefetchSchedule(
double Tsw_est1 = 0;
double Tsw_est3 = 0;
- if (GPUVMEnable == true && HostVMEnable == true)
- HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels;
+ if (v->GPUVMEnable == true && v->HostVMEnable == true)
+ HostVMDynamicLevelsTrips = v->HostVMMaxNonCachedPageTableLevels;
else
HostVMDynamicLevelsTrips = 0;
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: GPUVMEnable = %d\n", __func__, GPUVMEnable);
- dml_print("DML::%s: GPUVMPageTableLevels = %d\n", __func__, GPUVMPageTableLevels);
+ dml_print("DML::%s: v->GPUVMEnable = %d\n", __func__, v->GPUVMEnable);
+ dml_print("DML::%s: v->GPUVMMaxPageTableLevels = %d\n", __func__, v->GPUVMMaxPageTableLevels);
dml_print("DML::%s: DCCEnable = %d\n", __func__, myPipe->DCCEnable);
- dml_print("DML::%s: HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
- __func__, HostVMEnable, HostVMInefficiencyFactor);
+ dml_print("DML::%s: v->HostVMEnable=%d HostVMInefficiencyFactor=%f\n",
+ __func__, v->HostVMEnable, HostVMInefficiencyFactor);
#endif
dml32_CalculateVUpdateAndDynamicMetadataParameters(
- MaxInterDCNTileRepeaters,
+ v->MaxInterDCNTileRepeaters,
myPipe->Dppclk,
myPipe->Dispclk,
myPipe->DCFClkDeepSleep,
myPipe->PixelClock,
myPipe->HTotal,
myPipe->VBlank,
- DynamicMetadataTransmittedBytes,
- DynamicMetadataLinesBeforeActiveRequired,
+ v->DynamicMetadataTransmittedBytes[k],
+ v->DynamicMetadataLinesBeforeActiveRequired[k],
myPipe->InterlaceEnable,
myPipe->ProgressiveToInterlaceUnitInOPP,
TSetup,
@@ -3496,19 +3483,19 @@ bool dml32_CalculatePrefetchSchedule(
LineTime = myPipe->HTotal / myPipe->PixelClock;
trip_to_mem = UrgentLatency;
- Tvm_trips = UrgentExtraLatency + trip_to_mem * (GPUVMPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
+ Tvm_trips = UrgentExtraLatency + trip_to_mem * (v->GPUVMMaxPageTableLevels * (HostVMDynamicLevelsTrips + 1) - 1);
- if (DynamicMetadataVMEnabled == true)
+ if (v->DynamicMetadataVMEnabled == true)
*Tdmdl = TWait + Tvm_trips + trip_to_mem;
else
*Tdmdl = TWait + UrgentExtraLatency;
#ifdef __DML_VBA_ALLOW_DELTA__
- if (DynamicMetadataEnable == false)
+ if (v->DynamicMetadataEnable[k] == false)
*Tdmdl = 0.0;
#endif
- if (DynamicMetadataEnable == true) {
+ if (v->DynamicMetadataEnable[k] == true) {
if (VStartup * LineTime < *TSetup + *Tdmdl + Tdmbf + Tdmec + Tdmsks) {
*NotEnoughTimeForDynamicMetadata = true;
#ifdef __DML_VBA_DEBUG__
@@ -3528,17 +3515,17 @@ bool dml32_CalculatePrefetchSchedule(
*NotEnoughTimeForDynamicMetadata = false;
}
- *Tdmdl_vm = (DynamicMetadataEnable == true && DynamicMetadataVMEnabled == true &&
- GPUVMEnable == true ? TWait + Tvm_trips : 0);
+ *Tdmdl_vm = (v->DynamicMetadataEnable[k] == true && v->DynamicMetadataVMEnabled == true &&
+ v->GPUVMEnable == true ? TWait + Tvm_trips : 0);
if (myPipe->ScalerEnabled)
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCL;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCL;
else
- DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + DPPCLKDelaySCLLBOnly;
+ DPPCycles = DPPCLKDelaySubtotalPlusCNVCFormater + v->DPPCLKDelaySCLLBOnly;
- DPPCycles = DPPCycles + myPipe->NumberOfCursors * DPPCLKDelayCNVCCursor;
+ DPPCycles = DPPCycles + myPipe->NumberOfCursors * v->DPPCLKDelayCNVCCursor;
- DISPCLKCycles = DISPCLKDelaySubtotal;
+ DISPCLKCycles = v->DISPCLKDelaySubtotal;
if (myPipe->Dppclk == 0.0 || myPipe->Dispclk == 0.0)
return true;
@@ -3564,7 +3551,7 @@ bool dml32_CalculatePrefetchSchedule(
dml_print("DML::%s: DSTXAfterScaler: %d\n", __func__, *DSTXAfterScaler);
#endif
- if (OutputFormat == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
+ if (v->OutputFormat[k] == dm_420 || (myPipe->InterlaceEnable && myPipe->ProgressiveToInterlaceUnitInOPP))
*DSTYAfterScaler = 1;
else
*DSTYAfterScaler = 0;
@@ -3581,13 +3568,13 @@ bool dml32_CalculatePrefetchSchedule(
Tr0_trips = trip_to_mem * (HostVMDynamicLevelsTrips + 1);
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_trips_rounded = dml_ceil(4.0 * Tvm_trips / LineTime, 1.0) / 4.0 * LineTime;
Tr0_trips_rounded = dml_ceil(4.0 * Tr0_trips / LineTime, 1.0) / 4.0 * LineTime;
- if (GPUVMPageTableLevels >= 3) {
+ if (v->GPUVMMaxPageTableLevels >= 3) {
*Tno_bw = UrgentExtraLatency + trip_to_mem *
- (double) ((GPUVMPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
- } else if (GPUVMPageTableLevels == 1 && myPipe->DCCEnable != true) {
+ (double) ((v->GPUVMMaxPageTableLevels - 2) * (HostVMDynamicLevelsTrips + 1) - 1);
+ } else if (v->GPUVMMaxPageTableLevels == 1 && myPipe->DCCEnable != true) {
Tr0_trips_rounded = dml_ceil(4.0 * UrgentExtraLatency / LineTime, 1.0) /
4.0 * LineTime; // VBA_ERROR
*Tno_bw = UrgentExtraLatency;
@@ -3622,7 +3609,7 @@ bool dml32_CalculatePrefetchSchedule(
min_Lsw = dml_max(min_Lsw, 1.0);
Lsw_oto = dml_ceil(4.0 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1.0) / 4.0;
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_oto = dml_max3(
Tvm_trips,
*Tno_bw + PDEAndMetaPTEBytesFrame * HostVMInefficiencyFactor / prefetch_bw_oto,
@@ -3630,7 +3617,7 @@ bool dml32_CalculatePrefetchSchedule(
} else
Tvm_oto = LineTime / 4.0;
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_oto = dml_max4(
Tr0_trips,
(MetaRowByte + PixelPTEBytesPerRow * HostVMInefficiencyFactor) / prefetch_bw_oto,
@@ -3833,7 +3820,7 @@ bool dml32_CalculatePrefetchSchedule(
#endif
if (prefetch_bw_equ > 0) {
- if (GPUVMEnable == true) {
+ if (v->GPUVMEnable == true) {
Tvm_equ = dml_max3(*Tno_bw + PDEAndMetaPTEBytesFrame *
HostVMInefficiencyFactor / prefetch_bw_equ,
Tvm_trips, LineTime / 4);
@@ -3841,7 +3828,7 @@ bool dml32_CalculatePrefetchSchedule(
Tvm_equ = LineTime / 4;
}
- if ((GPUVMEnable == true || myPipe->DCCEnable == true)) {
+ if ((v->GPUVMEnable == true || myPipe->DCCEnable == true)) {
Tr0_equ = dml_max4((MetaRowByte + PixelPTEBytesPerRow *
HostVMInefficiencyFactor) / prefetch_bw_equ, Tr0_trips,
(LineTime - Tvm_equ) / 2, LineTime / 4);
@@ -4206,58 +4193,28 @@ void dml32_CalculateFlipSchedule(
} // CalculateFlipSchedule
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
@@ -4299,136 +4256,136 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
unsigned int LBLatencyHidingSourceLinesY[DC__NUM_DPP__MAX];
unsigned int LBLatencyHidingSourceLinesC[DC__NUM_DPP__MAX];
- Watermark->UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
- Watermark->USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ v->Watermark.UrgentWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency;
+ v->Watermark.USRRetrainingWatermark = mmSOCParameters.UrgentLatency + mmSOCParameters.ExtraLatency
+ mmSOCParameters.USRRetrainingLatency + mmSOCParameters.SMNLatency;
- Watermark->DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + Watermark->UrgentWatermark;
- Watermark->FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + Watermark->UrgentWatermark;
- Watermark->StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.DRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.FCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency + v->Watermark.UrgentWatermark;
+ v->Watermark.StutterExitWatermark = mmSOCParameters.SRExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ v->Watermark.StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitTime + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ v->Watermark.Z8StutterExitWatermark = mmSOCParameters.SRExitZ8Time + mmSOCParameters.ExtraLatency
+ 10 / DCFClkDeepSleep;
- Watermark->Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ v->Watermark.Z8StutterEnterPlusExitWatermark = mmSOCParameters.SREnterPlusExitZ8Time
+ mmSOCParameters.ExtraLatency + 10 / DCFClkDeepSleep;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: UrgentLatency = %f\n", __func__, mmSOCParameters.UrgentLatency);
dml_print("DML::%s: ExtraLatency = %f\n", __func__, mmSOCParameters.ExtraLatency);
dml_print("DML::%s: DRAMClockChangeLatency = %f\n", __func__, mmSOCParameters.DRAMClockChangeLatency);
- dml_print("DML::%s: UrgentWatermark = %f\n", __func__, Watermark->UrgentWatermark);
- dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, Watermark->USRRetrainingWatermark);
- dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, Watermark->DRAMClockChangeWatermark);
- dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, Watermark->FCLKChangeWatermark);
- dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, Watermark->StutterExitWatermark);
- dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, Watermark->StutterEnterPlusExitWatermark);
- dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, Watermark->Z8StutterExitWatermark);
+ dml_print("DML::%s: UrgentWatermark = %f\n", __func__, v->Watermark.UrgentWatermark);
+ dml_print("DML::%s: USRRetrainingWatermark = %f\n", __func__, v->Watermark.USRRetrainingWatermark);
+ dml_print("DML::%s: DRAMClockChangeWatermark = %f\n", __func__, v->Watermark.DRAMClockChangeWatermark);
+ dml_print("DML::%s: FCLKChangeWatermark = %f\n", __func__, v->Watermark.FCLKChangeWatermark);
+ dml_print("DML::%s: StutterExitWatermark = %f\n", __func__, v->Watermark.StutterExitWatermark);
+ dml_print("DML::%s: StutterEnterPlusExitWatermark = %f\n", __func__, v->Watermark.StutterEnterPlusExitWatermark);
+ dml_print("DML::%s: Z8StutterExitWatermark = %f\n", __func__, v->Watermark.Z8StutterExitWatermark);
dml_print("DML::%s: Z8StutterEnterPlusExitWatermark = %f\n",
- __func__, Watermark->Z8StutterEnterPlusExitWatermark);
+ __func__, v->Watermark.Z8StutterEnterPlusExitWatermark);
#endif
TotalActiveWriteback = 0;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (WritebackEnable[k] == true)
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->WritebackEnable[k] == true)
TotalActiveWriteback = TotalActiveWriteback + 1;
}
if (TotalActiveWriteback <= 1) {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
- + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackUrgentWatermark = mmSOCParameters.WritebackLatency
+ + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackUrgentWatermark = Watermark->WritebackUrgentWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackUrgentWatermark = v->Watermark.WritebackUrgentWatermark
+ mmSOCParameters.USRRetrainingLatency;
if (TotalActiveWriteback <= 1) {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ mmSOCParameters.WritebackLatency;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ mmSOCParameters.WritebackLatency;
} else {
- Watermark->WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
- Watermark->WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
- + mmSOCParameters.WritebackLatency + WritebackChunkSize * 1024 / 32 / SOCCLK;
+ v->Watermark.WritebackDRAMClockChangeWatermark = mmSOCParameters.DRAMClockChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024.0 / 32.0 / SOCCLK;
+ v->Watermark.WritebackFCLKChangeWatermark = mmSOCParameters.FCLKChangeLatency
+ + mmSOCParameters.WritebackLatency + v->WritebackChunkSize * 1024 / 32 / SOCCLK;
}
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackDRAMClockChangeWatermark = Watermark->WritebackDRAMClockChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackDRAMClockChangeWatermark = v->Watermark.WritebackDRAMClockChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
- if (USRRetrainingRequiredFinal)
- Watermark->WritebackFCLKChangeWatermark = Watermark->WritebackFCLKChangeWatermark
+ if (v->USRRetrainingRequiredFinal)
+ v->Watermark.WritebackFCLKChangeWatermark = v->Watermark.WritebackFCLKChangeWatermark
+ mmSOCParameters.USRRetrainingLatency;
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: WritebackDRAMClockChangeWatermark = %f\n",
- __func__, Watermark->WritebackDRAMClockChangeWatermark);
- dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, Watermark->WritebackFCLKChangeWatermark);
- dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, Watermark->WritebackUrgentWatermark);
- dml_print("DML::%s: USRRetrainingRequiredFinal = %d\n", __func__, USRRetrainingRequiredFinal);
+ __func__, v->Watermark.WritebackDRAMClockChangeWatermark);
+ dml_print("DML::%s: WritebackFCLKChangeWatermark = %f\n", __func__, v->Watermark.WritebackFCLKChangeWatermark);
+ dml_print("DML::%s: WritebackUrgentWatermark = %f\n", __func__, v->Watermark.WritebackUrgentWatermark);
+ dml_print("DML::%s: v->USRRetrainingRequiredFinal = %d\n", __func__, v->USRRetrainingRequiredFinal);
dml_print("DML::%s: USRRetrainingLatency = %f\n", __func__, mmSOCParameters.USRRetrainingLatency);
#endif
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k] +
- SwathWidthC[k] * BytePerPixelDETC[k] * VRatioChroma[k]) / (HTotal[k] / PixelClock[k]);
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ TotalPixelBW = TotalPixelBW + DPPPerSurface[k] * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k] +
+ SwathWidthC[k] * BytePerPixelDETC[k] * v->VRatioChroma[k]) / (v->HTotal[k] / v->PixelClock[k]);
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
- LBLatencyHidingSourceLinesY[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(HRatio[k], 1.0)), 1)) - (VTaps[k] - 1);
- LBLatencyHidingSourceLinesC[k] = dml_min((double) MaxLineBufferLines, dml_floor(LineBufferSize / LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(HRatioChroma[k], 1.0)), 1)) - (VTapsChroma[k] - 1);
+ LBLatencyHidingSourceLinesY[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthY[k] / dml_max(v->HRatio[k], 1.0)), 1)) - (v->vtaps[k] - 1);
+ LBLatencyHidingSourceLinesC[k] = dml_min((double) v->MaxLineBufferLines, dml_floor(v->LineBufferSizeFinal / v->LBBitPerPixel[k] / (SwathWidthC[k] / dml_max(v->HRatioChroma[k], 1.0)), 1)) - (v->VTAPsChroma[k] - 1);
#ifdef __DML_VBA_DEBUG__
- dml_print("DML::%s: k=%d, MaxLineBufferLines = %d\n", __func__, k, MaxLineBufferLines);
- dml_print("DML::%s: k=%d, LineBufferSize = %d\n", __func__, k, LineBufferSize);
- dml_print("DML::%s: k=%d, LBBitPerPixel = %d\n", __func__, k, LBBitPerPixel[k]);
- dml_print("DML::%s: k=%d, HRatio = %f\n", __func__, k, HRatio[k]);
- dml_print("DML::%s: k=%d, VTaps = %d\n", __func__, k, VTaps[k]);
+ dml_print("DML::%s: k=%d, v->MaxLineBufferLines = %d\n", __func__, k, v->MaxLineBufferLines);
+ dml_print("DML::%s: k=%d, v->LineBufferSizeFinal = %d\n", __func__, k, v->LineBufferSizeFinal);
+ dml_print("DML::%s: k=%d, v->LBBitPerPixel = %d\n", __func__, k, v->LBBitPerPixel[k]);
+ dml_print("DML::%s: k=%d, v->HRatio = %f\n", __func__, k, v->HRatio[k]);
+ dml_print("DML::%s: k=%d, v->vtaps = %d\n", __func__, k, v->vtaps[k]);
#endif
- EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / VRatio[k] * (HTotal[k] / PixelClock[k]);
- EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / VRatioChroma[k] * (HTotal[k] / PixelClock[k]);
+ EffectiveLBLatencyHidingY = LBLatencyHidingSourceLinesY[k] / v->VRatio[k] * (v->HTotal[k] / v->PixelClock[k]);
+ EffectiveLBLatencyHidingC = LBLatencyHidingSourceLinesC[k] / v->VRatioChroma[k] * (v->HTotal[k] / v->PixelClock[k]);
EffectiveDETBufferSizeY = DETBufferSizeY[k];
if (UnboundedRequestEnabled) {
EffectiveDETBufferSizeY = EffectiveDETBufferSizeY
+ CompressedBufferSizeInkByte * 1024
- * (SwathWidthY[k] * BytePerPixelDETY[k] * VRatio[k])
- / (HTotal[k] / PixelClock[k]) / TotalPixelBW;
+ * (SwathWidthY[k] * BytePerPixelDETY[k] * v->VRatio[k])
+ / (v->HTotal[k] / v->PixelClock[k]) / TotalPixelBW;
}
LinesInDETY[k] = (double) EffectiveDETBufferSizeY / BytePerPixelDETY[k] / SwathWidthY[k];
LinesInDETYRoundedDownToSwath[k] = dml_floor(LinesInDETY[k], SwathHeightY[k]);
- FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k]) / VRatio[k];
+ FullDETBufferingTimeY = LinesInDETYRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k]) / v->VRatio[k];
ActiveClockChangeLatencyHidingY = EffectiveLBLatencyHidingY + FullDETBufferingTimeY
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k] / PixelClock[k];
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k] / v->PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
+ if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightY[k] * HTotal[k]
- / PixelClock[k] / VRatio[k];
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatio[k];
}
if (BytePerPixelDETC[k] > 0) {
LinesInDETC[k] = DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k];
LinesInDETCRoundedDownToSwath[k] = dml_floor(LinesInDETC[k], SwathHeightC[k]);
- FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (HTotal[k] / PixelClock[k])
- / VRatioChroma[k];
+ FullDETBufferingTimeC = LinesInDETCRoundedDownToSwath[k] * (v->HTotal[k] / v->PixelClock[k])
+ / v->VRatioChroma[k];
ActiveClockChangeLatencyHidingC = EffectiveLBLatencyHidingC + FullDETBufferingTimeC
- - (DSTXAfterScaler[k] / HTotal[k] + DSTYAfterScaler[k]) * HTotal[k]
- / PixelClock[k];
- if (NumberOfActiveSurfaces > 1) {
+ - (DSTXAfterScaler[k] / v->HTotal[k] + DSTYAfterScaler[k]) * v->HTotal[k]
+ / v->PixelClock[k];
+ if (v->NumberOfActiveSurfaces > 1) {
ActiveClockChangeLatencyHidingC = ActiveClockChangeLatencyHidingC
- - (1 - 1 / NumberOfActiveSurfaces) * SwathHeightC[k] * HTotal[k]
- / PixelClock[k] / VRatioChroma[k];
+ - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightC[k] * v->HTotal[k]
+ / v->PixelClock[k] / v->VRatioChroma[k];
}
ActiveClockChangeLatencyHiding = dml_min(ActiveClockChangeLatencyHidingY,
ActiveClockChangeLatencyHidingC);
@@ -4436,24 +4393,24 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
ActiveClockChangeLatencyHiding = ActiveClockChangeLatencyHidingY;
}
- ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->DRAMClockChangeWatermark;
- ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->UrgentWatermark
- - Watermark->FCLKChangeWatermark;
- USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - Watermark->USRRetrainingWatermark;
-
- if (WritebackEnable[k]) {
- WritebackLatencyHiding = WritebackInterfaceBufferSize * 1024
- / (WritebackDestinationWidth[k] * WritebackDestinationHeight[k]
- / (WritebackSourceHeight[k] * HTotal[k] / PixelClock[k]) * 4);
- if (WritebackPixelFormat[k] == dm_444_64)
+ ActiveDRAMClockChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.DRAMClockChangeWatermark;
+ ActiveFCLKChangeLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.UrgentWatermark
+ - v->Watermark.FCLKChangeWatermark;
+ USRRetrainingLatencyMargin[k] = ActiveClockChangeLatencyHiding - v->Watermark.USRRetrainingWatermark;
+
+ if (v->WritebackEnable[k]) {
+ WritebackLatencyHiding = v->WritebackInterfaceBufferSize * 1024
+ / (v->WritebackDestinationWidth[k] * v->WritebackDestinationHeight[k]
+ / (v->WritebackSourceHeight[k] * v->HTotal[k] / v->PixelClock[k]) * 4);
+ if (v->WritebackPixelFormat[k] == dm_444_64)
WritebackLatencyHiding = WritebackLatencyHiding / 2;
WritebackDRAMClockChangeLatencyMargin = WritebackLatencyHiding
- - Watermark->WritebackDRAMClockChangeWatermark;
+ - v->Watermark.WritebackDRAMClockChangeWatermark;
WritebackFCLKChangeLatencyMargin = WritebackLatencyHiding
- - Watermark->WritebackFCLKChangeWatermark;
+ - v->Watermark.WritebackFCLKChangeWatermark;
ActiveDRAMClockChangeLatencyMargin[k] = dml_min(ActiveDRAMClockChangeLatencyMargin[k],
WritebackFCLKChangeLatencyMargin);
@@ -4461,22 +4418,22 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
WritebackDRAMClockChangeLatencyMargin);
}
MaxActiveDRAMClockChangeLatencySupported[k] =
- (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
+ (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_phantom_pipe) ?
0 :
(ActiveDRAMClockChangeLatencyMargin[k]
+ mmSOCParameters.DRAMClockChangeLatency);
}
- for (i = 0; i < NumberOfActiveSurfaces; ++i) {
- for (j = 0; j < NumberOfActiveSurfaces; ++j) {
+ for (i = 0; i < v->NumberOfActiveSurfaces; ++i) {
+ for (j = 0; j < v->NumberOfActiveSurfaces; ++j) {
if (i == j ||
- (BlendingAndTiming[i] == i && BlendingAndTiming[j] == i) ||
- (BlendingAndTiming[j] == j && BlendingAndTiming[i] == j) ||
- (BlendingAndTiming[i] == BlendingAndTiming[j] && BlendingAndTiming[i] != i) ||
- (SynchronizeTimingsFinal && PixelClock[i] == PixelClock[j] &&
- HTotal[i] == HTotal[j] && VTotal[i] == VTotal[j] &&
- VActive[i] == VActive[j]) || (SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
- (DRRDisplay[i] || DRRDisplay[j]))) {
+ (v->BlendingAndTiming[i] == i && v->BlendingAndTiming[j] == i) ||
+ (v->BlendingAndTiming[j] == j && v->BlendingAndTiming[i] == j) ||
+ (v->BlendingAndTiming[i] == v->BlendingAndTiming[j] && v->BlendingAndTiming[i] != i) ||
+ (v->SynchronizeTimingsFinal && v->PixelClock[i] == v->PixelClock[j] &&
+ v->HTotal[i] == v->HTotal[j] && v->VTotal[i] == v->VTotal[j] &&
+ v->VActive[i] == v->VActive[j]) || (v->SynchronizeDRRDisplaysForUCLKPStateChangeFinal &&
+ (v->DRRDisplay[i] || v->DRRDisplay[j]))) {
SynchronizedSurfaces[i][j] = true;
} else {
SynchronizedSurfaces[i][j] = false;
@@ -4484,8 +4441,8 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(!FoundFirstSurfaceWithMinActiveFCLKChangeMargin ||
ActiveFCLKChangeLatencyMargin[k] < MinActiveFCLKChangeMargin)) {
FoundFirstSurfaceWithMinActiveFCLKChangeMargin = true;
@@ -4497,9 +4454,9 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*MinActiveFCLKChangeLatencySupported = MinActiveFCLKChangeMargin + mmSOCParameters.FCLKChangeLatency;
SameTimingForFCLKChange = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
if (!SynchronizedSurfaces[k][SurfaceWithMinActiveFCLKChangeMargin]) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(SameTimingForFCLKChange ||
ActiveFCLKChangeLatencyMargin[k] <
SecondMinActiveFCLKChangeMarginOneDisplayInVBLank)) {
@@ -4519,17 +4476,17 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
*USRRetrainingSupport = true;
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if ((UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if ((v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe) &&
(USRRetrainingLatencyMargin[k] < 0)) {
*USRRetrainingSupport = false;
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
- UseMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_full_frame &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_sub_viewport &&
+ v->UsesMALLForPStateChange[k] != dm_use_mall_pstate_change_phantom_pipe &&
ActiveDRAMClockChangeLatencyMargin[k] < 0) {
if (PrefetchMode > 0) {
DRAMClockChangeSupportNumber = 2;
@@ -4543,10 +4500,10 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
}
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
- if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
+ if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_full_frame)
DRAMClockChangeMethod = 1;
- else if (UseMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
+ else if (v->UsesMALLForPStateChange[k] == dm_use_mall_pstate_change_sub_viewport)
DRAMClockChangeMethod = 2;
}
@@ -4573,16 +4530,16 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
*DRAMClockChangeSupport = dm_dram_clock_change_unsupported;
}
- for (k = 0; k < NumberOfActiveSurfaces; ++k) {
+ for (k = 0; k < v->NumberOfActiveSurfaces; ++k) {
unsigned int dst_y_pstate;
unsigned int src_y_pstate_l;
unsigned int src_y_pstate_c;
unsigned int src_y_ahead_l, src_y_ahead_c, sub_vp_lines_l, sub_vp_lines_c;
- dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (HTotal[k] / PixelClock[k]), 1);
- src_y_pstate_l = dml_ceil(dst_y_pstate * VRatio[k], SwathHeightY[k]);
+ dst_y_pstate = dml_ceil((mmSOCParameters.DRAMClockChangeLatency + mmSOCParameters.UrgentLatency) / (v->HTotal[k] / v->PixelClock[k]), 1);
+ src_y_pstate_l = dml_ceil(dst_y_pstate * v->VRatio[k], SwathHeightY[k]);
src_y_ahead_l = dml_floor(DETBufferSizeY[k] / BytePerPixelDETY[k] / SwathWidthY[k], SwathHeightY[k]) + LBLatencyHidingSourceLinesY[k];
- sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + meta_row_height[k];
+ sub_vp_lines_l = src_y_pstate_l + src_y_ahead_l + v->meta_row_height[k];
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, DETBufferSizeY = %d\n", __func__, k, DETBufferSizeY[k]);
@@ -4593,21 +4550,21 @@ dml_print("DML::%s: k=%d, LBLatencyHidingSourceLinesY = %d\n", __func__, k, LBL
dml_print("DML::%s: k=%d, dst_y_pstate = %d\n", __func__, k, dst_y_pstate);
dml_print("DML::%s: k=%d, src_y_pstate_l = %d\n", __func__, k, src_y_pstate_l);
dml_print("DML::%s: k=%d, src_y_ahead_l = %d\n", __func__, k, src_y_ahead_l);
-dml_print("DML::%s: k=%d, meta_row_height = %d\n", __func__, k, meta_row_height[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height = %d\n", __func__, k, v->meta_row_height[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_l = %d\n", __func__, k, sub_vp_lines_l);
#endif
SubViewportLinesNeededInMALL[k] = sub_vp_lines_l;
if (BytePerPixelDETC[k] > 0) {
- src_y_pstate_c = dml_ceil(dst_y_pstate * VRatioChroma[k], SwathHeightC[k]);
+ src_y_pstate_c = dml_ceil(dst_y_pstate * v->VRatioChroma[k], SwathHeightC[k]);
src_y_ahead_c = dml_floor(DETBufferSizeC[k] / BytePerPixelDETC[k] / SwathWidthC[k], SwathHeightC[k]) + LBLatencyHidingSourceLinesC[k];
- sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + meta_row_height_chroma[k];
+ sub_vp_lines_c = src_y_pstate_c + src_y_ahead_c + v->meta_row_height_chroma[k];
SubViewportLinesNeededInMALL[k] = dml_max(sub_vp_lines_l, sub_vp_lines_c);
#ifdef __DML_VBA_DEBUG__
dml_print("DML::%s: k=%d, src_y_pstate_c = %d\n", __func__, k, src_y_pstate_c);
dml_print("DML::%s: k=%d, src_y_ahead_c = %d\n", __func__, k, src_y_ahead_c);
-dml_print("DML::%s: k=%d, meta_row_height_chroma = %d\n", __func__, k, meta_row_height_chroma[k]);
+dml_print("DML::%s: k=%d, v->meta_row_height_chroma = %d\n", __func__, k, v->meta_row_height_chroma[k]);
dml_print("DML::%s: k=%d, sub_vp_lines_c = %d\n", __func__, k, sub_vp_lines_c);
#endif
}
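
To make the watermark arithmetic above concrete with illustrative numbers (DML works in MHz and microseconds; these are not real silicon values): UrgentLatency = 4 us and ExtraLatency = 2 us give UrgentWatermark = 6 us; adding DRAMClockChangeLatency = 11 us on top yields DRAMClockChangeWatermark = 17 us; and with DCFClkDeepSleep = 200 MHz, each stutter watermark adds 10 / 200 = 0.05 us to its SRExit/SREnterPlusExit term plus ExtraLatency.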
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index d293856ba906..924e361ad243 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -30,6 +30,7 @@
#include "os_types.h"
#include "../dc_features.h"
#include "../display_mode_structs.h"
+#include "dml/display_mode_vba.h"
unsigned int dml32_dscceComputeDelay(
unsigned int bpc,
@@ -712,28 +713,14 @@ double dml32_CalculateExtraLatency(
unsigned int HostVMMaxNonCachedPageTableLevels);
bool dml32_CalculatePrefetchSchedule(
+ struct vba_vars_st *v,
+ unsigned int k,
double HostVMInefficiencyFactor,
DmlPipe *myPipe,
unsigned int DSCDelay,
- double DPPCLKDelaySubtotalPlusCNVCFormater,
- double DPPCLKDelaySCL,
- double DPPCLKDelaySCLLBOnly,
- double DPPCLKDelayCNVCCursor,
- double DISPCLKDelaySubtotal,
unsigned int DPP_RECOUT_WIDTH,
- enum output_format_class OutputFormat,
- unsigned int MaxInterDCNTileRepeaters,
unsigned int VStartup,
unsigned int MaxVStartup,
- unsigned int GPUVMPageTableLevels,
- bool GPUVMEnable,
- bool HostVMEnable,
- unsigned int HostVMMaxNonCachedPageTableLevels,
- double HostVMMinPageSize,
- bool DynamicMetadataEnable,
- bool DynamicMetadataVMEnabled,
- int DynamicMetadataLinesBeforeActiveRequired,
- unsigned int DynamicMetadataTransmittedBytes,
double UrgentLatency,
double UrgentExtraLatency,
double TCalc,
@@ -807,58 +794,28 @@ void dml32_CalculateFlipSchedule(
bool *ImmediateFlipSupportedForPipe);
void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport(
- bool USRRetrainingRequiredFinal,
- enum dm_use_mall_for_pstate_change_mode UseMALLForPStateChange[],
+ struct vba_vars_st *v,
unsigned int PrefetchMode,
- unsigned int NumberOfActiveSurfaces,
- unsigned int MaxLineBufferLines,
- unsigned int LineBufferSize,
- unsigned int WritebackInterfaceBufferSize,
double DCFCLK,
double ReturnBW,
- bool SynchronizeTimingsFinal,
- bool SynchronizeDRRDisplaysForUCLKPStateChangeFinal,
- bool DRRDisplay[],
- unsigned int dpte_group_bytes[],
- unsigned int meta_row_height[],
- unsigned int meta_row_height_chroma[],
SOCParametersList mmSOCParameters,
- unsigned int WritebackChunkSize,
double SOCCLK,
double DCFClkDeepSleep,
unsigned int DETBufferSizeY[],
unsigned int DETBufferSizeC[],
unsigned int SwathHeightY[],
unsigned int SwathHeightC[],
- unsigned int LBBitPerPixel[],
double SwathWidthY[],
double SwathWidthC[],
- double HRatio[],
- double HRatioChroma[],
- unsigned int VTaps[],
- unsigned int VTapsChroma[],
- double VRatio[],
- double VRatioChroma[],
- unsigned int HTotal[],
- unsigned int VTotal[],
- unsigned int VActive[],
- double PixelClock[],
- unsigned int BlendingAndTiming[],
unsigned int DPPPerSurface[],
double BytePerPixelDETY[],
double BytePerPixelDETC[],
double DSTXAfterScaler[],
double DSTYAfterScaler[],
- bool WritebackEnable[],
- enum source_format_class WritebackPixelFormat[],
- double WritebackDestinationWidth[],
- double WritebackDestinationHeight[],
- double WritebackSourceHeight[],
bool UnboundedRequestEnabled,
unsigned int CompressedBufferSizeInkByte,
/* Output */
- Watermarks *Watermark,
enum clock_change_support *DRAMClockChangeSupport,
double MaxActiveDRAMClockChangeLatencySupported[],
unsigned int SubViewportLinesNeededInMALL[],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 5d27ff0ebb5f..f5400eda07a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -35,6 +35,8 @@
#include "dcn30/display_rq_dlg_calc_30.h"
#include "dcn31/display_mode_vba_31.h"
#include "dcn31/display_rq_dlg_calc_31.h"
+#include "dcn314/display_mode_vba_314.h"
+#include "dcn314/display_rq_dlg_calc_314.h"
#include "dcn32/display_mode_vba_32.h"
#include "dcn32/display_rq_dlg_calc_32.h"
#include "dml_logger.h"
@@ -74,6 +76,13 @@ const struct dml_funcs dml31_funcs = {
.rq_dlg_get_rq_reg = dml31_rq_dlg_get_rq_reg
};
+const struct dml_funcs dml314_funcs = {
+ .validate = dml314_ModeSupportAndSystemConfigurationFull,
+ .recalculate = dml314_recalculate,
+ .rq_dlg_get_dlg_reg = dml314_rq_dlg_get_dlg_reg,
+ .rq_dlg_get_rq_reg = dml314_rq_dlg_get_rq_reg
+};
+
const struct dml_funcs dml32_funcs = {
.validate = dml32_ModeSupportAndSystemConfigurationFull,
.recalculate = dml32_recalculate,
@@ -107,6 +116,9 @@ void dml_init_instance(struct display_mode_lib *lib,
case DML_PROJECT_DCN31_FPGA:
lib->funcs = dml31_funcs;
break;
+ case DML_PROJECT_DCN314:
+ lib->funcs = dml314_funcs;
+ break;
case DML_PROJECT_DCN32:
lib->funcs = dml32_funcs;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index 2bdd6ed22611..b1878a1440e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,6 +41,7 @@ enum dml_project {
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
DML_PROJECT_DCN31_FPGA,
+ DML_PROJECT_DCN314,
DML_PROJECT_DCN32,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 492aec634b68..2051ddaa641a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -651,10 +651,10 @@ struct vba_vars_st {
unsigned int OutputTypeAndRatePerState[DC__VOLTAGE_STATES][DC__NUM_DPP__MAX];
double RequiredDISPCLKPerSurface[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightY[DC__NUM_DPP__MAX];
- unsigned int MicroTileHeightC[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MicroTileWidthC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileHeightC[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
+ unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
bool ImmediateFlipRequiredFinal;
bool DCCProgrammingAssumesScanDirectionUnknownFinal;
bool EnoughWritebackUnits;
@@ -800,8 +800,6 @@ struct vba_vars_st {
double PSCL_FACTOR[DC__NUM_DPP__MAX];
double PSCL_FACTOR_CHROMA[DC__NUM_DPP__MAX];
double MaximumVStartup[DC__VOLTAGE_STATES][2][DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthY[DC__NUM_DPP__MAX];
- unsigned int MacroTileWidthC[DC__NUM_DPP__MAX];
double AlignedDCCMetaPitch[DC__NUM_DPP__MAX];
double AlignedYPitch[DC__NUM_DPP__MAX];
double AlignedCPitch[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 5d2b028e5dad..d9f1b0a4fbd4 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -214,6 +214,7 @@ struct dummy_pstate_entry {
struct clk_bw_params {
unsigned int vram_type;
unsigned int num_channels;
+ unsigned int dram_channel_width_bytes;
unsigned int dispclk_vco_khz;
unsigned int dc_mode_softmax_memclk;
struct clk_limit_table clk_table;
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
index db7b0b155374..226af06278ce 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c
@@ -116,7 +116,7 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
dto_params.timing = &pipe_ctx->stream->timing;
dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr);
- dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst, stream_enc->inst);
dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
stream_enc->funcs->enable_stream(stream_enc);
@@ -137,7 +137,7 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx)
stream_enc->funcs->disable(stream_enc);
dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst);
- dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, pipe_ctx->link_res.hpo_dp_link_enc->inst);
+ dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, stream_enc->inst);
}
static void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 859ffd8725c5..04f7656906ca 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1600,6 +1600,7 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
struct fixed31_32 lut2;
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
+ const struct fixed31_32 one = dc_fixpt_from_int(1);
i = 0;
/* fixed_pt library has problems handling too small values */
@@ -1628,6 +1629,9 @@ static void interpolate_user_regamma(uint32_t hw_points_num,
} else
hw_x = coordinates_x[i].x;
+ if (dc_fixpt_le(one, hw_x))
+ hw_x = one;
+
norm_x = dc_fixpt_mul(norm_factor, hw_x);
index = dc_fixpt_floor(norm_x);
if (index < 0 || index > 255)
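
The clamp added above pins the fixed-point sample coordinate at 1.0, so norm_x = norm_factor * hw_x and therefore index = floor(norm_x) cannot step past the last of the 256 LUT entries. The same guard in plain doubles (illustrative only; the driver uses the dc_fixpt_* fixed31_32 helpers):

#include <math.h>

static int lut_index(double norm_factor, double hw_x)
{
	if (hw_x > 1.0)	/* mirrors: if (dc_fixpt_le(one, hw_x)) hw_x = one; */
		hw_x = 1.0;

	/* With norm_factor <= 255 this now lands in 0..255. */
	return (int)floor(norm_factor * hw_x);
}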
diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
index 80dab1146439..50bfa513cb35 100644
--- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h
+++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h
@@ -268,7 +268,8 @@ union MESAPI__ADD_QUEUE {
uint32_t is_tmz_queue : 1;
uint32_t map_kiq_utility_queue : 1;
uint32_t is_kfd_process : 1;
- uint32_t reserved : 22;
+ uint32_t trap_en : 1;
+ uint32_t reserved : 21;
};
struct MES_API_STATUS api_status;
uint64_t tma_addr;
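
The new trap_en bit is carved out of the adjacent reserved field, so the 32-bit flag word — and with it the MESAPI__ADD_QUEUE union layout shared with MES firmware — keeps its size. A reduced sketch of that pattern with a compile-time size check (the field set here is illustrative, not the full struct):

#include <stdint.h>

struct queue_flags {
	uint32_t is_tmz_queue          : 1;
	uint32_t map_kiq_utility_queue : 1;
	uint32_t is_kfd_process        : 1;
	uint32_t trap_en               : 1;  /* new bit... */
	uint32_t reserved              : 28; /* ...paid for by shrinking reserved */
};

/* The word must stay exactly 32 bits or the firmware ABI breaks. */
_Static_assert(sizeof(struct queue_flags) == sizeof(uint32_t),
	       "queue_flags must remain one 32-bit word");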
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index f745cd8f1ab7..063f4a737605 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -25,7 +25,7 @@
#define SMU13_DRIVER_IF_V13_0_0_H
// Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x22
+#define PPTABLE_VERSION 0x24
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index ac308e72241a..f442bf085a31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -30,7 +30,7 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x2E
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -291,5 +291,11 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 6db67f082d91..644ea150e075 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -368,6 +368,17 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
smu_baco->platform_support =
(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
false;
+
+ /*
+ * Disable BACO entry/exit completely on the SKUs below
+ * to avoid intermittent hardware failures.
+ */
+ if (((adev->pdev->device == 0x73A1) &&
+ (adev->pdev->revision == 0x00)) ||
+ ((adev->pdev->device == 0x73BF) &&
+ (adev->pdev->revision == 0xCF)))
+ smu_baco->platform_support = false;
+
}
}
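
The two SKUs are matched on PCI device id plus revision id. If the deny list grows, the chained conditions are often rewritten as a table; a sketch of that alternative (hypothetical helper, same effect as the hunk above):

/* Deny BACO on specific (device, revision) pairs. */
struct baco_deny { unsigned short device; unsigned char revision; };

static const struct baco_deny baco_deny_list[] = {
	{ 0x73A1, 0x00 },
	{ 0x73BF, 0xCF },
};

static int baco_denied(unsigned short device, unsigned char revision)
{
	unsigned int i;

	for (i = 0; i < sizeof(baco_deny_list) / sizeof(baco_deny_list[0]); i++)
		if (baco_deny_list[i].device == device &&
		    baco_deny_list[i].revision == revision)
			return 1;
	return 0;
}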
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index 18ee3b5e64c5..24488f4cb78c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -84,9 +84,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id);
-
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -224,23 +221,19 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
/*
* Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use 36831 signed pptable when pp_table_id is 3683
- * - use 37151 signed pptable when pp_table_id is 3715
- * - use 36641 signed pptable when pp_table_id is 3664 or 0
- * TODO: drop these when the pptable carried in vbios is ready.
+ * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+ * - use 36831 soft pptable when pptable_id is 3683
*/
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
switch (pptable_id) {
- case 0:
case 3664:
- pptable_id = 36641;
+ case 3715:
+ case 3795:
+ pptable_id = 0;
break;
case 3683:
pptable_id = 36831;
break;
- case 3715:
- pptable_id = 37151;
- break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
return -EINVAL;
@@ -425,8 +418,10 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
return 0;
}
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id)
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
{
const struct smc_firmware_header_v1_0 *hdr;
struct amdgpu_device *adev = smu->adev;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index df4a47acd724..7db2fd9ea74a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -388,11 +388,29 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
{
struct smu_table_context *smu_table = &smu->smu_table;
void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_0_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
+ uint32_t pptable_id;
int ret = 0;
/*
@@ -401,17 +419,51 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
* rely on the combo pptable (and its relevant SMU message).
*/
if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
} else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu_table->boot_values.pp_table_id;
+ }
+
+ /*
+ * Temporary solution for SMU V13.0.0 with SCPM disabled:
+ * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+ * - use soft pptable when pptable_id is 3683
+ */
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+ switch (pptable_id) {
+ case 3664:
+ case 3715:
+ case 3795:
+ pptable_id = 0;
+ break;
+ case 3683:
+ break;
+ default:
+ dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+ return -EINVAL;
+ }
+ }
+
+ /* force using vbios pptable in sriov mode */
+ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ else
+ ret = smu_v13_0_get_pptable_from_firmware(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size,
+ pptable_id);
}
+ if (ret)
+ return ret;
ret = smu_v13_0_0_store_powerplay_table(smu);
if (ret)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 1016d1c216d8..c422bf8a09b1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -120,6 +120,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
+ MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -400,11 +401,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_7_powerplay_table);
+
+ return 0;
+}
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -413,18 +430,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
* be used directly by driver. To get the raw pptable, we need to
* rely on the combo pptable (and its relevant SMU message).
*/
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_7_store_powerplay_table(smu);
if (ret)
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 493922069c90..01ee3febb813 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -377,8 +377,8 @@ static int vrr_range_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- seq_printf(m, "Min: %u\n", (u8)connector->display_info.monitor_range.min_vfreq);
- seq_printf(m, "Max: %u\n", (u8)connector->display_info.monitor_range.max_vfreq);
+ seq_printf(m, "Min: %u\n", connector->display_info.monitor_range.min_vfreq);
+ seq_printf(m, "Max: %u\n", connector->display_info.monitor_range.max_vfreq);
return 0;
}
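The (u8) casts are dropped because, with the EDID 1.4 range offsets handled later in this series, min_vfreq and max_vfreq can exceed 255, and a u8 cast silently truncates such values. A small demonstration, assuming a 300 Hz upper bound:

#include <stdio.h>

int main(void)
{
	unsigned int max_vfreq = 300;	/* possible once EDID 1.4 offsets apply */
	unsigned char truncated = (unsigned char)max_vfreq;

	printf("Max: %u\n", (unsigned int)truncated);	/* prints 44: wrapped */
	printf("Max: %u\n", max_vfreq);			/* prints 300: correct */
	return 0;
}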
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index bbc25e3b7220..eaa819381281 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -5971,12 +5971,14 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
static
-void get_monitor_range(const struct detailed_timing *timing,
- void *info_monitor_range)
+void get_monitor_range(const struct detailed_timing *timing, void *c)
{
- struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_info *info = &closure->connector->display_info;
+ struct drm_monitor_range_info *monitor_range = &info->monitor_range;
const struct detailed_non_pixel *data = &timing->data.other_data;
const struct detailed_data_monitor_range *range = &data->data.range;
+ const struct edid *edid = closure->drm_edid->edid;
if (!is_display_descriptor(timing, EDID_DETAIL_MONITOR_RANGE))
return;
@@ -5992,18 +5994,28 @@ void get_monitor_range(const struct detailed_timing *timing,
monitor_range->min_vfreq = range->min_vfreq;
monitor_range->max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ monitor_range->min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ monitor_range->max_vfreq += 255;
+ }
}
static void drm_get_monitor_range(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
- struct drm_display_info *info = &connector->display_info;
+ const struct drm_display_info *info = &connector->display_info;
+ struct detailed_mode_closure closure = {
+ .connector = connector,
+ .drm_edid = drm_edid,
+ };
if (!version_greater(drm_edid, 1, 1))
return;
- drm_for_each_detailed_block(drm_edid, get_monitor_range,
- &info->monitor_range);
+ drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
info->monitor_range.min_vfreq,
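The decode added above follows the EDID 1.4 display range limits descriptor: when an offset flag is set, 255 Hz is added to the stored 8-bit rate, which is why values above 255 become representable. A self-contained sketch of the same decode; the flag bit positions are assumed to match the DRM definitions used in the hunk:

#include <stdint.h>
#include <stdio.h>

#define RANGE_OFFSET_MIN_VFREQ (1 << 0)	/* assumed bit positions */
#define RANGE_OFFSET_MAX_VFREQ (1 << 1)

static void decode_vfreq(uint8_t edid_revision, uint8_t offset_flags,
			 uint8_t raw_min, uint8_t raw_max,
			 uint16_t *min_vfreq, uint16_t *max_vfreq)
{
	*min_vfreq = raw_min;
	*max_vfreq = raw_max;

	/* Offsets exist only in EDID 1.4+; each flag adds 255 Hz of range. */
	if (edid_revision >= 4) {
		if (offset_flags & RANGE_OFFSET_MIN_VFREQ)
			*min_vfreq += 255;
		if (offset_flags & RANGE_OFFSET_MAX_VFREQ)
			*max_vfreq += 255;
	}
}

int main(void)
{
	uint16_t lo, hi;

	decode_vfreq(4, RANGE_OFFSET_MAX_VFREQ, 48, 45, &lo, &hi);
	printf("%u-%u Hz\n", lo, hi);	/* 48-300 Hz */
	return 0;
}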
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index dd32b484dd82..ce96234f3df2 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -581,11 +581,9 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = true;
dev_priv->regmap = cdv_regmap;
gma_get_core_freq(dev);
psb_intel_opregion_init(dev);
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index dffe37490206..4b7627a72637 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -112,12 +112,12 @@ static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct psb_gem_object *pobj = to_psb_gem_object(obj);
- drm_gem_object_release(obj);
-
/* Undo the mmap pin if we are destroying the object */
if (pobj->mmapping)
psb_gem_unpin(pobj);
+ drm_gem_object_release(obj);
+
WARN_ON(pobj->in_gart && !pobj->stolen);
release_resource(&pobj->resource);
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index bd40c040a2c9..2f52eceda3a1 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -532,15 +532,18 @@ int gma_crtc_page_flip(struct drm_crtc *crtc,
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
gma_crtc->page_flip_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
/* Call this locked if we want an event at vblank interrupt. */
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
if (ret) {
- gma_crtc->page_flip_event = NULL;
- drm_crtc_vblank_put(crtc);
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (gma_crtc->page_flip_event) {
+ gma_crtc->page_flip_event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
}
-
- spin_unlock_irqrestore(&dev->event_lock, flags);
} else {
ret = crtc_funcs->mode_set_base(crtc, crtc->x, crtc->y, old_fb);
}
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 5923a9c89312..f90e628cb482 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -501,12 +501,9 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (pci_enable_msi(pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
-
+ dev_priv->use_msi = true;
dev_priv->regmap = oaktrail_regmap;
ret = mid_chip_setup(dev);
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index b91de6d36e41..66873085d450 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -139,8 +139,6 @@ static void gma_suspend_pci(struct pci_dev *pdev)
dev_priv->regs.saveBSM = bsm;
pci_read_config_dword(pdev, 0xFC, &vbt);
dev_priv->regs.saveVBT = vbt;
- pci_read_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, &dev_priv->msi_addr);
- pci_read_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, &dev_priv->msi_data);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
@@ -168,9 +166,6 @@ static bool gma_resume_pci(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_write_config_dword(pdev, 0x5c, dev_priv->regs.saveBSM);
pci_write_config_dword(pdev, 0xFC, dev_priv->regs.saveVBT);
- /* restoring MSI address and data in PCIx space */
- pci_write_config_dword(pdev, PSB_PCIx_MSI_ADDR_LOC, dev_priv->msi_addr);
- pci_write_config_dword(pdev, PSB_PCIx_MSI_DATA_LOC, dev_priv->msi_data);
ret = pci_enable_device(pdev);
if (ret != 0)
@@ -223,8 +218,7 @@ int gma_power_resume(struct device *_dev)
mutex_lock(&power_mutex);
gma_resume_pci(pdev);
gma_resume_display(pdev);
- gma_irq_preinstall(dev);
- gma_irq_postinstall(dev);
+ gma_irq_install(dev);
mutex_unlock(&power_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 1d8744f3e702..54e756b48606 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -383,7 +383,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- gma_irq_install(dev, pdev->irq);
+ gma_irq_install(dev);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 0ea3d23575f3..731cc356c07a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -490,6 +490,7 @@ struct drm_psb_private {
int rpm_enabled;
/* MID specific */
+ bool use_msi;
bool has_gct;
struct oaktrail_gct_data gct_data;
@@ -499,10 +500,6 @@ struct drm_psb_private {
/* Register state */
struct psb_save_area regs;
- /* MSI reg save */
- uint32_t msi_addr;
- uint32_t msi_data;
-
/* Hotplug handling */
struct work_struct hotplug_work;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index e6e6d61bbeab..038f18ed0a95 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -316,17 +316,24 @@ void gma_irq_postinstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-int gma_irq_install(struct drm_device *dev, unsigned int irq)
+int gma_irq_install(struct drm_device *dev)
{
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- if (irq == IRQ_NOTCONNECTED)
+ if (dev_priv->use_msi && pci_enable_msi(pdev)) {
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ dev_priv->use_msi = false;
+ }
+
+ if (pdev->irq == IRQ_NOTCONNECTED)
return -ENOTCONN;
gma_irq_preinstall(dev);
/* PCI devices require shared interrupts. */
- ret = request_irq(irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
+ ret = request_irq(pdev->irq, gma_irq_handler, IRQF_SHARED, dev->driver->name, dev);
if (ret)
return ret;
@@ -369,6 +376,8 @@ void gma_irq_uninstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
free_irq(pdev->irq, dev);
+ if (dev_priv->use_msi)
+ pci_disable_msi(pdev);
}
int gma_crtc_enable_vblank(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index b51e395194ff..7648f69824a5 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -17,7 +17,7 @@ struct drm_device;
void gma_irq_preinstall(struct drm_device *dev);
void gma_irq_postinstall(struct drm_device *dev);
-int gma_irq_install(struct drm_device *dev, unsigned int irq);
+int gma_irq_install(struct drm_device *dev);
void gma_irq_uninstall(struct drm_device *dev);
int gma_crtc_enable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 6d11e7938c83..f84d39762a72 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -23,9 +23,6 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
DEFINE_DRM_GEM_FOPS(hv_fops);
static struct drm_driver hyperv_driver = {
@@ -133,7 +130,6 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
}
ret = hyperv_setup_vram(hv, hdev);
-
if (ret)
goto err_vmbus_close;
@@ -150,18 +146,20 @@ static int hyperv_vmbus_probe(struct hv_device *hdev,
ret = hyperv_mode_config_init(hv);
if (ret)
- goto err_vmbus_close;
+ goto err_free_mmio;
ret = drm_dev_register(dev, 0);
if (ret) {
drm_err(dev, "Failed to register drm driver.\n");
- goto err_vmbus_close;
+ goto err_free_mmio;
}
drm_fbdev_generic_setup(dev, 0);
return 0;
+err_free_mmio:
+ vmbus_free_mmio(hv->mem->start, hv->fb_size);
err_vmbus_close:
vmbus_close(hdev->channel);
err_hv_set_drv_data:
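The error-path fix above restores the usual reverse-order unwind: once the MMIO region has been allocated, every later failure has to free it before closing the channel. A generic, compilable sketch of that ordering (helper names are illustrative, not the driver's):

#include <stdlib.h>

static void *open_channel(void) { return malloc(1); }
static void *alloc_mmio(void)   { return malloc(1); }
static int   register_dev(void) { return -1; /* simulate failure */ }

static int probe(void)
{
	void *chan, *mmio;

	chan = open_channel();
	if (!chan)
		return -1;

	mmio = alloc_mmio();
	if (!mmio)
		goto err_close;

	if (register_dev())
		goto err_free_mmio;	/* previously leaked the MMIO region */

	return 0;

err_free_mmio:
	free(mmio);
err_close:
	free(chan);
	return -1;
}

int main(void) { return probe() ? 1 : 0; }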
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 5dcfa7feffa9..1390729401a0 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1629,6 +1629,8 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
+
ret = intel_dsc_compute_params(crtc_state);
if (ret)
return ret;
@@ -2070,7 +2072,14 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
for_each_dsi_port(port, intel_dsi->ports) {
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 110fc98ec280..f5e1d692976e 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -16,6 +16,7 @@
#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
#include "intel_pci_config.h"
+#include "intel_pps.h"
/**
* scale - scale values from one range to another
@@ -971,26 +972,24 @@ int intel_backlight_device_register(struct intel_connector *connector)
if (!name)
return -ENOMEM;
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
-
- /*
- * Using the same name independent of the drm device or connector
- * prevents registration of multiple backlight devices in the
- * driver. However, we need to use the default name for backward
- * compatibility. Use unique names for subsequent backlight devices as a
- * fallback when the default name already exists.
- */
- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ bd = backlight_device_get_by_name(name);
+ if (bd) {
+ put_device(&bd->dev);
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
+ */
kfree(name);
name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
i915->drm.primary->index, connector->base.name);
if (!name)
return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
}
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
if (IS_ERR(bd)) {
drm_err(&i915->drm,
@@ -1773,9 +1772,13 @@ void intel_backlight_init_funcs(struct intel_panel *panel)
panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
}
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+ connector->panel.backlight.power = intel_pps_backlight_power;
+ }
/* We're using a standard PWM backlight interface */
panel->backlight.funcs = &pwm_bl_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 51dde5bfd956..7d6eb9ad7a02 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -479,6 +479,13 @@ init_bdb_block(struct drm_i915_private *i915,
block_size = get_blocksize(block);
+ /*
+ * Version number and new block size are considered
+ * part of the header for MIPI sequence block v3+.
+ */
+ if (section_id == BDB_MIPI_SEQUENCE && *(const u8 *)block >= 3)
+ block_size += 5;
+
entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
GFP_KERNEL);
if (!entry) {
@@ -1596,6 +1603,8 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
struct intel_panel *panel,
enum port port)
{
+ enum port port_bc = DISPLAY_VER(i915) >= 11 ? PORT_B : PORT_C;
+
if (!panel->vbt.dsi.config->dual_link || i915->vbt.version < 197) {
panel->vbt.dsi.bl_ports = BIT(port);
if (panel->vbt.dsi.config->cabc_supported)
@@ -1609,11 +1618,11 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.bl_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
- panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(PORT_C);
+ panel->vbt.dsi.bl_ports = BIT(PORT_A) | BIT(port_bc);
break;
}
@@ -1625,12 +1634,12 @@ static void parse_dsi_backlight_ports(struct drm_i915_private *i915,
panel->vbt.dsi.cabc_ports = BIT(PORT_A);
break;
case DL_DCS_PORT_C:
- panel->vbt.dsi.cabc_ports = BIT(PORT_C);
+ panel->vbt.dsi.cabc_ports = BIT(port_bc);
break;
default:
case DL_DCS_PORT_A_AND_C:
panel->vbt.dsi.cabc_ports =
- BIT(PORT_A) | BIT(PORT_C);
+ BIT(PORT_A) | BIT(port_bc);
break;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 79269d2c476b..3699869ab2db 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -404,15 +404,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
int clpchgroup;
int j;
- if (i < num_groups - 1)
- bi_next = &dev_priv->max_bw[i + 1];
-
clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
- if (i < num_groups - 1 && clpchgroup < clperchgroup)
- bi_next->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
- else
- bi_next->num_planes = 0;
+ if (i < num_groups - 1) {
+ bi_next = &dev_priv->max_bw[i + 1];
+
+ if (clpchgroup < clperchgroup)
+ bi_next->num_planes = (ipqdepth - clpchgroup) /
+ clpchgroup + 1;
+ else
+ bi_next->num_planes = 0;
+ }
bi->num_qgv_points = qi.num_points;
bi->num_psf_gv_points = qi.num_psf_points;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 32292c0be2bd..3ed7eeacc706 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -389,23 +389,13 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp)
return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
}
-static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
-{
- u32 voltage;
-
- voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
-
- return voltage == VOLTAGE_INFO_0_85V;
-}
-
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
- if (intel_phy_is_combo(dev_priv, phy) &&
- (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
+ if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -413,23 +403,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
- return 540000;
-
- return 810000;
-}
-
-static int dg1_max_source_rate(struct intel_dp *intel_dp)
-{
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
-
- if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
+ if (intel_dp_is_edp(intel_dp))
return 540000;
return 810000;
@@ -491,7 +465,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
max_rate = dg2_max_source_rate(intel_dp);
else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
- max_rate = dg1_max_source_rate(intel_dp);
+ max_rate = 810000;
else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
@@ -1395,6 +1369,7 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
* DP_DSC_RC_BUF_SIZE for this.
*/
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
+ vdsc_cfg->pic_height = crtc_state->hw.adjusted_mode.crtc_vdisplay;
/*
* Slice Height of 8 works for all currently available panels. So start
@@ -5293,8 +5268,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_init(intel_connector);
- if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
- intel_connector->panel.backlight.power = intel_pps_backlight_power;
intel_backlight_setup(intel_connector, pipe);
intel_edp_add_properties(intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9feaf1a589f3..d213d8ad1ea5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -671,6 +671,28 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
intel_dp_compute_rate(intel_dp, crtc_state->port_clock,
&link_bw, &rate_select);
+ /*
+ * WaEdpLinkRateDataReload
+ *
+ * Parade PS8461E MUX (used on various TGL+ laptops) needs
+ * to snoop the link rates reported by the sink when we
+ * use LINK_RATE_SET in order to operate in jitter cleaning
+ * mode (as opposed to redriver mode). Unfortunately it
+ * loses track of the snooped link rates when powered down,
+ * so we need to make it re-snoop often. Without this high
+ * link rates are not stable.
+ */
+ if (!link_bw) {
+ struct intel_connector *connector = intel_dp->attached_connector;
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] Reloading eDP link rates\n",
+ connector->base.base.id, connector->base.name);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+ }
+
if (link_bw)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index c8488f5ebd04..e415cd7c0b84 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -191,6 +191,9 @@ static struct intel_quirk intel_quirks[] = {
/* ASRock ITX*/
{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ /* ECS Liva Q2 */
+ { 0x3185, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time },
};
void intel_init_quirks(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 43e1bbc1e303..ca530f0733e0 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -460,7 +460,6 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
u8 i = 0;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->pic_height = pipe_config->hw.adjusted_mode.crtc_vdisplay;
vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
pipe_config->dsc.slice_count);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index b9b1fed99874..35136d26e517 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -1933,7 +1933,14 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
else
intel_dsi->ports = BIT(port);
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports;
+
intel_dsi->dcs_backlight_ports = intel_connector->panel.vbt.dsi.bl_ports;
+
+ if (drm_WARN_ON(&dev_priv->drm, intel_connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports))
+ intel_connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports;
+
intel_dsi->dcs_cabc_ports = intel_connector->panel.vbt.dsi.cabc_ports;
/* Create a DSI host (and a device) for each port. */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 389e9f157ca5..85482a04d158 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -723,6 +723,9 @@ bool i915_gem_object_needs_ccs_pages(struct drm_i915_gem_object *obj)
bool lmem_placement = false;
int i;
+ if (!HAS_FLAT_CCS(to_i915(obj->base.dev)))
+ return false;
+
for (i = 0; i < obj->mm.n_placements; i++) {
/* Compression is not allowed for the objects with smem placement */
if (obj->mm.placements[i]->type == INTEL_MEMORY_SYSTEM)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index f131dc065f47..6f3ab7ade41a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -297,7 +297,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
i915_tt->is_shmem = true;
}
- if (HAS_FLAT_CCS(i915) && i915_gem_object_needs_ccs_pages(obj))
+ if (i915_gem_object_needs_ccs_pages(obj))
ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
NUM_BYTES_PER_CCS_BYTE),
PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 14fe65812e42..1d19c073ba2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -12,6 +12,7 @@
#include "intel_llc.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
+#include "intel_rps.h"
struct ia_constants {
unsigned int min_gpu_freq;
@@ -55,9 +56,6 @@ static bool get_ia_constants(struct intel_llc *llc,
if (!HAS_LLC(i915) || IS_DGFX(i915))
return false;
- if (rps->max_freq <= rps->min_freq)
- return false;
-
consts->max_ia_freq = cpu_max_MHz();
consts->min_ring_freq =
@@ -65,13 +63,8 @@ static bool get_ia_constants(struct intel_llc *llc,
/* convert DDR frequency from units of 266.6MHz to bandwidth */
consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3);
- consts->min_gpu_freq = rps->min_freq;
- consts->max_gpu_freq = rps->max_freq;
- if (GRAPHICS_VER(i915) >= 9) {
- /* Convert GT frequency to 50 HZ units */
- consts->min_gpu_freq /= GEN9_FREQ_SCALER;
- consts->max_gpu_freq /= GEN9_FREQ_SCALER;
- }
+ consts->min_gpu_freq = intel_rps_get_min_raw_freq(rps);
+ consts->max_gpu_freq = intel_rps_get_max_raw_freq(rps);
return true;
}
@@ -131,6 +124,12 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
return;
/*
+ * Although this is unlikely on any platform during initialization,
+ * let's ensure we don't accidentally get into an infinite loop
+ */
+ if (consts.max_gpu_freq <= consts.min_gpu_freq)
+ return;
+ /*
* For each potential GPU frequency, load a ring frequency we'd like
* to use for memory access. We do this by specifying the IA frequency
* the PCU should use as a reference to determine the ring frequency.
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 2b10b96b17b5..933648cc90ff 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -638,9 +638,9 @@ static int emit_copy(struct i915_request *rq,
return 0;
}
-static int scatter_list_length(struct scatterlist *sg)
+static u64 scatter_list_length(struct scatterlist *sg)
{
- int len = 0;
+ u64 len = 0;
while (sg && sg_dma_len(sg)) {
len += sg_dma_len(sg);
@@ -650,28 +650,26 @@ static int scatter_list_length(struct scatterlist *sg)
return len;
}
-static void
+static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
- int *src_sz, u32 bytes_to_cpy, u32 ccs_bytes_to_cpy)
+ u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
- if (ccs_bytes_to_cpy) {
- if (!src_is_lmem)
- /*
- * When CHUNK_SZ is passed all the pages upto CHUNK_SZ
- * will be taken for the blt. in Flat-ccs supported
- * platform Smem obj will have more pages than required
- * for main meory hence limit it to the required size
- * for main memory
- */
- *src_sz = min_t(int, bytes_to_cpy, CHUNK_SZ);
- } else { /* ccs handling is not required */
- *src_sz = CHUNK_SZ;
- }
+ if (ccs_bytes_to_cpy && !src_is_lmem)
+ /*
+ * When CHUNK_SZ is passed, all the pages up to CHUNK_SZ
+ * will be taken for the blt. On Flat-CCS capable
+ * platforms an smem object will have more pages than
+ * required for main memory, hence limit it to the
+ * required size for main memory
+ */
+ return min_t(u64, bytes_to_cpy, CHUNK_SZ);
+ else
+ return CHUNK_SZ;
}
-static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy)
+static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{
- u32 len;
+ u64 len;
do {
GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
@@ -702,12 +700,12 @@ intel_context_migrate_copy(struct intel_context *ce,
{
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915;
- u32 ccs_bytes_to_cpy = 0, bytes_to_cpy;
+ u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
enum i915_cache_level ccs_cache_level;
u32 src_offset, dst_offset;
u8 src_access, dst_access;
struct i915_request *rq;
- int src_sz, dst_sz;
+ u64 src_sz, dst_sz;
bool ccs_is_src, overwrite_ccs;
int err;
@@ -790,8 +788,8 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err)
goto out_rq;
- calculate_chunk_sz(i915, src_is_lmem, &src_sz,
- bytes_to_cpy, ccs_bytes_to_cpy);
+ src_sz = calculate_chunk_sz(i915, src_is_lmem,
+ bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
src_offset, src_sz);
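The widening to u64 throughout this hunk guards against objects larger than 4 GiB, whose byte totals overflow a 32-bit accumulator. A small demonstration of the wraparound the old 32-bit types allowed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk = 1ULL << 31;	/* 2 GiB per scatterlist entry */
	uint32_t len32 = 0;
	uint64_t len64 = 0;

	for (int i = 0; i < 4; i++) {	/* 8 GiB object */
		len32 += chunk;		/* truncating add */
		len64 += chunk;
	}
	printf("u32 total: %u\n", len32);			 /* 0: wrapped */
	printf("u64 total: %llu\n", (unsigned long long)len64);	 /* 8589934592 */
	return 0;
}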
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index fb3f57ee450b..7bb967034679 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -2126,6 +2126,31 @@ u32 intel_rps_get_max_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->max_freq_softlimit);
}
+/**
+ * intel_rps_get_max_raw_freq - returns the max frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the max frequency in a raw format. On newer platforms the raw
+ * value is in units of 50 MHz.
+ */
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->rp0_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->max_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
{
struct intel_guc_slpc *slpc = rps_to_slpc(rps);
@@ -2214,6 +2239,31 @@ u32 intel_rps_get_min_frequency(struct intel_rps *rps)
return intel_gpu_freq(rps, rps->min_freq_softlimit);
}
+/**
+ * intel_rps_get_min_raw_freq - returns the min frequency in some raw format.
+ * @rps: the intel_rps structure
+ *
+ * Returns the min frequency in a raw format. On newer platforms the raw
+ * value is in units of 50 MHz.
+ */
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps)
+{
+ struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+ u32 freq;
+
+ if (rps_uses_slpc(rps)) {
+ return DIV_ROUND_CLOSEST(slpc->min_freq,
+ GT_FREQUENCY_MULTIPLIER);
+ } else {
+ freq = rps->min_freq;
+ if (GRAPHICS_VER(rps_to_i915(rps)) >= 9) {
+ /* Convert GT frequency to 50 MHz units */
+ freq /= GEN9_FREQ_SCALER;
+ }
+ return freq;
+ }
+}
+
static int set_min_freq(struct intel_rps *rps, u32 val)
{
int ret = 0;
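Both new helpers normalize two firmware interfaces to the same raw 50 MHz units: SLPC reports plain MHz, while the legacy rps path counts Gen9+ hardware steps of 50/3 MHz. A worked check of that arithmetic, with the constants assumed from the surrounding code (GT_FREQUENCY_MULTIPLIER = 50, GEN9_FREQ_SCALER = 3):

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int slpc_rp0_mhz = 1100;	/* SLPC reports MHz */
	unsigned int rps_max_freq = 66;		/* legacy: 16.67 MHz hw units */

	/* Both paths land in the same raw 50 MHz units: */
	printf("slpc raw: %u\n", DIV_ROUND_CLOSEST(slpc_rp0_mhz,
						   GT_FREQUENCY_MULTIPLIER)); /* 22 */
	printf("rps  raw: %u\n", rps_max_freq / GEN9_FREQ_SCALER);	      /* 22 */
	return 0;
}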
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 1e8d56491308..4509dfdc52e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -37,8 +37,10 @@ u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_raw_freq(struct intel_rps *rps);
int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+u32 intel_rps_get_max_raw_freq(struct intel_rps *rps);
int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 76916aed897a..3e91f44829e9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1438,7 +1438,12 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
- cancel_delayed_work(&guc->timestamp.work);
+ /*
+ * There is a race with suspend flow where the worker runs after suspend
+ * and causes an unclaimed register access warning. Cancel the worker
+ * synchronously here.
+ */
+ cancel_delayed_work_sync(&guc->timestamp.work);
/*
* Before parking, we should sample engine busyness stats if we need to.
@@ -4027,6 +4032,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
xa_destroy(&guc->context_lookup);
/*
+ * A reset might have occurred while we had a pending stalled request,
+ * so make sure we clean that up.
+ */
+ guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
+
+ /*
* Some contexts might have been pinned before we enabled GuC
* submission, so we need to add them to the GuC bookkeeping.
* Also, after a reset of the GuC we want to make sure that the
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 557f3314291a..3b81a6d35a7b 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -298,7 +298,7 @@ no_enough_resource:
}
/**
- * inte_gvt_free_vgpu_resource - free HW resource owned by a vGPU
+ * intel_vgpu_free_resource() - free HW resource owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to free the HW resource owned by a vGPU.
@@ -328,7 +328,7 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
}
/**
- * intel_alloc_vgpu_resource - allocate HW resource for a vGPU
+ * intel_vgpu_alloc_resource() - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
*
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b4f69364f9a1..ce0eb03709c3 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2341,7 +2341,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gvt_vgpu_err("fail to populate guest ggtt entry\n");
/* guest driver may read/write the entry when partial
* update the entry in this situation p2m will fail
- * settting the shadow entry to point to a scratch page
+ * setting the shadow entry to point to a scratch page
*/
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
} else
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index beea5895e499..61423da36710 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -905,7 +905,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu,
else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
index = FDI_RX_IMR_TO_PIPE(offset);
else {
- gvt_vgpu_err("Unsupport registers %x\n", offset);
+ gvt_vgpu_err("Unsupported registers %x\n", offset);
return -EINVAL;
}
@@ -3052,7 +3052,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
- * intel_t_default_mmio_write - default MMIO write handler
+ * intel_vgpu_default_mmio_write() - default MMIO write handler
* @vgpu: a vGPU
* @offset: access offset
* @p_data: write data buffer
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index c85bafe7539e..1c6e941c9666 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -546,7 +546,7 @@ static void switch_mmio(struct intel_vgpu *pre,
}
/**
- * intel_gvt_switch_render_mmio - switch mmio context of specific engine
+ * intel_gvt_switch_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
* @engine: the engine
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3168d7007e10..135d04c2d41c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1857,14 +1857,14 @@
#define GT0_PERF_LIMIT_REASONS _MMIO(0x1381a8)
#define GT0_PERF_LIMIT_REASONS_MASK 0xde3
-#define PROCHOT_MASK REG_BIT(1)
-#define THERMAL_LIMIT_MASK REG_BIT(2)
-#define RATL_MASK REG_BIT(6)
-#define VR_THERMALERT_MASK REG_BIT(7)
-#define VR_TDC_MASK REG_BIT(8)
-#define POWER_LIMIT_4_MASK REG_BIT(9)
-#define POWER_LIMIT_1_MASK REG_BIT(11)
-#define POWER_LIMIT_2_MASK REG_BIT(12)
+#define PROCHOT_MASK REG_BIT(0)
+#define THERMAL_LIMIT_MASK REG_BIT(1)
+#define RATL_MASK REG_BIT(5)
+#define VR_THERMALERT_MASK REG_BIT(6)
+#define VR_TDC_MASK REG_BIT(7)
+#define POWER_LIMIT_4_MASK REG_BIT(8)
+#define POWER_LIMIT_1_MASK REG_BIT(10)
+#define POWER_LIMIT_2_MASK REG_BIT(11)
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
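The corrected positions can be checked against the mask defined just above them: OR-ing the eight fixed bits reproduces GT0_PERF_LIMIT_REASONS_MASK (0xde3) exactly, which the old off-by-one positions (bits 1..12) did not. A compilable check:

#include <assert.h>
#include <stdint.h>

#define REG_BIT(n) (1u << (n))

int main(void)
{
	uint32_t all = REG_BIT(0) | REG_BIT(1) | REG_BIT(5) | REG_BIT(6) |
		       REG_BIT(7) | REG_BIT(8) | REG_BIT(10) | REG_BIT(11);

	assert(all == 0xde3);	/* holds with the fixed bit positions */
	return 0;
}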
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 260371716490..373582cfd8f3 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1882,12 +1882,13 @@ int _i915_vma_move_to_active(struct i915_vma *vma,
enum dma_resv_usage usage;
int idx;
- obj->read_domains = 0;
if (flags & EXEC_OBJECT_WRITE) {
usage = DMA_RESV_USAGE_WRITE;
obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
} else {
usage = DMA_RESV_USAGE_READ;
+ obj->write_domain = 0;
}
dma_fence_array_for_each(curr, idx, fence)
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 157e166672d7..5595639d0033 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -1076,7 +1076,8 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter)
MMIO_D(GEN8_HDC_CHICKEN1);
MMIO_D(GEN9_WM_CHICKEN3);
- if (IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ if (IS_KABYLAKE(dev_priv) ||
+ IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
MMIO_D(GAMT_CHKN_BIT_REG);
if (!IS_BROXTON(dev_priv))
MMIO_D(GEN9_CTX_PREEMPT_REG);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index f06babdb3a8c..9fe4b583cc28 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6561,7 +6561,10 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
enum plane_id plane_id;
u8 slices;
- skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
+ memset(&crtc_state->wm.skl.optimal, 0,
+ sizeof(crtc_state->wm.skl.optimal));
+ if (crtc_state->hw.active)
+ skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
memset(&dbuf_state->ddb[pipe], 0, sizeof(dbuf_state->ddb[pipe]));
@@ -6572,6 +6575,9 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
struct skl_ddb_entry *ddb_y =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
+ if (!crtc_state->hw.active)
+ continue;
+
skl_ddb_get_hw_plane_state(dev_priv, crtc->pipe,
plane_id, ddb, ddb_y);
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index b9ac932af8d0..03acc68abf2c 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -170,7 +170,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Enable OSD and BLK0, set max global alpha */
priv->viu.osd1_ctrl_stat = OSD_ENABLE |
- (0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
+ (0x100 << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
priv->viu.osd1_ctrl_stat2 = readl(priv->io_base +
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index bb7e109534de..d4b907889a21 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -94,7 +94,7 @@ static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
- writel((m[11] & 0x1fff) << 16,
+ writel((m[11] & 0x1fff),
priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index c682d4e02d1b..52a626117f70 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2061,6 +2061,12 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ if (phys_enc->hw_intf)
+ intf_cfg.intf = phys_enc->hw_intf->idx;
+ if (phys_enc->hw_wb)
+ intf_cfg.wb = phys_enc->hw_wb->idx;
+
if (phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index ab6aa13b1639..013ca02e17cb 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1214,7 +1214,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
if (ret)
return ret;
- dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+ dp_ctrl_train_pattern_set(ctrl, pattern);
for (tries = 0; tries <= maximum_retries; tries++) {
drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 2c23324a2296..72c018e26f47 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -109,7 +109,7 @@ static const char * const dsi_8996_bus_clk_names[] = {
static const struct msm_dsi_config msm8996_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 2,
+ .num = 3,
.regs = {
{"vdda", 18160, 1 }, /* 1.25 V */
{"vcca", 17000, 32 }, /* 0.925 V */
@@ -148,7 +148,7 @@ static const char * const dsi_sdm660_bus_clk_names[] = {
static const struct msm_dsi_config sdm660_dsi_cfg = {
.io_offset = DSI_6G_REG_SHIFT,
.reg_cfg = {
- .num = 2,
+ .num = 1,
.regs = {
{"vdda", 12560, 4 }, /* 1.2 V */
},
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index a39de3bdc7fa..56dfa2d24be1 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -347,7 +347,7 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
} else {
timing->shared_timings.clk_pre =
linear_inter(tmax, tmin, pcnt2, 0, false);
- timing->shared_timings.clk_pre_inc_by_2 = 0;
+ timing->shared_timings.clk_pre_inc_by_2 = 0;
}
timing->ta_go = 3;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 1ed4cd09dbf8..16884db272de 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -469,6 +469,8 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
}
}
+ drm_helper_move_panel_connectors_to_head(ddev);
+
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index d1f70426f554..85c443a37e4e 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -213,6 +213,8 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ dev_pm_qos_remove_request(&df->idle_freq);
+ dev_pm_qos_remove_request(&df->boost_freq);
df->devfreq = NULL;
return;
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index a92ffde53f0b..db2f847c8535 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -196,6 +196,9 @@ static int rd_open(struct inode *inode, struct file *file)
file->private_data = rd;
rd->open = true;
+ /* Reset fifo to clear any previously unread data: */
+ rd->fifo.head = rd->fifo.tail = 0;
+
/* the parsing tools need to know gpu-id to know which
* register database to load.
*
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index cdb154c8b866..b75c690bb0cc 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -1295,7 +1295,8 @@ static const struct panel_desc innolux_n116bca_ea1 = {
},
.delay = {
.hpd_absent = 200,
- .prepare_to_enable = 80,
+ .enable = 80,
+ .disable = 50,
.unprepare = 500,
},
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 5110cd9b2425..fe5f12f16a63 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -131,6 +131,17 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = cur_freq;
+
+ /*
+ * Set the recommended OPP; this will enable and configure the
+ * regulator, if any, and avoid a switch-off by regulator_late_cleanup().
+ */
+ ret = dev_pm_opp_set_opp(dev, opp);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
+ return ret;
+ }
+
dev_pm_opp_put(opp);
/*
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index c204e9b95c1f..518ee13b1d6f 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -283,8 +283,9 @@ static int cdn_dp_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static int cdn_dp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status
+cdn_dp_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
struct cdn_dp_device *dp = connector_to_dp(connector);
struct drm_display_info *display_info = &dp->connector.display_info;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index e4631f515ba4..f9aa8b96c695 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -1439,11 +1439,15 @@ static void rk3568_set_intf_mux(struct vop2_video_port *vp, int id,
die &= ~RK3568_SYS_DSP_INFACE_EN_HDMI_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_HDMI |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_HDMI_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__HDMI_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__HDMI_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_EDP0:
die &= ~RK3568_SYS_DSP_INFACE_EN_EDP_MUX;
die |= RK3568_SYS_DSP_INFACE_EN_EDP |
FIELD_PREP(RK3568_SYS_DSP_INFACE_EN_EDP_MUX, vp->id);
+ dip &= ~RK3568_DSP_IF_POL__EDP_PIN_POL;
+ dip |= FIELD_PREP(RK3568_DSP_IF_POL__EDP_PIN_POL, polflags);
break;
case ROCKCHIP_VOP2_EP_MIPI0:
die &= ~RK3568_SYS_DSP_INFACE_EN_MIPI0_MUX;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1cbfb00c1d65..911141d16e95 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -236,16 +236,19 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
- if (fbo->base.resource) {
- ttm_resource_set_bo(fbo->base.resource, &fbo->base);
- bo->resource = NULL;
- }
-
dma_resv_init(&fbo->base.base._resv);
fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
+ if (fbo->base.resource) {
+ ttm_resource_set_bo(fbo->base.resource, &fbo->base);
+ bo->resource = NULL;
+ ttm_bo_set_bulk_move(&fbo->base, NULL);
+ } else {
+ fbo->base.bulk_move = NULL;
+ }
+
ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1);
if (ret) {
kfree(fbo);
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 4b90c86ee5f8..47774b9ab3de 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -288,11 +288,29 @@ int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
return 0;
}
+static const struct dmi_system_id dmi_nodevs[] = {
+ {
+ /*
+ * Google Chromebooks use the Chrome OS Embedded Controller
+ * Sensor Hub instead of Sensor Hub Fusion and leave MP2
+ * uninitialized, which disables all functionality, including
+ * the registers necessary for feature detection.
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ },
+ },
+ { }
+};
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
int rc;
+ if (dmi_first_match(dmi_nodevs))
+ return -ENODEV;
+
privdata = devm_kzalloc(&pdev->dev, sizeof(*privdata), GFP_KERNEL);
if (!privdata)
return -ENOMEM;
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 08c9a9a60ae4..b59c3dafa6a4 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1212,6 +1212,13 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
rdesc = new_rdesc;
}
+ if (drvdata->quirks & QUIRK_ROG_NKEY_KEYBOARD &&
+ *rsize == 331 && rdesc[190] == 0x85 && rdesc[191] == 0x5a &&
+ rdesc[204] == 0x95 && rdesc[205] == 0x05) {
+ hid_info(hdev, "Fixing up Asus N-KEY keyb report descriptor\n");
+ rdesc[205] = 0x01;
+ }
+
return rdesc;
}
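The magic offsets in the fixup are HID 1.11 short items: 0x85 0x5a encodes Report ID (0x5a) and 0x95 0x05 encodes Report Count (5), so rewriting rdesc[205] to 0x01 drops the report count to 1. A small decoder for those two byte pairs, sketching the short-item encoding rather than the kernel's parser:

#include <stdint.h>
#include <stdio.h>

/* HID 1.11 short item: prefix = tag[7:4] | type[3:2] | size[1:0]. */
static void decode(uint8_t prefix, uint8_t data)
{
	uint8_t tag = prefix >> 4, type = (prefix >> 2) & 3, size = prefix & 3;

	if (size == 1 && type == 1 && tag == 0x8)	/* Global: Report ID */
		printf("Report ID (0x%02x)\n", data);
	else if (size == 1 && type == 1 && tag == 0x9)	/* Global: Report Count */
		printf("Report Count (%u)\n", data);
}

int main(void)
{
	decode(0x85, 0x5a);	/* the bytes at rdesc[190..191] */
	decode(0x95, 0x05);	/* rdesc[204..205]: count the fixup drops to 1 */
	return 0;
}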
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0fb720a96399..f80d6193fca6 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -185,6 +185,8 @@
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
+#define USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY 0x8302
#define USB_VENDOR_ID_ASUS 0x0486
#define USB_DEVICE_ID_ASUS_T91MT 0x0185
@@ -414,6 +416,7 @@
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN 0x261A
#define I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN 0x2A1C
+#define I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN 0x279F
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 48c1c02c69f4..859aeb07542e 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -383,6 +383,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO2_TOUCHSCREEN),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_LENOVO_YOGA_C630_TOUCHSCREEN),
+ HID_BATTERY_QUIRK_IGNORE },
{}
};
@@ -1532,7 +1534,10 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
* assume ours
*/
if (!report->tool)
- hid_report_set_tool(report, input, usage->code);
+ report->tool = usage->code;
+
+ /* drivers may have changed the value behind our back, resend it */
+ hid_report_set_tool(report, input, report->tool);
} else {
hid_report_release_tool(report, input, usage->code);
}
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 92ac4f605f13..6028af3c3aae 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -1221,6 +1221,7 @@ static void joycon_parse_report(struct joycon_ctlr *ctlr,
spin_lock_irqsave(&ctlr->lock, flags);
if (IS_ENABLED(CONFIG_NINTENDO_FF) && rep->vibrator_report &&
+ ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED &&
(msecs - ctlr->rumble_msecs) >= JC_RUMBLE_PERIOD_MS &&
(ctlr->rumble_queue_head != ctlr->rumble_queue_tail ||
ctlr->rumble_zero_countdown > 0)) {
@@ -1545,12 +1546,13 @@ static int joycon_set_rumble(struct joycon_ctlr *ctlr, u16 amp_r, u16 amp_l,
ctlr->rumble_queue_head = 0;
memcpy(ctlr->rumble_data[ctlr->rumble_queue_head], data,
JC_RUMBLE_DATA_SIZE);
- spin_unlock_irqrestore(&ctlr->lock, flags);
/* don't wait for the periodic send (reduces latency) */
- if (schedule_now)
+ if (schedule_now && ctlr->ctlr_state != JOYCON_CTLR_STATE_REMOVED)
queue_work(ctlr->rumble_queue, &ctlr->rumble_worker);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
return 0;
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index dc67717d2dab..70f602c64fd1 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -314,6 +314,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index a3b151b29bd7..fc616db4231b 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
int ret;
r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
+ if (!r) {
+ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to send\n");
+ return -EINVAL;
+ }
+
if (hid_report_len(r) < 64)
return -EINVAL;
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index c3e6d69fdfbd..cf1679b0d4fb 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -67,12 +67,13 @@ static const struct tm_wheel_info tm_wheels_infos[] = {
{0x0200, 0x0005, "Thrustmaster T300RS (Missing Attachment)"},
{0x0206, 0x0005, "Thrustmaster T300RS"},
{0x0209, 0x0005, "Thrustmaster T300RS (Open Wheel Attachment)"},
+ {0x020a, 0x0005, "Thrustmaster T300RS (Sparco R383 Mod)"},
{0x0204, 0x0005, "Thrustmaster T300 Ferrari Alcantara Edition"},
{0x0002, 0x0002, "Thrustmaster T500RS"}
//{0x0407, 0x0001, "Thrustmaster TMX"}
};
-static const uint8_t tm_wheels_infos_length = 4;
+static const uint8_t tm_wheels_infos_length = 7;
/*
* This struct contains (in little endian) the response data
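The hand-maintained length constant above was stale (4) until this change bumps it to 7; deriving it from the array removes that failure mode entirely. A sketch, assuming nothing depends on the object being a u8:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	/* stays correct no matter how many wheels are added */
	static const u8 tm_wheels_infos_length = ARRAY_SIZE(tm_wheels_infos);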
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 681614a8302a..197b1e7bf029 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -350,6 +350,8 @@ static int hidraw_release(struct inode * inode, struct file * file)
down_write(&minors_rwsem);
spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+ for (int i = list->tail; i < list->head; i++)
+ kfree(list->buffer[i].value);
list_del(&list->node);
spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index e600dbf04dfc..fc108f19a64c 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -32,6 +32,7 @@
#define ADL_P_DEVICE_ID 0x51FC
#define ADL_N_DEVICE_ID 0x54FC
#define RPL_S_DEVICE_ID 0x7A78
+#define MTL_P_DEVICE_ID 0x7E45
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 2c67ec17bec6..7120b30ac51d 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -43,6 +43,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_P_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.h b/drivers/hid/intel-ish-hid/ishtp-hid.h
index 6a5cc11aefd8..35dddc5015b3 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.h
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.h
@@ -105,7 +105,7 @@ struct report_list {
* @multi_packet_cnt: Count of fragmented packet count
*
* This structure is used to store completion flags and per client data like
- * like report description, number of HID devices etc.
+ * report description, number of HID devices etc.
*/
struct ishtp_cl_data {
/* completion flags */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 405e0d5212cc..df0a825694f5 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -626,13 +626,14 @@ static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
}
/**
- * ipc_tx_callback() - IPC tx callback function
+ * ipc_tx_send() - IPC tx send function
* @prm: Pointer to client device instance
*
- * Send message over IPC either first time or on callback on previous message
- * completion
+ * Send message over IPC. The message is split into fragments
+ * if it is bigger than the IPC FIFO size, and all fragments
+ * are sent one by one.
*/
-static void ipc_tx_callback(void *prm)
+static void ipc_tx_send(void *prm)
{
struct ishtp_cl *cl = prm;
struct ishtp_cl_tx_ring *cl_msg;
@@ -677,32 +678,41 @@ static void ipc_tx_callback(void *prm)
list);
rem = cl_msg->send_buf.size - cl->tx_offs;
- ishtp_hdr.host_addr = cl->host_client_id;
- ishtp_hdr.fw_addr = cl->fw_client_id;
- ishtp_hdr.reserved = 0;
- pmsg = cl_msg->send_buf.data + cl->tx_offs;
+ while (rem > 0) {
+ ishtp_hdr.host_addr = cl->host_client_id;
+ ishtp_hdr.fw_addr = cl->fw_client_id;
+ ishtp_hdr.reserved = 0;
+ pmsg = cl_msg->send_buf.data + cl->tx_offs;
+
+ if (rem <= dev->mtu) {
+ /* Last fragment or only one packet */
+ ishtp_hdr.length = rem;
+ ishtp_hdr.msg_complete = 1;
+ /* Submit to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs = 0;
+ cl->sending = 0;
- if (rem <= dev->mtu) {
- ishtp_hdr.length = rem;
- ishtp_hdr.msg_complete = 1;
- cl->sending = 0;
- list_del_init(&cl_msg->list); /* Must be before write */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- /* Submit to IPC queue with no callback */
- ishtp_write_message(dev, &ishtp_hdr, pmsg);
- spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
- list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
- ++cl->tx_ring_free_size;
- spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
- tx_free_flags);
- } else {
- /* Send IPC fragment */
- spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
- cl->tx_offs += dev->mtu;
- ishtp_hdr.length = dev->mtu;
- ishtp_hdr.msg_complete = 0;
- ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
+ break;
+ } else {
+ /* Send IPC fragment */
+ ishtp_hdr.length = dev->mtu;
+ ishtp_hdr.msg_complete = 0;
+ /* All fragments submitted to IPC queue with no callback */
+ ishtp_write_message(dev, &ishtp_hdr, pmsg);
+ cl->tx_offs += dev->mtu;
+ rem = cl_msg->send_buf.size - cl->tx_offs;
+ }
}
+
+ list_del_init(&cl_msg->list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
+ tx_free_flags);
}
/**
@@ -720,7 +730,7 @@ static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
return;
cl->tx_offs = 0;
- ipc_tx_callback(cl);
+ ipc_tx_send(cl);
++cl->send_msg_cnt_ipc;
}
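The rewrite above replaces the re-armed completion callback with a synchronous loop: every MTU-sized chunk is written immediately, and only the final chunk carries msg_complete. The same splitting logic in a standalone sketch, with send_one() as a hypothetical stand-in for ishtp_write_message():

	#include <stddef.h>
	#include <stdbool.h>

	/* hypothetical transmit primitive standing in for ishtp_write_message() */
	void send_one(const char *frag, size_t len, bool msg_complete);

	/* same splitting logic as the loop above, assuming size > 0 */
	static void send_fragmented(const char *buf, size_t size, size_t mtu)
	{
		size_t offs = 0;

		while (size - offs > mtu) {
			send_one(buf + offs, mtu, false);   /* msg_complete = 0 */
			offs += mtu;
		}
		/* last (or only) fragment: completes the message */
		send_one(buf + offs, size - offs, true);    /* msg_complete = 1 */
	}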
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index 660036da7449..922d83eb7ddf 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -129,7 +129,7 @@ static void fcopy_send_data(struct work_struct *dummy)
/*
* The strings sent from the host are encoded in
- * in utf16; convert it to utf8 strings.
+ * utf16; convert it to utf8 strings.
* The host assures us that the utf16 strings will not exceed
* the max lengths specified. We will however, reserve room
* for the string terminating character - in the utf16s_utf8s()
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 23c680d1a0f5..3c833ea60db6 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/dma-map-ops.h>
+#include <linux/pci.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -2262,26 +2263,43 @@ static int vmbus_acpi_remove(struct acpi_device *device)
static void vmbus_reserve_fb(void)
{
- int size;
+ resource_size_t start = 0, size;
+ struct pci_dev *pdev;
+
+ if (efi_enabled(EFI_BOOT)) {
+ /* Gen2 VM: get FB base from EFI framebuffer */
+ start = screen_info.lfb_base;
+ size = max_t(__u32, screen_info.lfb_size, 0x800000);
+ } else {
+ /* Gen1 VM: get FB base from PCI */
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+ if (!pdev)
+ return;
+
+ if (pdev->resource[0].flags & IORESOURCE_MEM) {
+ start = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+ }
+
+ /*
+ * Release the PCI device so hyperv_drm or hyperv_fb driver can
+ * grab it later.
+ */
+ pci_dev_put(pdev);
+ }
+
+ if (!start)
+ return;
+
/*
* Make a claim for the frame buffer in the resource tree under the
* first node, which will be the one below 4GB. The length seems to
* be underreported, particularly in a Generation 1 VM. So start out
* reserving a larger area and make it smaller until it succeeds.
*/
-
- if (screen_info.lfb_base) {
- if (efi_enabled(EFI_BOOT))
- size = max_t(__u32, screen_info.lfb_size, 0x800000);
- else
- size = max_t(__u32, screen_info.lfb_size, 0x4000000);
-
- for (; !fb_mmio && (size >= 0x100000); size >>= 1) {
- fb_mmio = __request_region(hyperv_mmio,
- screen_info.lfb_base, size,
- fb_mmio_name, 0);
- }
- }
+ for (; !fb_mmio && (size >= 0x100000); size >>= 1)
+ fb_mmio = __request_region(hyperv_mmio, start, size, fb_mmio_name, 0);
}
/**
@@ -2313,7 +2331,7 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
bool fb_overlap_ok)
{
struct resource *iter, *shadow;
- resource_size_t range_min, range_max, start;
+ resource_size_t range_min, range_max, start, end;
const char *dev_n = dev_name(&device_obj->device);
int retval;
@@ -2348,6 +2366,14 @@ int vmbus_allocate_mmio(struct resource **new, struct hv_device *device_obj,
range_max = iter->end;
start = (range_min + align - 1) & ~(align - 1);
for (; start + size - 1 <= range_max; start += align) {
+ end = start + size - 1;
+
+ /* Skip the whole fb_mmio region if not fb_overlap_ok */
+ if (!fb_overlap_ok && fb_mmio &&
+ (((start >= fb_mmio->start) && (start <= fb_mmio->end)) ||
+ ((end >= fb_mmio->start) && (end <= fb_mmio->end))))
+ continue;
+
shadow = __request_region(iter, start, size, NULL,
IORESOURCE_BUSY);
if (!shadow)
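The consolidated reservation loop halves the claim until __request_region() succeeds, down to a 1 MiB floor; starting from the 0x800000 (8 MiB) minimum on the EFI path, it tries 8, 4, 2 and 1 MiB before giving up. The shrink-until-fit idiom in isolation, with region_claim() as a hypothetical stand-in:

	#include <stdbool.h>
	#include <stdint.h>

	bool region_claim(uint64_t start, uint64_t size);	/* hypothetical */

	static bool reserve_shrinking(uint64_t start, uint64_t size)
	{
		/* halve the claim until it fits; give up below 1 MiB */
		for (; size >= 0x100000; size >>= 1)
			if (region_claim(start, size))
				return true;
		return false;
	}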
diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c
index 61a4684fc020..81e688975c6a 100644
--- a/drivers/hwmon/asus-ec-sensors.c
+++ b/drivers/hwmon/asus-ec-sensors.c
@@ -266,9 +266,7 @@ static const struct ec_sensor_info sensors_family_intel_600[] = {
#define SENSOR_SET_WATER_BLOCK \
(SENSOR_TEMP_WATER_BLOCK_IN | SENSOR_TEMP_WATER_BLOCK_OUT)
-
struct ec_board_info {
- const char *board_names[MAX_IDENTICAL_BOARD_VARIATIONS];
unsigned long sensors;
/*
* Defines which mutex to use for guarding access to the state and the
@@ -281,152 +279,194 @@ struct ec_board_info {
enum board_family family;
};
-static const struct ec_board_info board_info[] = {
- {
- .board_names = {"PRIME X470-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
- .family = family_amd_400_series,
- },
- {
- .board_names = {"PRIME X570-PRO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ProArt X570-CREATOR WIFI"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- },
- {
- .board_names = {"Pro WS X570-ACE"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII DARK HERO"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG CROSSHAIR VIII FORMULA",
- "ROG CROSSHAIR VIII HERO",
- "ROG CROSSHAIR VIII HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {
- "ROG MAXIMUS XI HERO",
- "ROG MAXIMUS XI HERO (WI-FI)",
- },
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_intel_300_series,
- },
- {
- .board_names = {"ROG CROSSHAIR VIII IMPACT"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CPU_OPT,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX B550-I GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
- SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-E GAMING WIFI II"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
- SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-F GAMING"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
- SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX X570-I GAMING"},
- .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
- SENSOR_TEMP_T_SENSOR |
- SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
- SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
- .family = family_amd_500_series,
- },
- {
- .board_names = {"ROG STRIX Z690-A GAMING WIFI D4"},
- .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
- .family = family_intel_600_series,
- },
- {
- .board_names = {"ROG ZENITH II EXTREME"},
- .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
- SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
- SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
- SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
- SENSOR_SET_WATER_BLOCK |
- SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
- SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
- .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
- .family = family_amd_500_series,
- },
- {}
+static const struct ec_board_info board_info_prime_x470_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ACPI_GLOBAL_LOCK_PSEUDO_PATH,
+ .family = family_amd_400_series,
+};
+
+static const struct ec_board_info board_info_prime_x570_pro = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_art_x570_creator_wifi = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CPU_OPT |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_pro_ws_x570_ace = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_dark_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_maximus_xi_hero = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_WATER_FLOW,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_intel_300_series,
+};
+
+static const struct ec_board_info board_info_crosshair_viii_impact = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CPU_OPT,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_b550_i_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_VRM_HS | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM |
+ SENSOR_FAN_CHIPSET | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_e_gaming_wifi_ii = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_CURR_CPU |
+ SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_f_gaming = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB |
+ SENSOR_TEMP_T_SENSOR | SENSOR_FAN_CHIPSET,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_x570_i_gaming = {
+ .sensors = SENSOR_TEMP_CHIPSET | SENSOR_TEMP_VRM |
+ SENSOR_TEMP_T_SENSOR |
+ SENSOR_FAN_VRM_HS | SENSOR_FAN_CHIPSET |
+ SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_ASMX,
+ .family = family_amd_500_series,
+};
+
+static const struct ec_board_info board_info_strix_z690_a_gaming_wifi_d4 = {
+ .sensors = SENSOR_TEMP_T_SENSOR | SENSOR_TEMP_VRM,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_RMTW_ASMX,
+ .family = family_intel_600_series,
+};
+
+static const struct ec_board_info board_info_zenith_ii_extreme = {
+ .sensors = SENSOR_SET_TEMP_CHIPSET_CPU_MB | SENSOR_TEMP_T_SENSOR |
+ SENSOR_TEMP_VRM | SENSOR_SET_TEMP_WATER |
+ SENSOR_FAN_CPU_OPT | SENSOR_FAN_CHIPSET | SENSOR_FAN_VRM_HS |
+ SENSOR_FAN_WATER_FLOW | SENSOR_CURR_CPU | SENSOR_IN_CPU_CORE |
+ SENSOR_SET_WATER_BLOCK |
+ SENSOR_TEMP_T_SENSOR_2 | SENSOR_TEMP_SENSOR_EXTRA_1 |
+ SENSOR_TEMP_SENSOR_EXTRA_2 | SENSOR_TEMP_SENSOR_EXTRA_3,
+ .mutex_path = ASUS_HW_ACCESS_MUTEX_SB_PCI0_SBRG_SIO1_MUT0,
+ .family = family_amd_500_series,
+};
+
+#define DMI_EXACT_MATCH_ASUS_BOARD_NAME(name, board_info) \
+ { \
+ .matches = { \
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, \
+ "ASUSTeK COMPUTER INC."), \
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, name), \
+ }, \
+ .driver_data = (void *)board_info, \
+ }
+
+static const struct dmi_system_id dmi_table[] = {
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X470-PRO",
+ &board_info_prime_x470_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("PRIME X570-PRO",
+ &board_info_prime_x570_pro),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ProArt X570-CREATOR WIFI",
+ &board_info_pro_art_x570_creator_wifi),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("Pro WS X570-ACE",
+ &board_info_pro_ws_x570_ace),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII DARK HERO",
+ &board_info_crosshair_viii_dark_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII FORMULA",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII HERO (WI-FI)",
+ &board_info_crosshair_viii_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG MAXIMUS XI HERO (WI-FI)",
+ &board_info_maximus_xi_hero),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG CROSSHAIR VIII IMPACT",
+ &board_info_crosshair_viii_impact),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-E GAMING",
+ &board_info_strix_b550_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX B550-I GAMING",
+ &board_info_strix_b550_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING",
+ &board_info_strix_x570_e_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-E GAMING WIFI II",
+ &board_info_strix_x570_e_gaming_wifi_ii),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-F GAMING",
+ &board_info_strix_x570_f_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX X570-I GAMING",
+ &board_info_strix_x570_i_gaming),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG STRIX Z690-A GAMING WIFI D4",
+ &board_info_strix_z690_a_gaming_wifi_d4),
+ DMI_EXACT_MATCH_ASUS_BOARD_NAME("ROG ZENITH II EXTREME",
+ &board_info_zenith_ii_extreme),
+ {},
};
struct ec_sensor {
@@ -537,12 +577,12 @@ static int find_ec_sensor_index(const struct ec_sensors_data *ec,
return -ENOENT;
}
-static int __init bank_compare(const void *a, const void *b)
+static int bank_compare(const void *a, const void *b)
{
return *((const s8 *)a) - *((const s8 *)b);
}
-static void __init setup_sensor_data(struct ec_sensors_data *ec)
+static void setup_sensor_data(struct ec_sensors_data *ec)
{
struct ec_sensor *s = ec->sensors;
bool bank_found;
@@ -574,7 +614,7 @@ static void __init setup_sensor_data(struct ec_sensors_data *ec)
sort(ec->banks, ec->nr_banks, 1, bank_compare, NULL);
}
-static void __init fill_ec_registers(struct ec_sensors_data *ec)
+static void fill_ec_registers(struct ec_sensors_data *ec)
{
const struct ec_sensor_info *si;
unsigned int i, j, register_idx = 0;
@@ -589,7 +629,7 @@ static void __init fill_ec_registers(struct ec_sensors_data *ec)
}
}
-static int __init setup_lock_data(struct device *dev)
+static int setup_lock_data(struct device *dev)
{
const char *mutex_path;
int status;
@@ -812,7 +852,7 @@ static umode_t asus_ec_hwmon_is_visible(const void *drvdata,
return find_ec_sensor_index(state, type, channel) >= 0 ? S_IRUGO : 0;
}
-static int __init
+static int
asus_ec_hwmon_add_chan_info(struct hwmon_channel_info *asus_ec_hwmon_chan,
struct device *dev, int num,
enum hwmon_sensor_types type, u32 config)
@@ -841,27 +881,15 @@ static struct hwmon_chip_info asus_ec_chip_info = {
.ops = &asus_ec_hwmon_ops,
};
-static const struct ec_board_info * __init get_board_info(void)
+static const struct ec_board_info *get_board_info(void)
{
- const char *dmi_board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
- const char *dmi_board_name = dmi_get_system_info(DMI_BOARD_NAME);
- const struct ec_board_info *board;
-
- if (!dmi_board_vendor || !dmi_board_name ||
- strcasecmp(dmi_board_vendor, "ASUSTeK COMPUTER INC."))
- return NULL;
-
- for (board = board_info; board->sensors; board++) {
- if (match_string(board->board_names,
- MAX_IDENTICAL_BOARD_VARIATIONS,
- dmi_board_name) >= 0)
- return board;
- }
+ const struct dmi_system_id *dmi_entry;
- return NULL;
+ dmi_entry = dmi_first_match(dmi_table);
+ return dmi_entry ? dmi_entry->driver_data : NULL;
}
-static int __init asus_ec_probe(struct platform_device *pdev)
+static int asus_ec_probe(struct platform_device *pdev)
{
const struct hwmon_channel_info **ptr_asus_ec_ci;
int nr_count[hwmon_max] = { 0 }, nr_types = 0;
@@ -970,29 +998,37 @@ static int __init asus_ec_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwdev);
}
-
-static const struct acpi_device_id acpi_ec_ids[] = {
- /* Embedded Controller Device */
- { "PNP0C09", 0 },
- {}
-};
+MODULE_DEVICE_TABLE(dmi, dmi_table);
static struct platform_driver asus_ec_sensors_platform_driver = {
.driver = {
.name = "asus-ec-sensors",
- .acpi_match_table = acpi_ec_ids,
},
+ .probe = asus_ec_probe,
};
-MODULE_DEVICE_TABLE(acpi, acpi_ec_ids);
-/*
- * we use module_platform_driver_probe() rather than module_platform_driver()
- * because the probe function (and its dependants) are marked with __init, which
- * means we can't put it into the .probe member of the platform_driver struct
- * above, and we can't mark the asus_ec_sensors_platform_driver object as __init
- * because the object is referenced from the module exit code.
- */
-module_platform_driver_probe(asus_ec_sensors_platform_driver, asus_ec_probe);
+static struct platform_device *asus_ec_sensors_platform_device;
+
+static int __init asus_ec_init(void)
+{
+ asus_ec_sensors_platform_device =
+ platform_create_bundle(&asus_ec_sensors_platform_driver,
+ asus_ec_probe, NULL, 0, NULL, 0);
+
+ if (IS_ERR(asus_ec_sensors_platform_device))
+ return PTR_ERR(asus_ec_sensors_platform_device);
+
+ return 0;
+}
+
+static void __exit asus_ec_exit(void)
+{
+ platform_device_unregister(asus_ec_sensors_platform_device);
+ platform_driver_unregister(&asus_ec_sensors_platform_driver);
+}
+
+module_init(asus_ec_init);
+module_exit(asus_ec_exit);
module_param_named(mutex_path, mutex_path_override, charp, 0);
MODULE_PARM_DESC(mutex_path,
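For reference, the DMI_EXACT_MATCH_ASUS_BOARD_NAME() entries above expand to ordinary dmi_system_id initializers; the first one unfolds roughly as follows (hand-expanded sketch, not compiler output):

	{
		.matches = {
			DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_EXACT_MATCH(DMI_BOARD_NAME, "PRIME X470-PRO"),
		},
		.driver_data = (void *)&board_info_prime_x470_pro,
	},

get_board_info() then reduces to a dmi_first_match() lookup whose driver_data carries the per-board sensor description.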
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index befe989ca7b9..fbf3f5a4ecb6 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -391,6 +391,9 @@ static int gpio_fan_set_cur_state(struct thermal_cooling_device *cdev,
if (!fan_data)
return -EINVAL;
+ if (state >= fan_data->num_speed)
+ return -EINVAL;
+
set_fan_speed(fan_data, state);
return 0;
}
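set_fan_speed() indexes a table sized by num_speed, so the added comparison is the standard reject-before-index guard against a cooling state arriving out of range. Generically:

	#include <errno.h>
	#include <stddef.h>

	/* validate an externally supplied index before using it */
	static int pick_speed(const int *speeds, size_t num_speed, size_t state)
	{
		if (state >= num_speed)
			return -EINVAL;
		return speeds[state];	/* now provably in bounds */
	}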
diff --git a/drivers/hwmon/mr75203.c b/drivers/hwmon/mr75203.c
index 26278b0f17a9..9259779cc2df 100644
--- a/drivers/hwmon/mr75203.c
+++ b/drivers/hwmon/mr75203.c
@@ -68,8 +68,9 @@
/* VM Individual Macro Register */
#define VM_COM_REG_SIZE 0x200
-#define VM_SDIF_DONE(n) (VM_COM_REG_SIZE + 0x34 + 0x200 * (n))
-#define VM_SDIF_DATA(n) (VM_COM_REG_SIZE + 0x40 + 0x200 * (n))
+#define VM_SDIF_DONE(vm) (VM_COM_REG_SIZE + 0x34 + 0x200 * (vm))
+#define VM_SDIF_DATA(vm, ch) \
+ (VM_COM_REG_SIZE + 0x40 + 0x200 * (vm) + 0x4 * (ch))
/* SDA Slave Register */
#define IP_CTRL 0x00
@@ -115,6 +116,7 @@ struct pvt_device {
u32 t_num;
u32 p_num;
u32 v_num;
+ u32 c_num;
u32 ip_freq;
u8 *vm_idx;
};
@@ -178,14 +180,15 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
{
struct pvt_device *pvt = dev_get_drvdata(dev);
struct regmap *v_map = pvt->v_map;
+ u8 vm_idx, ch_idx;
u32 n, stat;
- u8 vm_idx;
int ret;
- if (channel >= pvt->v_num)
+ if (channel >= pvt->v_num * pvt->c_num)
return -EINVAL;
- vm_idx = pvt->vm_idx[channel];
+ vm_idx = pvt->vm_idx[channel / pvt->c_num];
+ ch_idx = channel % pvt->c_num;
switch (attr) {
case hwmon_in_input:
@@ -196,13 +199,23 @@ static int pvt_read_in(struct device *dev, u32 attr, int channel, long *val)
if (ret)
return ret;
- ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx), &n);
+ ret = regmap_read(v_map, VM_SDIF_DATA(vm_idx, ch_idx), &n);
if(ret < 0)
return ret;
n &= SAMPLE_DATA_MSK;
- /* Convert the N bitstream count into voltage */
- *val = (PVT_N_CONST * n - PVT_R_CONST) >> PVT_CONV_BITS;
+ /*
+ * Convert the N bitstream count into voltage.
+ * To support negative voltage calculation on 64-bit machines,
+ * n must be cast to long, since n and *val differ both in
+ * signedness and in size.
+ * Division is used instead of a right shift because, for
+ * signed numbers, the vacated bit positions are filled with
+ * the sign bit, i.e. with 1 when the number is negative.
+ * BIT(x) may not be used instead of (1 << x) because it is
+ * unsigned.
+ */
+ *val = (PVT_N_CONST * (long)n - PVT_R_CONST) / (1 << PVT_CONV_BITS);
return 0;
default:
@@ -375,6 +388,19 @@ static int pvt_init(struct pvt_device *pvt)
if (ret)
return ret;
+ val = (BIT(pvt->c_num) - 1) | VM_CH_INIT |
+ IP_POLL << SDIF_ADDR_SFT | SDIF_WRN_W | SDIF_PROG;
+ ret = regmap_write(v_map, SDIF_W, val);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_read_poll_timeout(v_map, SDIF_STAT,
+ val, !(val & SDIF_BUSY),
+ PVT_POLL_DELAY_US,
+ PVT_POLL_TIMEOUT_US);
+ if (ret)
+ return ret;
+
val = CFG1_VOL_MEAS_MODE | CFG1_PARALLEL_OUT |
CFG1_14_BIT | IP_CFG << SDIF_ADDR_SFT |
SDIF_WRN_W | SDIF_PROG;
@@ -489,8 +515,8 @@ static int pvt_reset_control_deassert(struct device *dev, struct pvt_device *pvt
static int mr75203_probe(struct platform_device *pdev)
{
+ u32 ts_num, vm_num, pd_num, ch_num, val, index, i;
const struct hwmon_channel_info **pvt_info;
- u32 ts_num, vm_num, pd_num, val, index, i;
struct device *dev = &pdev->dev;
u32 *temp_config, *in_config;
struct device *hwmon_dev;
@@ -531,9 +557,11 @@ static int mr75203_probe(struct platform_device *pdev)
ts_num = (val & TS_NUM_MSK) >> TS_NUM_SFT;
pd_num = (val & PD_NUM_MSK) >> PD_NUM_SFT;
vm_num = (val & VM_NUM_MSK) >> VM_NUM_SFT;
+ ch_num = (val & CH_NUM_MSK) >> CH_NUM_SFT;
pvt->t_num = ts_num;
pvt->p_num = pd_num;
pvt->v_num = vm_num;
+ pvt->c_num = ch_num;
val = 0;
if (ts_num)
val++;
@@ -570,7 +598,7 @@ static int mr75203_probe(struct platform_device *pdev)
}
if (vm_num) {
- u32 num = vm_num;
+ u32 total_ch;
ret = pvt_get_regmap(pdev, "vm", pvt);
if (ret)
@@ -584,30 +612,30 @@ static int mr75203_probe(struct platform_device *pdev)
ret = device_property_read_u8_array(dev, "intel,vm-map",
pvt->vm_idx, vm_num);
if (ret) {
- num = 0;
+ /*
+ * In case the intel,vm-map property is not defined, we
+ * assume incremental channel numbers.
+ */
+ for (i = 0; i < vm_num; i++)
+ pvt->vm_idx[i] = i;
} else {
for (i = 0; i < vm_num; i++)
if (pvt->vm_idx[i] >= vm_num ||
pvt->vm_idx[i] == 0xff) {
- num = i;
+ pvt->v_num = i;
+ vm_num = i;
break;
}
}
- /*
- * Incase intel,vm-map property is not defined, we assume
- * incremental channel numbers.
- */
- for (i = num; i < vm_num; i++)
- pvt->vm_idx[i] = i;
-
- in_config = devm_kcalloc(dev, num + 1,
+ total_ch = ch_num * vm_num;
+ in_config = devm_kcalloc(dev, total_ch + 1,
sizeof(*in_config), GFP_KERNEL);
if (!in_config)
return -ENOMEM;
- memset32(in_config, HWMON_I_INPUT, num);
- in_config[num] = 0;
+ memset32(in_config, HWMON_I_INPUT, total_ch);
+ in_config[total_ch] = 0;
pvt_in.config = in_config;
pvt_info[index++] = &pvt_in;
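The conversion comment added earlier in this file turns on a detail of signed arithmetic: on the usual two's-complement targets, an arithmetic right shift rounds toward negative infinity while signed division truncates toward zero, so the two differ exactly when the intermediate result is negative:

	#include <stdio.h>

	int main(void)
	{
		long n = -5;

		printf("%ld\n", n >> 1);	/* -3: shift fills with sign bit */
		printf("%ld\n", n / 2);		/* -2: division truncates */
		return 0;
	}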
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index f10bac8860fc..81d3f91dd204 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -2861,7 +2861,7 @@ static int pmbus_regulator_get_low_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_low[page]) {
+ if (data->vout_low[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MIN))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MIN);
@@ -2887,7 +2887,7 @@ static int pmbus_regulator_get_high_margin(struct i2c_client *client, int page)
.data = -1,
};
- if (!data->vout_high[page]) {
+ if (data->vout_high[page] < 0) {
if (pmbus_check_word_register(client, page, PMBUS_MFR_VOUT_MAX))
s.data = _pmbus_read_word_data(client, page, 0xff,
PMBUS_MFR_VOUT_MAX);
@@ -3016,11 +3016,10 @@ static int pmbus_regulator_register(struct pmbus_data *data)
rdev = devm_regulator_register(dev, &info->reg_desc[i],
&config);
- if (IS_ERR(rdev)) {
- dev_err(dev, "Failed to register %s regulator\n",
- info->reg_desc[i].name);
- return PTR_ERR(rdev);
- }
+ if (IS_ERR(rdev))
+ return dev_err_probe(dev, PTR_ERR(rdev),
+ "Failed to register %s regulator\n",
+ info->reg_desc[i].name);
}
return 0;
@@ -3320,6 +3319,7 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
struct pmbus_data *data;
size_t groups_num = 0;
int ret;
+ int i;
char *name;
if (!info)
@@ -3353,6 +3353,11 @@ int pmbus_do_probe(struct i2c_client *client, struct pmbus_driver_info *info)
data->currpage = -1;
data->currphase = -1;
+ for (i = 0; i < ARRAY_SIZE(data->vout_low); i++) {
+ data->vout_low[i] = -1;
+ data->vout_high[i] = -1;
+ }
+
ret = pmbus_init_common(client, data, info);
if (ret < 0)
return ret;
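The vout_low/vout_high switch from `!x` to `x < 0` works because 0 is a perfectly valid cached margin value and therefore cannot double as the "not fetched yet" marker; pre-filling the cache with -1 provides an out-of-band sentinel. The lazy-read pattern in miniature, with read_hw() and NPAGES hypothetical:

	#define NPAGES 32		/* hypothetical page count */

	int read_hw(int page);		/* hypothetical register read */

	static int vout_cache[NPAGES];	/* pre-filled with -1 at init */

	static int get_vout(int page)
	{
		if (vout_cache[page] < 0)	/* -1 = not fetched yet */
			vout_cache[page] = read_hw(page);
		return vout_cache[page];
	}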
diff --git a/drivers/hwmon/tps23861.c b/drivers/hwmon/tps23861.c
index 42762e87b014..f7c59ff7ae8e 100644
--- a/drivers/hwmon/tps23861.c
+++ b/drivers/hwmon/tps23861.c
@@ -493,18 +493,20 @@ static char *tps23861_port_poe_plus_status(struct tps23861_data *data, int port)
static int tps23861_port_resistance(struct tps23861_data *data, int port)
{
- u16 regval;
+ unsigned int raw_val;
+ __le16 regval;
regmap_bulk_read(data->regmap,
PORT_1_RESISTANCE_LSB + PORT_N_RESISTANCE_LSB_OFFSET * (port - 1),
&regval,
2);
- switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, regval)) {
+ raw_val = le16_to_cpu(regval);
+ switch (FIELD_GET(PORT_RESISTANCE_RSN_MASK, raw_val)) {
case PORT_RESISTANCE_RSN_OTHER:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB) / 10000;
case PORT_RESISTANCE_RSN_LOW:
- return (FIELD_GET(PORT_RESISTANCE_MASK, regval) * RESISTANCE_LSB_LOW) / 10000;
+ return (FIELD_GET(PORT_RESISTANCE_MASK, raw_val) * RESISTANCE_LSB_LOW) / 10000;
case PORT_RESISTANCE_RSN_SHORT:
case PORT_RESISTANCE_RSN_OPEN:
default:
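regmap_bulk_read() deposits the register pair as raw little-endian bytes, so the old u16 buffer read back byte-swapped values on big-endian hosts; typing the buffer __le16 and converting via le16_to_cpu() pins the layout down. The byte-level equivalent:

	#include <stdint.h>

	/* assemble a little-endian 16-bit register from raw bytes */
	static uint16_t le16_decode(const uint8_t b[2])
	{
		return (uint16_t)(b[0] | ((uint16_t)b[1] << 8));
	}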
diff --git a/drivers/iio/adc/ad7292.c b/drivers/iio/adc/ad7292.c
index 92c68d467c50..a2f9fda25ff3 100644
--- a/drivers/iio/adc/ad7292.c
+++ b/drivers/iio/adc/ad7292.c
@@ -287,10 +287,8 @@ static int ad7292_probe(struct spi_device *spi)
ret = devm_add_action_or_reset(&spi->dev,
ad7292_regulator_disable, st);
- if (ret) {
- regulator_disable(st->reg);
+ if (ret)
return ret;
- }
ret = regulator_get_voltage(st->reg);
if (ret < 0)
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index 1cb4590fe412..890af7dca62d 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -40,8 +40,8 @@
#define MCP3911_CHANNEL(x) (MCP3911_REG_CHANNEL0 + x * 3)
#define MCP3911_OFFCAL(x) (MCP3911_REG_OFFCAL_CH0 + x * 6)
-/* Internal voltage reference in uV */
-#define MCP3911_INT_VREF_UV 1200000
+/* Internal voltage reference in mV */
+#define MCP3911_INT_VREF_MV 1200
#define MCP3911_REG_READ(reg, id) ((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
#define MCP3911_REG_WRITE(reg, id) ((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
@@ -113,6 +113,8 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
if (ret)
goto out;
+ *val = sign_extend32(*val, 23);
+
ret = IIO_VAL_INT;
break;
@@ -137,11 +139,18 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev,
*val = ret / 1000;
} else {
- *val = MCP3911_INT_VREF_UV;
+ *val = MCP3911_INT_VREF_MV;
}
- *val2 = 24;
- ret = IIO_VAL_FRACTIONAL_LOG2;
+ /*
+ * For 24-bit conversion:
+ * Raw = (Voltage / Vref) * 2^23 * Gain * 1.5
+ * Voltage = Raw * Vref / (2^23 * Gain * 1.5)
+ */
+
+ /* val2 = (2^23 * 1.5) */
+ *val2 = 12582912;
+ ret = IIO_VAL_FRACTIONAL;
break;
}
@@ -208,7 +217,14 @@ static int mcp3911_config(struct mcp3911 *adc)
u32 configreg;
int ret;
- device_property_read_u32(dev, "device-addr", &adc->dev_addr);
+ ret = device_property_read_u32(dev, "microchip,device-addr", &adc->dev_addr);
+
+ /*
+ * Fall back to "device-addr" due to the historical mismatch
+ * between dt-bindings and implementation.
+ */
+ if (ret)
+ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
if (adc->dev_addr > 3) {
dev_err(&adc->spi->dev,
"invalid device address (%i). Must be in range 0-3.\n",
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index edbe6a3138d0..001055d09750 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -505,7 +505,7 @@ static int cm32181_resume(struct device *dev)
cm32181->conf_regs[CM32181_REG_ADDR_CMD]);
}
-DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(cm32181_pm_ops, cm32181_suspend, cm32181_resume);
static const struct of_device_id cm32181_of_match[] = {
{ .compatible = "capella,cm3218" },
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index c721b69d5095..0b30db77f78b 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -226,8 +226,10 @@ static int cm3605_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return dev_err_probe(dev, irq, "failed to get irq\n");
+ if (irq < 0) {
+ ret = dev_err_probe(dev, irq, "failed to get irq\n");
+ goto out_disable_aset;
+ }
ret = devm_request_threaded_irq(dev, irq, cm3605_prox_irq,
NULL, 0, "cm3605", indio_dev);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 46d06678dfbe..be317f2665a9 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1841,8 +1841,8 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req->listen_addr_storage,
- (struct sockaddr *)&req->src_addr_storage)) {
+ (struct sockaddr *)&req->src_addr_storage,
+ (struct sockaddr *)&req->listen_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 186ed8859920..d39e16c211e8 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -462,7 +462,7 @@ retry:
mutex_unlock(&umem_odp->umem_mutex);
out_put_mm:
- mmput(owning_mm);
+ mmput_async(owning_mm);
out_put_task:
if (owning_process)
put_task_struct(owning_process);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index f848eedc6a23..d24996526c4d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -730,7 +730,6 @@ struct hns_roce_caps {
u32 num_qps;
u32 num_pi_qps;
u32 reserved_qps;
- int num_qpc_timer;
u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index cbdafaac678a..c780646bd60a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1977,7 +1977,7 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->num_mtpts = HNS_ROCE_V2_MAX_MTPT_NUM;
caps->num_pds = HNS_ROCE_V2_MAX_PD_NUM;
- caps->num_qpc_timer = HNS_ROCE_V2_MAX_QPC_TIMER_NUM;
+ caps->qpc_timer_bt_num = HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM;
caps->cqc_timer_bt_num = HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM;
caps->max_qp_init_rdma = HNS_ROCE_V2_MAX_QP_INIT_RDMA;
@@ -2273,7 +2273,6 @@ static int hns_roce_query_pf_caps(struct hns_roce_dev *hr_dev)
caps->max_rq_sg = le16_to_cpu(resp_a->max_rq_sg);
caps->max_rq_sg = roundup_pow_of_two(caps->max_rq_sg);
caps->max_extend_sg = le32_to_cpu(resp_a->max_extend_sg);
- caps->num_qpc_timer = le16_to_cpu(resp_a->num_qpc_timer);
caps->max_srq_sges = le16_to_cpu(resp_a->max_srq_sges);
caps->max_srq_sges = roundup_pow_of_two(caps->max_srq_sges);
caps->num_aeq_vectors = resp_a->num_aeq_vectors;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index f96debac30fe..64797109bab6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -36,11 +36,11 @@
#include <linux/bitops.h>
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
-#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
+#define HNS_ROCE_V2_MAX_QPC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_CQC_TIMER_BT_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
@@ -83,7 +83,7 @@
#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
-#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+#define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFF000
#define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c8af4ebd7cbd..4ccb217b2841 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -725,7 +725,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->qpc_timer_table,
HEM_TYPE_QPC_TIMER,
hr_dev->caps.qpc_timer_entry_sz,
- hr_dev->caps.num_qpc_timer, 1);
+ hr_dev->caps.qpc_timer_bt_num, 1);
if (ret) {
dev_err(dev,
"Failed to init QPC timer memory, aborting.\n");
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 48d3616a6d71..7bee7f6c5e70 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -462,11 +462,8 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap,
hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) +
hr_qp->rq.rsv_sge);
- if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz);
- else
- hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
- hr_qp->rq.max_gs);
+ hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz *
+ hr_qp->rq.max_gs);
hr_qp->rq.wqe_cnt = cnt;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE &&
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index daeab5daed5b..a6e5d350a94c 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -497,7 +497,8 @@ int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
FIELD_PREP(IRDMAQPSQ_IMMDATA, info->imm_data));
i = 0;
} else {
- qp->wqe_ops.iw_set_fragment(wqe, 0, op_info->sg_list,
+ qp->wqe_ops.iw_set_fragment(wqe, 0,
+ frag_cnt ? op_info->sg_list : NULL,
qp->swqe_polarity);
i = 1;
}
@@ -1005,6 +1006,7 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
int ret_code;
bool move_cq_head = true;
u8 polarity;
+ u8 op_type;
bool ext_valid;
__le64 *ext_cqe;
@@ -1187,7 +1189,6 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
do {
__le64 *sw_wqe;
u64 wqe_qword;
- u8 op_type;
u32 tail;
tail = qp->sq_ring.tail;
@@ -1204,6 +1205,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
break;
}
} while (1);
+ if (op_type == IRDMA_OP_TYPE_BIND_MW && info->minor_err == FLUSH_PROT_ERR)
+ info->minor_err = FLUSH_MW_BIND_ERR;
qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index fdf4cc88cb91..075defaabee5 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -590,11 +590,14 @@ static int irdma_wait_event(struct irdma_pci_f *rf,
cqp_error = cqp_request->compl_info.error;
if (cqp_error) {
err_code = -EIO;
- if (cqp_request->compl_info.maj_err_code == 0xFFFF &&
- cqp_request->compl_info.min_err_code == 0x8029) {
- if (!rf->reset) {
- rf->reset = true;
- rf->gen_ops.request_reset(rf);
+ if (cqp_request->compl_info.maj_err_code == 0xFFFF) {
+ if (cqp_request->compl_info.min_err_code == 0x8002)
+ err_code = -EBUSY;
+ else if (cqp_request->compl_info.min_err_code == 0x8029) {
+ if (!rf->reset) {
+ rf->reset = true;
+ rf->gen_ops.request_reset(rf);
+ }
}
}
}
@@ -2598,7 +2601,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
spin_unlock_irqrestore(&iwqp->lock, flags2);
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
if (compl_generated)
- irdma_comp_handler(iwqp->iwrcq);
+ irdma_comp_handler(iwqp->iwscq);
} else {
spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 9b07b8af2997..9b207f5084eb 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -39,15 +39,18 @@ static int irdma_query_device(struct ib_device *ibdev,
props->max_send_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_recv_sge = hw_attrs->uk_attrs.max_hw_wq_frags;
props->max_cq = rf->max_cq - rf->used_cqs;
- props->max_cqe = rf->max_cqe;
+ props->max_cqe = rf->max_cqe - 1;
props->max_mr = rf->max_mr - rf->used_mrs;
props->max_mw = props->max_mr;
props->max_pd = rf->max_pd - rf->used_pds;
props->max_sge_rd = hw_attrs->uk_attrs.max_hw_read_sges;
props->max_qp_rd_atom = hw_attrs->max_hw_ird;
props->max_qp_init_rd_atom = hw_attrs->max_hw_ord;
- if (rdma_protocol_roce(ibdev, 1))
+ if (rdma_protocol_roce(ibdev, 1)) {
+ props->device_cap_flags |= IB_DEVICE_RC_RNR_NAK_GEN;
props->max_pkeys = IRDMA_PKEY_TBL_SZ;
+ }
+
props->max_ah = rf->max_ah;
props->max_mcast_grp = rf->max_mcg;
props->max_mcast_qp_attach = IRDMA_MAX_MGS_PER_CTX;
@@ -3009,6 +3012,7 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
struct irdma_pble_alloc *palloc = &iwpbl->pble_alloc;
struct irdma_cqp_request *cqp_request;
struct cqp_cmds_info *cqp_info;
+ int status;
if (iwmr->type != IRDMA_MEMREG_TYPE_MEM) {
if (iwmr->region) {
@@ -3039,8 +3043,11 @@ static int irdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
cqp_info->post_sq = 1;
cqp_info->in.u.dealloc_stag.dev = &iwdev->rf->sc_dev;
cqp_info->in.u.dealloc_stag.scratch = (uintptr_t)cqp_request;
- irdma_handle_cqp_op(iwdev->rf, cqp_request);
+ status = irdma_handle_cqp_op(iwdev->rf, cqp_request);
irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
+ if (status)
+ return status;
+
irdma_free_stag(iwdev, iwmr->stag);
done:
if (iwpbl->pbl_allocated)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 293ed709e5ed..b4dc52392275 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -166,6 +166,12 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
+ if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
+ /* set local port to one for Function-Per-Port HCA. */
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
+
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index fc94a1b25485..883d7c60143e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4336,7 +4336,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
dev->mdev = mdev;
dev->num_ports = num_ports;
- if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_init_enabled(mdev))
+ if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev))
profile = &raw_eth_profile;
else
profile = &pf_profile;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 2e2ad3918385..e66bf72f1f04 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -708,6 +708,7 @@ struct mlx5_ib_umr_context {
};
enum {
+ MLX5_UMR_STATE_UNINIT,
MLX5_UMR_STATE_ACTIVE,
MLX5_UMR_STATE_RECOVER,
MLX5_UMR_STATE_ERR,
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index e00b94d1b1ea..d5105b5c9979 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -177,6 +177,7 @@ int mlx5r_umr_resource_init(struct mlx5_ib_dev *dev)
sema_init(&dev->umrc.sem, MAX_UMR_WR);
mutex_init(&dev->umrc.lock);
+ dev->umrc.state = MLX5_UMR_STATE_ACTIVE;
return 0;
@@ -191,6 +192,8 @@ destroy_pd:
void mlx5r_umr_resource_cleanup(struct mlx5_ib_dev *dev)
{
+ if (dev->umrc.state == MLX5_UMR_STATE_UNINIT)
+ return;
ib_destroy_qp(dev->umrc.qp);
ib_free_cq(dev->umrc.cq);
ib_dealloc_pd(dev->umrc.pd);
diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
index 1f4e60257700..7d47b521070b 100644
--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
@@ -29,7 +29,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
if (paddr)
- return virt_to_page(paddr);
+ return virt_to_page((void *)paddr);
return NULL;
}
@@ -533,13 +533,23 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
kunmap_local(kaddr);
}
} else {
- u64 va = sge->laddr + sge_off;
+ /*
+ * Cast to an uintptr_t to preserve all 64 bits
+ * in sge->laddr.
+ */
+ uintptr_t va = (uintptr_t)(sge->laddr + sge_off);
- page_array[seg] = virt_to_page(va & PAGE_MASK);
+ /*
+ * virt_to_page() takes a (void *) pointer, so
+ * cast to (void *); the pointer is 64 bits wide
+ * on a 64-bit platform and 32 bits wide on a
+ * 32-bit platform.
+ */
+ page_array[seg] = virt_to_page((void *)(va & PAGE_MASK));
if (do_crc)
crypto_shash_update(
c_tx->mpa_crc_hd,
- (void *)(uintptr_t)va,
+ (void *)va,
plen);
}
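The cast gymnastics above exist because the address travels as a 64-bit integer while virt_to_page() wants a pointer; on a 32-bit build, a direct integer-to-pointer cast of a u64 both truncates and triggers a size-mismatch warning. Going through uintptr_t keeps the conversion exactly pointer-sized and visible:

	#include <stdint.h>

	/* turn an address carried as a 64-bit integer into a pointer */
	static void *addr_to_ptr(uint64_t addr)
	{
		/* uintptr_t is pointer-sized by definition, so any
		 * narrowing happens in one deliberate place */
		return (void *)(uintptr_t)addr;
	}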
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index baecde41d126..449904dac0a9 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1004,7 +1004,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
struct rtrs_clt_io_req *req,
struct rtrs_rbuf *rbuf, bool fr_en,
- u32 size, u32 imm, struct ib_send_wr *wr,
+ u32 count, u32 size, u32 imm,
+ struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
@@ -1024,12 +1025,12 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
num_sge = 2;
ptail = tail;
} else {
- for_each_sg(req->sglist, sg, req->sg_cnt, i) {
+ for_each_sg(req->sglist, sg, count, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
- num_sge = 1 + req->sg_cnt;
+ num_sge = 1 + count;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
@@ -1142,7 +1143,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
*/
rtrs_clt_update_all_stats(req, WRITE);
- ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en,
+ ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
imm, wr, &inv_wr);
if (ret) {
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 34c03bde5064..4894e7329d88 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -595,7 +595,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
struct sg_table *sgt = &srv_mr->sgt;
struct scatterlist *s;
struct ib_mr *mr;
- int nr, chunks;
+ int nr, nr_sgt, chunks;
chunks = chunks_per_mr * mri;
if (!always_invalidate)
@@ -610,19 +610,19 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
- nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
+ nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
- if (nr < sgt->nents) {
- err = nr < 0 ? nr : -EINVAL;
+ if (!nr_sgt) {
+ err = -EINVAL;
goto free_sg;
}
mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- sgt->nents);
+ nr_sgt);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto unmap_sg;
}
- nr = ib_map_mr_sg(mr, sgt->sgl, sgt->nents,
+ nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
NULL, max_chunk_size);
if (nr < 0 || nr < sgt->nents) {
err = nr < 0 ? nr : -EINVAL;
@@ -641,7 +641,7 @@ static int map_cont_bufs(struct rtrs_srv_path *srv_path)
}
}
/* Eventually dma addr for each chunk can be cached */
- for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
+ for_each_sg(sgt->sgl, s, nr_sgt, i)
srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7720ea270ed8..d7f69e593a63 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1961,7 +1961,8 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
if (scmnd) {
req = scsi_cmd_priv(scmnd);
scmnd = srp_claim_req(ch, req, NULL, scmnd);
- } else {
+ }
+ if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
rsp->tag, ch - target->ch, ch->qp->qp_num);
diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c
index b2a68bc9f0b4..b86de1312512 100644
--- a/drivers/input/joystick/iforce/iforce-main.c
+++ b/drivers/input/joystick/iforce/iforce-main.c
@@ -50,6 +50,7 @@ static struct iforce_device iforce_device[] = {
{ 0x046d, 0xc291, "Logitech WingMan Formula Force", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x020a, "AVB Top Shot Pegasus", btn_joystick_avb, abs_avb_pegasus, ff_iforce },
{ 0x05ef, 0x8884, "AVB Mag Turbo Force", btn_wheel, abs_wheel, ff_iforce },
+ { 0x05ef, 0x8886, "Boeder Force Feedback Wheel", btn_wheel, abs_wheel, ff_iforce },
{ 0x05ef, 0x8888, "AVB Top Shot Force Feedback Racing Wheel", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc0a4, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce }, //?
{ 0x061c, 0xc084, "ACT LABS Force RS", btn_wheel, abs_wheel, ff_iforce },
diff --git a/drivers/input/joystick/iforce/iforce-serio.c b/drivers/input/joystick/iforce/iforce-serio.c
index f95a81b9fac7..2380546d7978 100644
--- a/drivers/input/joystick/iforce/iforce-serio.c
+++ b/drivers/input/joystick/iforce/iforce-serio.c
@@ -39,7 +39,7 @@ static void iforce_serio_xmit(struct iforce *iforce)
again:
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -64,7 +64,7 @@ again:
if (test_and_clear_bit(IFORCE_XMIT_AGAIN, iforce->xmit_flags))
goto again;
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
}
@@ -169,7 +169,7 @@ static irqreturn_t iforce_serio_irq(struct serio *serio,
iforce_serio->cmd_response_len = iforce_serio->len;
/* Signal that command is done */
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
} else if (likely(iforce->type)) {
iforce_process_packet(iforce, iforce_serio->id,
iforce_serio->data_in,
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index ea58805c480f..cba92bd590a8 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -30,7 +30,7 @@ static void __iforce_usb_xmit(struct iforce *iforce)
spin_lock_irqsave(&iforce->xmit_lock, flags);
if (iforce->xmit.head == iforce->xmit.tail) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ iforce_clear_xmit_and_wake(iforce);
spin_unlock_irqrestore(&iforce->xmit_lock, flags);
return;
}
@@ -58,9 +58,9 @@ static void __iforce_usb_xmit(struct iforce *iforce)
XMIT_INC(iforce->xmit.tail, n);
if ( (n=usb_submit_urb(iforce_usb->out, GFP_ATOMIC)) ) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_warn(&iforce_usb->intf->dev,
"usb_submit_urb failed %d\n", n);
+ iforce_clear_xmit_and_wake(iforce);
}
/* The IFORCE_XMIT_RUNNING bit is not cleared here. That's intended.
@@ -175,15 +175,15 @@ static void iforce_usb_out(struct urb *urb)
struct iforce *iforce = &iforce_usb->iforce;
if (urb->status) {
- clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
dev_dbg(&iforce_usb->intf->dev, "urb->status %d, exiting\n",
urb->status);
+ iforce_clear_xmit_and_wake(iforce);
return;
}
__iforce_usb_xmit(iforce);
- wake_up(&iforce->wait);
+ wake_up_all(&iforce->wait);
}
static int iforce_usb_probe(struct usb_interface *intf,
diff --git a/drivers/input/joystick/iforce/iforce.h b/drivers/input/joystick/iforce/iforce.h
index 6aa761ebbdf7..9ccb9107ccbe 100644
--- a/drivers/input/joystick/iforce/iforce.h
+++ b/drivers/input/joystick/iforce/iforce.h
@@ -119,6 +119,12 @@ static inline int iforce_get_id_packet(struct iforce *iforce, u8 id,
response_data, response_len);
}
+static inline void iforce_clear_xmit_and_wake(struct iforce *iforce)
+{
+ clear_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags);
+ wake_up_all(&iforce->wait);
+}
+
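The new helper pairs the bit-clear with a wake-up so that writers blocked on iforce->wait can re-check transmit state. A minimal sketch of the waiter side this enables, assuming the driver's usual wait_event pairing (not taken verbatim from this patch):

	/* Sleep until the transmit ring stops running; woken by
	 * iforce_clear_xmit_and_wake() on the xmit completion paths.
	 */
	wait_event_interruptible(iforce->wait,
			!test_bit(IFORCE_XMIT_RUNNING, iforce->xmit_flags));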
/* Public functions */
/* iforce-main.c */
int iforce_init_device(struct device *parent, u16 bustype,
diff --git a/drivers/input/misc/rk805-pwrkey.c b/drivers/input/misc/rk805-pwrkey.c
index 3fb64dbda1a2..76873aa005b4 100644
--- a/drivers/input/misc/rk805-pwrkey.c
+++ b/drivers/input/misc/rk805-pwrkey.c
@@ -98,6 +98,7 @@ static struct platform_driver rk805_pwrkey_driver = {
};
module_platform_driver(rk805_pwrkey_driver);
+MODULE_ALIAS("platform:rk805-pwrkey");
MODULE_AUTHOR("Joseph Chen <chenjh@rock-chips.com>");
MODULE_DESCRIPTION("RK805 PMIC Power Key driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index d016505fc081..21c0dddbe41d 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -95,6 +95,7 @@ static const struct goodix_chip_data gt9x_chip_data = {
static const struct goodix_chip_id goodix_chip_ids[] = {
{ .id = "1151", .data = &gt1x_chip_data },
+ { .id = "1158", .data = &gt1x_chip_data },
{ .id = "5663", .data = &gt1x_chip_data },
{ .id = "5688", .data = &gt1x_chip_data },
{ .id = "917S", .data = &gt1x_chip_data },
@@ -1508,6 +1509,7 @@ MODULE_DEVICE_TABLE(acpi, goodix_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt1151" },
+ { .compatible = "goodix,gt1158" },
{ .compatible = "goodix,gt5663" },
{ .compatible = "goodix,gt5688" },
{ .compatible = "goodix,gt911" },
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 65b8e4fd8217..828672a46a3d 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -939,7 +939,8 @@ static void build_completion_wait(struct iommu_cmd *cmd,
memset(cmd, 0, sizeof(*cmd));
cmd->data[0] = lower_32_bits(paddr) | CMD_COMPL_WAIT_STORE_MASK;
cmd->data[1] = upper_32_bits(paddr);
- cmd->data[2] = data;
+ cmd->data[2] = lower_32_bits(data);
+ cmd->data[3] = upper_32_bits(data);
CMD_SET_TYPE(cmd, CMD_COMPL_WAIT);
}
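The fix above stores the full 64-bit completion-wait data instead of silently truncating it to 32 bits. A minimal sketch of what the split helpers yield (the value is illustrative only):

	u64 data = 0x1122334455667788ULL;
	u32 lo = lower_32_bits(data);	/* 0x55667788 -> cmd->data[2] */
	u32 hi = upper_32_bits(data);	/* 0x11223344 -> cmd->data[3] */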
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index 696d5555be57..6a1f02c62dff 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -777,6 +777,8 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
if (dev_state->domain == NULL)
goto out_free_states;
+ /* See iommu_is_default_domain() */
+ dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
amd_iommu_domain_direct_map(dev_state->domain);
ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 5a8f780e7ffd..497c912ad9e1 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2349,6 +2349,13 @@ static int dmar_device_hotplug(acpi_handle handle, bool insert)
if (!dmar_in_use())
return 0;
+ /*
+ * It's unlikely that any I/O board is hot added before the IOMMU
+ * subsystem is initialized.
+ */
+ if (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled)
+ return -EOPNOTSUPP;
+
if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
tmp = handle;
} else {
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 7cca030a508e..1f2cd43cf9bc 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -163,38 +163,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-static inline void context_clear_pasid_enable(struct context_entry *context)
-{
- context->lo &= ~(1ULL << 11);
-}
-
-static inline bool context_pasid_enabled(struct context_entry *context)
-{
- return !!(context->lo & (1ULL << 11));
-}
-
-static inline void context_set_copied(struct context_entry *context)
-{
- context->hi |= (1ull << 3);
-}
-
-static inline bool context_copied(struct context_entry *context)
-{
- return !!(context->hi & (1ULL << 3));
-}
-
-static inline bool __context_present(struct context_entry *context)
-{
- return (context->lo & 1);
-}
-
-bool context_present(struct context_entry *context)
-{
- return context_pasid_enabled(context) ?
- __context_present(context) :
- __context_present(context) && !context_copied(context);
-}
-
static inline void context_set_present(struct context_entry *context)
{
context->lo |= 1;
@@ -242,6 +210,26 @@ static inline void context_clear_entry(struct context_entry *context)
context->hi = 0;
}
+static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ if (!iommu->copied_tables)
+ return false;
+
+ return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
+static inline void
+clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
+{
+ clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
+}
+
/*
 * This domain is a static identity mapping domain.
 * 1. This domain creates a static 1:1 mapping to all usable memory.
@@ -402,14 +390,36 @@ static inline int domain_pfn_supported(struct dmar_domain *domain,
return !(addr_width < BITS_PER_LONG && pfn >> addr_width);
}
+/*
+ * Calculate the Supported Adjusted Guest Address Widths of an IOMMU.
+ * Refer to 11.4.2 of the VT-d spec for the encoding of each bit of
+ * the returned SAGAW.
+ */
+static unsigned long __iommu_calculate_sagaw(struct intel_iommu *iommu)
+{
+ unsigned long fl_sagaw, sl_sagaw;
+
+ fl_sagaw = BIT(2) | (cap_fl1gp_support(iommu->cap) ? BIT(3) : 0);
+ sl_sagaw = cap_sagaw(iommu->cap);
+
+ /* Second level only. */
+ if (!sm_supported(iommu) || !ecap_flts(iommu->ecap))
+ return sl_sagaw;
+
+ /* First level only. */
+ if (!ecap_slts(iommu->ecap))
+ return fl_sagaw;
+
+ return fl_sagaw & sl_sagaw;
+}
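As a worked example (values assumed, not from this patch): with cap_sagaw reading 0x6 (bits 1 and 2, i.e. 3- and 4-level second-stage tables) and no FL1GP support, fl_sagaw is BIT(2) = 0x4, so a scalable-mode IOMMU supporting both translation types reports 0x4 & 0x6 = 0x4, i.e. 4-level (48-bit) only.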
+
static int __iommu_calculate_agaw(struct intel_iommu *iommu, int max_gaw)
{
unsigned long sagaw;
int agaw;
- sagaw = cap_sagaw(iommu->cap);
- for (agaw = width_to_agaw(max_gaw);
- agaw >= 0; agaw--) {
+ sagaw = __iommu_calculate_sagaw(iommu);
+ for (agaw = width_to_agaw(max_gaw); agaw >= 0; agaw--) {
if (test_bit(agaw, &sagaw))
break;
}
@@ -505,8 +515,9 @@ static int domain_update_device_node(struct dmar_domain *domain)
{
struct device_domain_info *info;
int nid = NUMA_NO_NODE;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
/*
* There could possibly be multiple device numa nodes as devices
@@ -518,7 +529,7 @@ static int domain_update_device_node(struct dmar_domain *domain)
if (nid != NUMA_NO_NODE)
break;
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return nid;
}
@@ -578,6 +589,13 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
struct context_entry *context;
u64 *entry;
+ /*
+	 * Unless the caller requested to allocate a new entry,
+	 * returning a copied context entry makes no sense.
+ */
+ if (!alloc && context_copied(iommu, bus, devfn))
+ return NULL;
+
entry = &root->lo;
if (sm_supported(iommu)) {
if (devfn >= 0x80) {
@@ -795,32 +813,11 @@ static void free_context_table(struct intel_iommu *iommu)
}
#ifdef CONFIG_DMAR_DEBUG
-static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn, u8 bus, u8 devfn)
+static void pgtable_walk(struct intel_iommu *iommu, unsigned long pfn,
+ u8 bus, u8 devfn, struct dma_pte *parent, int level)
{
- struct device_domain_info *info;
- struct dma_pte *parent, *pte;
- struct dmar_domain *domain;
- struct pci_dev *pdev;
- int offset, level;
-
- pdev = pci_get_domain_bus_and_slot(iommu->segment, bus, devfn);
- if (!pdev)
- return;
-
- info = dev_iommu_priv_get(&pdev->dev);
- if (!info || !info->domain) {
- pr_info("device [%02x:%02x.%d] not probed\n",
- bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- return;
- }
-
- domain = info->domain;
- level = agaw_to_level(domain->agaw);
- parent = domain->pgd;
- if (!parent) {
- pr_info("no page table setup\n");
- return;
- }
+ struct dma_pte *pte;
+ int offset;
while (1) {
offset = pfn_level_offset(pfn, level);
@@ -847,9 +844,10 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
struct pasid_entry *entries, *pte;
struct context_entry *ctx_entry;
struct root_entry *rt_entry;
+ int i, dir_index, index, level;
u8 devfn = source_id & 0xff;
u8 bus = source_id >> 8;
- int i, dir_index, index;
+ struct dma_pte *pgtable;
pr_info("Dump %s table entries for IOVA 0x%llx\n", iommu->name, addr);
@@ -877,8 +875,11 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
ctx_entry->hi, ctx_entry->lo);
/* legacy mode does not require PASID entries */
- if (!sm_supported(iommu))
+ if (!sm_supported(iommu)) {
+ level = agaw_to_level(ctx_entry->hi & 7);
+ pgtable = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
goto pgtable_walk;
+ }
/* get the pointer to pasid directory entry */
dir = phys_to_virt(ctx_entry->lo & VTD_PAGE_MASK);
@@ -905,8 +906,16 @@ void dmar_fault_dump_ptes(struct intel_iommu *iommu, u16 source_id,
for (i = 0; i < ARRAY_SIZE(pte->val); i++)
pr_info("pasid table entry[%d]: 0x%016llx\n", i, pte->val[i]);
+ if (pasid_pte_get_pgtt(pte) == PASID_ENTRY_PGTT_FL_ONLY) {
+ level = pte->val[2] & BIT_ULL(2) ? 5 : 4;
+ pgtable = phys_to_virt(pte->val[2] & VTD_PAGE_MASK);
+ } else {
+ level = agaw_to_level((pte->val[0] >> 2) & 0x7);
+ pgtable = phys_to_virt(pte->val[0] & VTD_PAGE_MASK);
+ }
+
pgtable_walk:
- pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn);
+ pgtable_walk(iommu, addr >> VTD_PAGE_SHIFT, bus, devfn, pgtable, level);
}
#endif
@@ -1345,19 +1354,20 @@ iommu_support_dev_iotlb(struct dmar_domain *domain, struct intel_iommu *iommu,
u8 bus, u8 devfn)
{
struct device_domain_info *info;
+ unsigned long flags;
if (!iommu->qi)
return NULL;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->iommu == iommu && info->bus == bus &&
info->devfn == devfn) {
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return info->ats_supported ? info : NULL;
}
}
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
return NULL;
}
@@ -1366,8 +1376,9 @@ static void domain_update_iotlb(struct dmar_domain *domain)
{
struct device_domain_info *info;
bool has_iotlb_device = false;
+ unsigned long flags;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (info->ats_enabled) {
has_iotlb_device = true;
@@ -1375,7 +1386,7 @@ static void domain_update_iotlb(struct dmar_domain *domain)
}
}
domain->has_iotlb_device = has_iotlb_device;
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
@@ -1467,14 +1478,15 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
struct device_domain_info *info;
+ unsigned long flags;
if (!domain->has_iotlb_device)
return;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_for_each_entry(info, &domain->devices, link)
__iommu_flush_dev_iotlb(info, addr, mask);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
}
static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
@@ -1688,6 +1700,11 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
iommu->domain_ids = NULL;
}
+ if (iommu->copied_tables) {
+ bitmap_free(iommu->copied_tables);
+ iommu->copied_tables = NULL;
+ }
+
/* free context mapping */
free_context_table(iommu);
@@ -1913,7 +1930,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
goto out_unlock;
ret = 0;
- if (context_present(context))
+ if (context_present(context) && !context_copied(iommu, bus, devfn))
goto out_unlock;
/*
@@ -1925,7 +1942,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
* in-flight DMA will exist, and we don't need to worry anymore
* hereafter.
*/
- if (context_copied(context)) {
+ if (context_copied(iommu, bus, devfn)) {
u16 did_old = context_domain_id(context);
if (did_old < cap_ndoms(iommu->cap)) {
@@ -1936,6 +1953,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
DMA_TLB_DSI_FLUSH);
}
+
+ clear_context_copied(iommu, bus, devfn);
}
context_clear_entry(context);
@@ -2429,6 +2448,7 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
{
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct intel_iommu *iommu;
+ unsigned long flags;
u8 bus, devfn;
int ret;
@@ -2440,9 +2460,9 @@ static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
if (ret)
return ret;
info->domain = domain;
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_add(&info->link, &domain->devices);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
@@ -2684,32 +2704,14 @@ static int copy_context_table(struct intel_iommu *iommu,
/* Now copy the context entry */
memcpy(&ce, old_ce + idx, sizeof(ce));
- if (!__context_present(&ce))
+ if (!context_present(&ce))
continue;
did = context_domain_id(&ce);
if (did >= 0 && did < cap_ndoms(iommu->cap))
set_bit(did, iommu->domain_ids);
- /*
- * We need a marker for copied context entries. This
- * marker needs to work for the old format as well as
- * for extended context entries.
- *
- * Bit 67 of the context entry is used. In the old
- * format this bit is available to software, in the
- * extended format it is the PGE bit, but PGE is ignored
- * by HW if PASIDs are disabled (and thus still
- * available).
- *
- * So disable PASIDs first and then mark the entry
- * copied. This means that we don't copy PASID
- * translations from the old kernel, but this is fine as
- * faults there are not fatal.
- */
- context_clear_pasid_enable(&ce);
- context_set_copied(&ce);
-
+ set_context_copied(iommu, bus, devfn);
new_ce[idx] = ce;
}
@@ -2735,8 +2737,8 @@ static int copy_translation_tables(struct intel_iommu *iommu)
bool new_ext, ext;
rtaddr_reg = dmar_readq(iommu->reg + DMAR_RTADDR_REG);
- ext = !!(rtaddr_reg & DMA_RTADDR_RTT);
- new_ext = !!ecap_ecs(iommu->ecap);
+ ext = !!(rtaddr_reg & DMA_RTADDR_SMT);
+ new_ext = !!sm_supported(iommu);
/*
* The RTT bit can only be changed when translation is disabled,
@@ -2747,6 +2749,10 @@ static int copy_translation_tables(struct intel_iommu *iommu)
if (new_ext != ext)
return -EINVAL;
+ iommu->copied_tables = bitmap_zalloc(BIT_ULL(16), GFP_KERNEL);
+ if (!iommu->copied_tables)
+ return -ENOMEM;
+
old_rt_phys = rtaddr_reg & VTD_PAGE_MASK;
if (!old_rt_phys)
return -EINVAL;
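The bitmap reserves one bit per possible (bus, devfn) pair, hence BIT_ULL(16) = 65536 bits. A small sketch of the index math used by the context_copied() helpers above (the device address is hypothetical):

	/* bus 0x3a, devfn 0x10 (device 2, function 0): */
	long idx = ((long)0x3a << 8) | 0x10;	/* 0x3a10 == bit 14864 */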
@@ -3013,13 +3019,7 @@ static int __init init_dmars(void)
#ifdef CONFIG_INTEL_IOMMU_SVM
if (pasid_supported(iommu) && ecap_prs(iommu->ecap)) {
- /*
- * Call dmar_alloc_hwirq() with dmar_global_lock held,
- * could cause possible lock race condition.
- */
- up_write(&dmar_global_lock);
ret = intel_svm_enable_prq(iommu);
- down_write(&dmar_global_lock);
if (ret)
goto free_iommu;
}
@@ -3932,7 +3932,6 @@ int __init intel_iommu_init(void)
force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
platform_optin_force_iommu();
- down_write(&dmar_global_lock);
if (dmar_table_init()) {
if (force_on)
panic("tboot: Failed to initialize DMAR table\n");
@@ -3945,16 +3944,6 @@ int __init intel_iommu_init(void)
goto out_free_dmar;
}
- up_write(&dmar_global_lock);
-
- /*
- * The bus notifier takes the dmar_global_lock, so lockdep will
- * complain later when we register it under the lock.
- */
- dmar_register_bus_notifier();
-
- down_write(&dmar_global_lock);
-
if (!no_iommu)
intel_iommu_debugfs_init();
@@ -3999,11 +3988,9 @@ int __init intel_iommu_init(void)
pr_err("Initialization failed\n");
goto out_free_dmar;
}
- up_write(&dmar_global_lock);
init_iommu_pm_ops();
- down_read(&dmar_global_lock);
for_each_active_iommu(iommu, drhd) {
/*
* The flush queue implementation does not perform
@@ -4021,13 +4008,11 @@ int __init intel_iommu_init(void)
"%s", iommu->name);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
}
- up_read(&dmar_global_lock);
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
- down_read(&dmar_global_lock);
if (probe_acpi_namespace_devices())
pr_warn("ACPI name space devices didn't probe correctly\n");
@@ -4038,17 +4023,15 @@ int __init intel_iommu_init(void)
iommu_disable_protect_mem_regions(iommu);
}
- up_read(&dmar_global_lock);
-
- pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
intel_iommu_enabled = 1;
+ dmar_register_bus_notifier();
+ pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
return 0;
out_free_dmar:
intel_iommu_free_dmars();
- up_write(&dmar_global_lock);
return ret;
}
@@ -4080,6 +4063,7 @@ static void dmar_remove_one_dev_info(struct device *dev)
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *domain = info->domain;
struct intel_iommu *iommu = info->iommu;
+ unsigned long flags;
if (!dev_is_real_dma_subdevice(info->dev)) {
if (dev_is_pci(info->dev) && sm_supported(iommu))
@@ -4091,9 +4075,9 @@ static void dmar_remove_one_dev_info(struct device *dev)
intel_pasid_free_table(info->dev);
}
- spin_lock(&domain->lock);
+ spin_lock_irqsave(&domain->lock, flags);
list_del(&info->link);
- spin_unlock(&domain->lock);
+ spin_unlock_irqrestore(&domain->lock, flags);
domain_detach_iommu(domain, iommu);
info->domain = NULL;
@@ -4412,19 +4396,20 @@ static void domain_set_force_snooping(struct dmar_domain *domain)
static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
{
struct dmar_domain *dmar_domain = to_dmar_domain(domain);
+ unsigned long flags;
if (dmar_domain->force_snooping)
return true;
- spin_lock(&dmar_domain->lock);
+ spin_lock_irqsave(&dmar_domain->lock, flags);
if (!domain_support_force_snooping(dmar_domain)) {
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return false;
}
domain_set_force_snooping(dmar_domain);
dmar_domain->force_snooping = true;
- spin_unlock(&dmar_domain->lock);
+ spin_unlock_irqrestore(&dmar_domain->lock, flags);
return true;
}
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index fae45bbb0c7f..74b0e19e23ee 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -197,7 +197,6 @@
#define ecap_dis(e) (((e) >> 27) & 0x1)
#define ecap_nest(e) (((e) >> 26) & 0x1)
#define ecap_mts(e) (((e) >> 25) & 0x1)
-#define ecap_ecs(e) (((e) >> 24) & 0x1)
#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16)
#define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
#define ecap_coherent(e) ((e) & 0x1)
@@ -265,7 +264,6 @@
#define DMA_GSTS_CFIS (((u32)1) << 23)
/* DMA_RTADDR_REG */
-#define DMA_RTADDR_RTT (((u64)1) << 11)
#define DMA_RTADDR_SMT (((u64)1) << 10)
/* CCMD_REG */
@@ -579,6 +577,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
+ unsigned long *copied_tables; /* bitmap of copied tables */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@@ -701,6 +700,11 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
}
+static inline bool context_present(struct context_entry *context)
+{
+ return (context->lo & 1);
+}
+
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_enable_qi(struct intel_iommu *iommu);
@@ -784,7 +788,6 @@ static inline void intel_iommu_debugfs_init(void) {}
#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
extern const struct attribute_group *intel_iommu_groups[];
-bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 780fb7071577..3a808146b50f 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -3076,6 +3076,24 @@ out:
return ret;
}
+static bool iommu_is_default_domain(struct iommu_group *group)
+{
+ if (group->domain == group->default_domain)
+ return true;
+
+ /*
+ * If the default domain was set to identity and it is still an identity
+ * domain then we consider this a pass. This happens because of
+	 * amd_iommu_init_device() replacing the default identity domain with an
+ * identity domain that has a different configuration for AMDGPU.
+ */
+ if (group->default_domain &&
+ group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
+ group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
+ return true;
+ return false;
+}
+
/**
* iommu_device_use_default_domain() - Device driver wants to handle device
* DMA through the kernel DMA API.
@@ -3094,8 +3112,7 @@ int iommu_device_use_default_domain(struct device *dev)
mutex_lock(&group->mutex);
if (group->owner_cnt) {
- if (group->domain != group->default_domain ||
- group->owner) {
+ if (group->owner || !iommu_is_default_domain(group)) {
ret = -EBUSY;
goto unlock_out;
}
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 41f4eb005219..5696314ae69e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -40,7 +40,7 @@ static int of_iommu_xlate(struct device *dev,
* a proper probe-ordering dependency mechanism in future.
*/
if (!ops)
- return -ENODEV;
+ return driver_deferred_probe_check_state(dev);
if (!try_module_get(ops->owner))
return -ENODEV;
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 08eeafc9529f..80151176ba12 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1006,7 +1006,18 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static bool viommu_capable(enum iommu_cap cap)
+{
+ switch (cap) {
+ case IOMMU_CAP_CACHE_COHERENCY:
+ return true;
+ default:
+ return false;
+ }
+}
+
static struct iommu_ops viommu_ops = {
+ .capable = viommu_capable,
.domain_alloc = viommu_domain_alloc,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 0834d5f866fd..39d2b03e2631 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1416,42 +1416,37 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
struct device *dev = ir->dev;
- char *data;
-
- data = kzalloc(USB_CTRL_MSG_SZ, GFP_KERNEL);
- if (!data) {
- dev_err(dev, "%s: memory allocation failed!", __func__);
- return;
- }
+ char data[USB_CTRL_MSG_SZ];
/*
* This is a strange one. Windows issues a set address to the device
 * on the receive control pipe and expects a certain value pair back
*/
- ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
- data, USB_CTRL_MSG_SZ, 3000);
+ ret = usb_control_msg_recv(ir->usbdev, 0, USB_REQ_SET_ADDRESS,
+ USB_DIR_IN | USB_TYPE_VENDOR,
+ 0, 0, data, USB_CTRL_MSG_SZ, 3000,
+ GFP_KERNEL);
dev_dbg(dev, "set address - ret = %d", ret);
dev_dbg(dev, "set address - data[0] = %d, data[1] = %d",
data[0], data[1]);
/* set feature: bit rate 38400 bps */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
- 0xc04e, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
+ 0xc04e, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set feature - ret = %d", ret);
/* bRequest 4: set char length to 8 bits */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 4, USB_TYPE_VENDOR,
- 0x0808, 0x0000, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 4, USB_TYPE_VENDOR,
+ 0x0808, 0x0000, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set char length - retB = %d", ret);
/* bRequest 2: set handshaking to use DTR/DSR */
- ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
- 2, USB_TYPE_VENDOR,
- 0x0000, 0x0100, NULL, 0, 3000);
+ ret = usb_control_msg_send(ir->usbdev, 0,
+ 2, USB_TYPE_VENDOR,
+ 0x0000, 0x0100, NULL, 0, 3000, GFP_KERNEL);
dev_dbg(dev, "set handshake - retC = %d", ret);
/* device resume */
@@ -1459,8 +1454,6 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
/* get hw/sw revision? */
mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
-
- kfree(data);
}
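The conversion also makes the on-stack data[] buffer legal: unlike usb_control_msg(), the usb_control_msg_recv()/_send() wrappers copy through an internal DMA-safe bounce buffer and return 0 on success (or a negative error, including on short transfers). A minimal sketch of the receive wrapper, with illustrative request values:

	u8 buf[2];
	int err;

	err = usb_control_msg_recv(udev, 0, USB_REQ_SET_ADDRESS,
				   USB_DIR_IN | USB_TYPE_VENDOR,
				   0, 0, buf, sizeof(buf), 3000, GFP_KERNEL);
	if (err)
		dev_err(&udev->dev, "control read failed: %d\n", err);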
static void mceusb_gen2_init(struct mceusb_dev *ir)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 93ebd174d848..5d9e3483b89d 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -25,7 +25,7 @@
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
-#define FASTRPC_MAX_SESSIONS 13 /*12 compute, 1 cpz*/
+#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
@@ -1943,7 +1943,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
spin_lock_irqsave(&cctx->lock, flags);
- sess = &cctx->session[cctx->sesscount];
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
+ dev_err(&pdev->dev, "too many sessions\n");
+ spin_unlock_irqrestore(&cctx->lock, flags);
+ return -ENOSPC;
+ }
+ sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
@@ -1956,13 +1961,12 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
struct fastrpc_session_ctx *dup_sess;
for (i = 1; i < sessions; i++) {
- if (cctx->sesscount++ >= FASTRPC_MAX_SESSIONS)
+ if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
- dup_sess = &cctx->session[cctx->sesscount];
+ dup_sess = &cctx->session[cctx->sesscount++];
memcpy(dup_sess, sess, sizeof(*dup_sess));
}
}
- cctx->sesscount++;
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index cee4c0b59f43..06aa62ce0ed1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -949,16 +949,17 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/* Erase init depends on CSD and SSR */
mmc_init_erase(card);
-
- /*
- * Fetch switch information from card.
- */
- err = mmc_read_switch(card);
- if (err)
- return err;
}
/*
+	 * Fetch switch information from card. Note that sd3_bus_mode can
+	 * change if the voltage switch outcome changes, so do this always.
+ */
+ err = mmc_read_switch(card);
+ if (err)
+ return err;
+
+ /*
* For SPI, enable CRC as appropriate.
* This CRC enable is located AFTER the reading of the
* card registers because some SDHC cards are not able
@@ -1480,26 +1481,15 @@ retry:
if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) &&
mmc_sd_card_using_v18(card) &&
host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) {
- /*
- * Re-read switch information in case it has changed since
- * oldcard was initialized.
- */
- if (oldcard) {
- err = mmc_read_switch(card);
- if (err)
- goto free_card;
- }
- if (mmc_sd_card_using_v18(card)) {
- if (mmc_host_set_uhs_voltage(host) ||
- mmc_sd_init_uhs_card(card)) {
- v18_fixup_failed = true;
- mmc_power_cycle(host, ocr);
- if (!oldcard)
- mmc_remove_card(card);
- goto retry;
- }
- goto done;
+ if (mmc_host_set_uhs_voltage(host) ||
+ mmc_sd_init_uhs_card(card)) {
+ v18_fixup_failed = true;
+ mmc_power_cycle(host, ocr);
+ if (!oldcard)
+ mmc_remove_card(card);
+ goto retry;
}
+ goto cont;
}
/* Initialization sequence for UHS-I cards */
@@ -1534,7 +1524,7 @@ retry:
mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
}
}
-
+cont:
if (!oldcard) {
/* Read/parse the extension registers. */
err = sd_read_ext_regs(card);
@@ -1566,7 +1556,7 @@ retry:
err = -EINVAL;
goto free_card;
}
-done:
+
host->card = card;
return 0;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2f4da2c13c0a..5c2febe94428 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3167,6 +3167,9 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
found:
if (!ipv6_dev_get_saddr(dev_net(dst->dev), dst->dev, &targets[i], 0, &saddr))
bond_ns_send(slave, &targets[i], &saddr, tags);
+ else
+ bond_ns_send(slave, &targets[i], &in6addr_any, tags);
+
dst_release(dst);
kfree(tags);
}
@@ -3198,12 +3201,19 @@ static bool bond_has_this_ip6(struct bonding *bond, struct in6_addr *addr)
return ret;
}
-static void bond_validate_ns(struct bonding *bond, struct slave *slave,
+static void bond_validate_na(struct bonding *bond, struct slave *slave,
struct in6_addr *saddr, struct in6_addr *daddr)
{
int i;
- if (ipv6_addr_any(saddr) || !bond_has_this_ip6(bond, daddr)) {
+ /* Ignore NAs that:
+	 * 1. Source address is the unspecified address.
+	 * 2. Dest address is neither the all-nodes multicast address nor
+	 *    an address present on the bond interface.
+ */
+ if (ipv6_addr_any(saddr) ||
+ (!ipv6_addr_equal(daddr, &in6addr_linklocal_allnodes) &&
+ !bond_has_this_ip6(bond, daddr))) {
slave_dbg(bond->dev, slave->dev, "%s: sip %pI6c tip %pI6c not found\n",
__func__, saddr, daddr);
return;
@@ -3246,14 +3256,14 @@ static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond,
* see bond_arp_rcv().
*/
if (bond_is_active_slave(slave))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
else if (curr_arp_slave &&
bond_time_in_interval(bond, slave_last_tx(curr_arp_slave), 1))
- bond_validate_ns(bond, slave, saddr, daddr);
+ bond_validate_na(bond, slave, saddr, daddr);
out:
return RX_HANDLER_ANOTHER;
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 6bd69a7e6809..872aba63e7d4 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -170,6 +170,13 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.exit = ksz8_switch_exit,
};
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause);
+
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
@@ -196,6 +203,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = ksz9477_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = ksz9477_reset_switch,
@@ -230,6 +238,7 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
.max_mtu = ksz9477_max_mtu,
+ .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.enable_stp_addr = ksz9477_enable_stp_addr,
.reset = lan937x_reset_switch,
@@ -1656,13 +1665,13 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
{
- struct ksz_device *dev = ds->priv;
struct ksz_port *p;
p = &dev->ports[port];
@@ -1676,6 +1685,15 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
ksz_port_set_xmii_speed(dev, port, speed);
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
+}
+
+static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct ksz_device *dev = ds->priv;
if (dev->dev_ops->phylink_mac_link_up)
dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface,
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 1cdce8a98d1d..f8f19a85744c 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -22,6 +22,7 @@
#define VSC9959_NUM_PORTS 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
+#define VSC9959_TAS_MIN_GATE_LEN_NS 33
#define VSC9959_VCAP_POLICER_BASE 63
#define VSC9959_VCAP_POLICER_MAX 383
#define VSC9959_SWITCH_PCI_BAR 4
@@ -1478,6 +1479,23 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
mdiobus_free(felix->imdio);
}
+/* The switch considers any frame (regardless of size) as eligible for
+ * transmission if the traffic class gate is open for at least 33 ns.
+ * Overruns are prevented by cropping an interval at the end of the gate time
+ * slot for which egress scheduling is blocked, but we still need to keep 33 ns
+ * available for one packet to be transmitted, otherwise the port tc will hang.
+ * This function returns the size of a gate interval that remains available for
+ * setting the guard band, after reserving the space for one egress frame.
+ */
+static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns)
+{
+ /* Gate always open */
+ if (gate_len_ns == U64_MAX)
+ return U64_MAX;
+
+ return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC;
+}
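As a worked example: a 1000 ns gate leaves (1000 - 33) * 1000 = 967,000 ps of interval available for the guard band, PSEC_PER_NSEC being 1000.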
+
/* Extract shortest continuous gate open intervals in ns for each traffic class
* of a cyclic tc-taprio schedule. If a gate is always open, the duration is
* considered U64_MAX. If the gate is always closed, it is considered 0.
@@ -1539,6 +1557,65 @@ static void vsc9959_tas_min_gate_lengths(struct tc_taprio_qopt_offload *taprio,
min_gate_len[tc] = 0;
}
+/* ocelot_write_rix is a macro that concatenates QSYS_MAXSDU_CFG_* with _RSZ,
+ * so we need to spell out the register access to each traffic class in helper
+ * functions, to simplify callers
+ */
+static void vsc9959_port_qmaxsdu_set(struct ocelot *ocelot, int port, int tc,
+ u32 max_sdu)
+{
+ switch (tc) {
+ case 0:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
+ port);
+ break;
+ case 1:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
+ port);
+ break;
+ case 2:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
+ port);
+ break;
+ case 3:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
+ port);
+ break;
+ case 4:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
+ port);
+ break;
+ case 5:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
+ port);
+ break;
+ case 6:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
+ port);
+ break;
+ case 7:
+ ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
+ port);
+ break;
+ }
+}
+
+static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
+{
+ switch (tc) {
+ case 0: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_0, port);
+ case 1: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_1, port);
+ case 2: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_2, port);
+ case 3: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_3, port);
+ case 4: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_4, port);
+ case 5: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_5, port);
+ case 6: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_6, port);
+ case 7: return ocelot_read_rix(ocelot, QSYS_QMAXSDU_CFG_7, port);
+ default:
+ return 0;
+ }
+}
+
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
* switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
* values (the default value is 1518). Also, for traffic class windows smaller
@@ -1595,11 +1672,16 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
+ mutex_lock(&ocelot->fwd_domain_lock);
+
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u64 remaining_gate_len_ps;
u32 max_sdu;
- if (min_gate_len[tc] == U64_MAX /* Gate always open */ ||
- min_gate_len[tc] * PSEC_PER_NSEC > needed_bit_time_ps) {
+ remaining_gate_len_ps =
+ vsc9959_tas_remaining_gate_len_ps(min_gate_len[tc]);
+
+ if (remaining_gate_len_ps > needed_bit_time_ps) {
/* Setting QMAXSDU_CFG to 0 disables oversized frame
* dropping.
*/
@@ -1612,9 +1694,15 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
/* If traffic class doesn't support a full MTU sized
* frame, make sure to enable oversize frame dropping
* for frames larger than the smallest that would fit.
+ *
+ * However, the exact same register, QSYS_QMAXSDU_CFG_*,
+ * controls not only oversized frame dropping, but also
+ * per-tc static guard band lengths, so it reduces the
+ * useful gate interval length. Therefore, be careful
+ * to calculate a guard band (and therefore max_sdu)
+ * that still leaves 33 ns available in the time slot.
*/
- max_sdu = div_u64(min_gate_len[tc] * PSEC_PER_NSEC,
- picos_per_byte);
+ max_sdu = div_u64(remaining_gate_len_ps, picos_per_byte);
/* A TC gate may be completely closed, which is a
* special case where all packets are oversized.
* Any limit smaller than 64 octets accomplishes this
@@ -1637,47 +1725,14 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
max_sdu);
}
- /* ocelot_write_rix is a macro that concatenates
- * QSYS_MAXSDU_CFG_* with _RSZ, so we need to spell out
- * the writes to each traffic class
- */
- switch (tc) {
- case 0:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_0,
- port);
- break;
- case 1:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_1,
- port);
- break;
- case 2:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_2,
- port);
- break;
- case 3:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_3,
- port);
- break;
- case 4:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_4,
- port);
- break;
- case 5:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_5,
- port);
- break;
- case 6:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_6,
- port);
- break;
- case 7:
- ocelot_write_rix(ocelot, max_sdu, QSYS_QMAXSDU_CFG_7,
- port);
- break;
- }
+ vsc9959_port_qmaxsdu_set(ocelot, port, tc, max_sdu);
}
ocelot_write_rix(ocelot, maxlen, QSYS_PORT_MAX_SDU, port);
+
+ ocelot->ops->cut_through_fwd(ocelot);
+
+ mutex_unlock(&ocelot->fwd_domain_lock);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
@@ -1704,13 +1759,13 @@ static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
break;
}
+ mutex_lock(&ocelot->tas_lock);
+
ocelot_rmw_rix(ocelot,
QSYS_TAG_CONFIG_LINK_SPEED(tas_speed),
QSYS_TAG_CONFIG_LINK_SPEED_M,
QSYS_TAG_CONFIG, port);
- mutex_lock(&ocelot->tas_lock);
-
if (ocelot_port->taprio)
vsc9959_tas_guard_bands_update(ocelot, port);
@@ -2770,7 +2825,7 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_switch *ds = felix->ds;
- int port, other_port;
+ int tc, port, other_port;
lockdep_assert_held(&ocelot->fwd_domain_lock);
@@ -2814,19 +2869,27 @@ static void vsc9959_cut_through_fwd(struct ocelot *ocelot)
min_speed = other_ocelot_port->speed;
}
- /* Enable cut-through forwarding for all traffic classes. */
- if (ocelot_port->speed == min_speed)
+ /* Enable cut-through forwarding for all traffic classes that
+ * don't have oversized dropping enabled, since this check is
+ * bypassed in cut-through mode.
+ */
+ if (ocelot_port->speed == min_speed) {
val = GENMASK(7, 0);
+ for (tc = 0; tc < OCELOT_NUM_TC; tc++)
+ if (vsc9959_port_qmaxsdu_get(ocelot, port, tc))
+ val &= ~BIT(tc);
+ }
+
set:
tmp = ocelot_read_rix(ocelot, ANA_CUT_THRU_CFG, port);
if (tmp == val)
continue;
dev_dbg(ocelot->dev,
- "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding\n",
+ "port %d fwd mask 0x%lx speed %d min_speed %d, %s cut-through forwarding on TC mask 0x%x\n",
port, mask, ocelot_port->speed, min_speed,
- val ? "enabling" : "disabling");
+ val ? "enabling" : "disabling", val);
ocelot_write_rix(ocelot, val, ANA_CUT_THRU_CFG, port);
}
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index 1d3e7782a71f..c181346388a4 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1889,9 +1889,9 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (!priv)
return -ENOMEM;
- priv->info = of_device_get_match_data(priv->dev);
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->info = of_device_get_match_data(priv->dev);
priv->reset_gpio = devm_gpiod_get_optional(priv->dev, "reset",
GPIOD_ASIS);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 3887ed33c5fe..fa622639d640 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -109,6 +109,7 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
{
struct xrs700x_port *p = &priv->ports[port];
struct rtnl_link_stats64 stats;
+ unsigned long flags;
int i;
memset(&stats, 0, sizeof(stats));
@@ -138,9 +139,9 @@ static void xrs700x_read_port_counters(struct xrs700x *priv, int port)
*/
stats.rx_packets += stats.multicast;
- u64_stats_update_begin(&p->syncp);
+ flags = u64_stats_update_begin_irqsave(&p->syncp);
p->stats64 = stats;
- u64_stats_update_end(&p->syncp);
+ u64_stats_update_end_irqrestore(&p->syncp, flags);
mutex_unlock(&p->mib_mutex);
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index db1e9d810b41..89889d8150da 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18076,16 +18076,20 @@ static void tg3_shutdown(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
+
netif_device_detach(dev);
if (netif_running(dev))
dev_close(dev);
- if (system_state == SYSTEM_POWER_OFF)
- tg3_power_down(tp);
+ tg3_power_down(tp);
rtnl_unlock();
+
+ pci_disable_device(pdev);
}
/**
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 9e6de2f968fa..6dae768671e3 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1919,7 +1919,7 @@ static void gmac_get_stats64(struct net_device *netdev,
/* Racing with RX NAPI */
do {
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
stats->rx_packets = port->stats.rx_packets;
stats->rx_bytes = port->stats.rx_bytes;
@@ -1931,11 +1931,11 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_crc_errors = port->stats.rx_crc_errors;
stats->rx_frame_errors = port->stats.rx_frame_errors;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
/* Racing with MIB and TX completion interrupts */
do {
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
stats->tx_errors = port->stats.tx_errors;
stats->tx_packets = port->stats.tx_packets;
@@ -1945,15 +1945,15 @@ static void gmac_get_stats64(struct net_device *netdev,
stats->rx_missed_errors = port->stats.rx_missed_errors;
stats->rx_fifo_errors = port->stats.rx_fifo_errors;
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
/* Racing with hard_start_xmit */
do {
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
stats->tx_dropped = port->stats.tx_dropped;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
stats->rx_dropped += stats->rx_missed_errors;
}
@@ -2031,18 +2031,18 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
/* Racing with MIB interrupt */
do {
p = values;
- start = u64_stats_fetch_begin(&port->ir_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->ir_stats_syncp);
for (i = 0; i < RX_STATS_NUM; i++)
*p++ = port->hw_stats[i];
- } while (u64_stats_fetch_retry(&port->ir_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->ir_stats_syncp, start));
values = p;
/* Racing with RX NAPI */
do {
p = values;
- start = u64_stats_fetch_begin(&port->rx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->rx_stats_syncp);
for (i = 0; i < RX_STATUS_NUM; i++)
*p++ = port->rx_stats[i];
@@ -2050,13 +2050,13 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*p++ = port->rx_csum_stats[i];
*p++ = port->rx_napi_exits;
- } while (u64_stats_fetch_retry(&port->rx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->rx_stats_syncp, start));
values = p;
/* Racing with TX start_xmit */
do {
p = values;
- start = u64_stats_fetch_begin(&port->tx_stats_syncp);
+ start = u64_stats_fetch_begin_irq(&port->tx_stats_syncp);
for (i = 0; i < TX_MAX_FRAGS; i++) {
*values++ = port->tx_frag_stats[i];
@@ -2065,7 +2065,7 @@ static void gmac_get_ethtool_stats(struct net_device *netdev,
*values++ = port->tx_frags_linearized;
*values++ = port->tx_hw_csummed;
- } while (u64_stats_fetch_retry(&port->tx_stats_syncp, start));
+ } while (u64_stats_fetch_retry_irq(&port->tx_stats_syncp, start));
}
static int gmac_get_ksettings(struct net_device *netdev,
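The gemini changes above, like the funeth, gve and hinic ones that follow, switch readers to the irq-aware seqcount helpers. The general reader pattern, sketched against a hypothetical stats structure:

	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin_irq(&stats->syncp);
		packets = stats->packets;	/* snapshot under the seqcount */
	} while (u64_stats_fetch_retry_irq(&stats->syncp, start));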
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 0cebe4b63adb..d77ee8936c6a 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -16,6 +16,7 @@
#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
+#include <linux/pm_qos.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
@@ -498,6 +499,9 @@ struct bufdesc_ex {
/* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. */
#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22)
+/* i.MX6Q adds pm_qos support */
+#define FEC_QUIRK_HAS_PMQOS BIT(23)
+
struct bufdesc_prop {
int qid;
/* Address of Rx and Tx buffers */
@@ -557,7 +561,6 @@ struct fec_enet_private {
struct clk *clk_2x_txclk;
bool ptp_clk_on;
- struct mutex ptp_clk_mutex;
unsigned int num_tx_queues;
unsigned int num_rx_queues;
@@ -608,6 +611,7 @@ struct fec_enet_private {
struct delayed_work time_keep;
struct regulator *reg_phy;
struct fec_stop_mode_gpr stop_gpr;
+ struct pm_qos_request pm_qos_req;
unsigned int tx_align;
unsigned int rx_align;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index b0d60f898249..6152f6dbf1bc 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -111,7 +111,8 @@ static const struct fec_devinfo fec_imx6q_info = {
.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
+ FEC_QUIRK_HAS_PMQOS,
};
static const struct fec_devinfo fec_mvf600_info = {
@@ -2028,6 +2029,7 @@ static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
+ unsigned long flags;
int ret;
if (enable) {
@@ -2036,15 +2038,15 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
return ret;
if (fep->clk_ptp) {
- mutex_lock(&fep->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
ret = clk_prepare_enable(fep->clk_ptp);
if (ret) {
- mutex_unlock(&fep->ptp_clk_mutex);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
goto failed_clk_ptp;
} else {
fep->ptp_clk_on = true;
}
- mutex_unlock(&fep->ptp_clk_mutex);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
ret = clk_prepare_enable(fep->clk_ref);
@@ -2059,10 +2061,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
} else {
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
- mutex_lock(&fep->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
clk_disable_unprepare(fep->clk_ptp);
fep->ptp_clk_on = false;
- mutex_unlock(&fep->ptp_clk_mutex);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
clk_disable_unprepare(fep->clk_ref);
clk_disable_unprepare(fep->clk_2x_txclk);
@@ -2075,10 +2077,10 @@ failed_clk_2x_txclk:
clk_disable_unprepare(fep->clk_ref);
failed_clk_ref:
if (fep->clk_ptp) {
- mutex_lock(&fep->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
clk_disable_unprepare(fep->clk_ptp);
fep->ptp_clk_on = false;
- mutex_unlock(&fep->ptp_clk_mutex);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
failed_clk_ptp:
clk_disable_unprepare(fep->clk_enet_out);
@@ -3244,6 +3246,9 @@ fec_enet_open(struct net_device *ndev)
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
+
napi_enable(&fep->napi);
phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
@@ -3285,6 +3290,9 @@ fec_enet_close(struct net_device *ndev)
fec_enet_update_ethtool_stats(ndev);
fec_enet_clk_enable(ndev, false);
+ if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
+ cpu_latency_qos_remove_request(&fep->pm_qos_req);
+
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
pm_runtime_mark_last_busy(&fep->pdev->dev);
pm_runtime_put_autosuspend(&fep->pdev->dev);
@@ -3907,7 +3915,7 @@ fec_probe(struct platform_device *pdev)
}
fep->ptp_clk_on = false;
- mutex_init(&fep->ptp_clk_mutex);
+ spin_lock_init(&fep->tmreg_lock);
/* clk_ref is optional, depends on board */
fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index c74d04f4b2fd..8dd5a2615a89 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -365,21 +365,19 @@ static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
*/
static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
{
- struct fec_enet_private *adapter =
+ struct fec_enet_private *fep =
container_of(ptp, struct fec_enet_private, ptp_caps);
u64 ns;
unsigned long flags;
- mutex_lock(&adapter->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
/* Check the ptp clock */
- if (!adapter->ptp_clk_on) {
- mutex_unlock(&adapter->ptp_clk_mutex);
+ if (!fep->ptp_clk_on) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -EINVAL;
}
- spin_lock_irqsave(&adapter->tmreg_lock, flags);
- ns = timecounter_read(&adapter->tc);
- spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
- mutex_unlock(&adapter->ptp_clk_mutex);
+ ns = timecounter_read(&fep->tc);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
*ts = ns_to_timespec64(ns);
@@ -404,10 +402,10 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
unsigned long flags;
u32 counter;
- mutex_lock(&fep->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
/* Check the ptp clock */
if (!fep->ptp_clk_on) {
- mutex_unlock(&fep->ptp_clk_mutex);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
return -EINVAL;
}
@@ -417,11 +415,9 @@ static int fec_ptp_settime(struct ptp_clock_info *ptp,
*/
counter = ns & fep->cc.mask;
- spin_lock_irqsave(&fep->tmreg_lock, flags);
writel(counter, fep->hwp + FEC_ATIME);
timecounter_init(&fep->tc, &fep->cc, ns);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
- mutex_unlock(&fep->ptp_clk_mutex);
return 0;
}
@@ -518,13 +514,11 @@ static void fec_time_keep(struct work_struct *work)
struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
unsigned long flags;
- mutex_lock(&fep->ptp_clk_mutex);
+ spin_lock_irqsave(&fep->tmreg_lock, flags);
if (fep->ptp_clk_on) {
- spin_lock_irqsave(&fep->tmreg_lock, flags);
timecounter_read(&fep->tc);
- spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
- mutex_unlock(&fep->ptp_clk_mutex);
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
schedule_delayed_work(&fep->time_keep, HZ);
}
@@ -599,8 +593,6 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
}
fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
- spin_lock_init(&fep->tmreg_lock);
-
fec_ptp_start_cyclecounter(ndev);
INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
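The net effect of the fec_ptp.c changes is that a single irq-safe spinlock now guards both ptp_clk_on and the timecounter, so the check-and-read is atomic. In consolidated form (a sketch mirroring the gettime path above):

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	if (fep->ptp_clk_on)
		ns = timecounter_read(&fep->tc);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);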
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
index 53b7e95213a8..671f51135c26 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_txrx.h
@@ -206,9 +206,9 @@ struct funeth_rxq {
#define FUN_QSTAT_READ(q, seq, stats_copy) \
do { \
- seq = u64_stats_fetch_begin(&(q)->syncp); \
+ seq = u64_stats_fetch_begin_irq(&(q)->syncp); \
stats_copy = (q)->stats; \
- } while (u64_stats_fetch_retry(&(q)->syncp, (seq)))
+ } while (u64_stats_fetch_retry_irq(&(q)->syncp, (seq)))
#define FUN_INT_NAME_LEN (IFNAMSIZ + 16)
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 50b384910c83..7b9a2d9d9624 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -177,14 +177,14 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct gve_rx_ring *rx = &priv->rx[ring];
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_pkts = rx->rpackets;
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
rx_pkts += tmp_rx_pkts;
rx_bytes += tmp_rx_bytes;
@@ -198,10 +198,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
if (priv->tx) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_pkts = priv->tx[ring].pkt_done;
tmp_tx_bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
tx_pkts += tmp_tx_pkts;
tx_bytes += tmp_tx_bytes;
@@ -259,13 +259,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->fill_cnt - rx->cnt;
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
tmp_rx_bytes = rx->rbytes;
tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
tmp_rx_desc_err_dropped_pkt =
rx->rx_desc_err_dropped_pkt;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
data[i++] = rx->rx_cont_packet_cnt;
@@ -331,9 +331,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
}
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
tmp_tx_bytes = tx->bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 6cafee55efc3..044db3ebb071 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -51,10 +51,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->rx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->rx[ring].statss);
packets = priv->rx[ring].rpackets;
bytes = priv->rx[ring].rbytes;
- } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->rx[ring].statss,
start));
s->rx_packets += packets;
s->rx_bytes += bytes;
@@ -64,10 +64,10 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
do {
start =
- u64_stats_fetch_begin(&priv->tx[ring].statss);
+ u64_stats_fetch_begin_irq(&priv->tx[ring].statss);
packets = priv->tx[ring].pkt_done;
bytes = priv->tx[ring].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ } while (u64_stats_fetch_retry_irq(&priv->tx[ring].statss,
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
@@ -1274,9 +1274,9 @@ void gve_handle_report_stats(struct gve_priv *priv)
}
do {
- start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ start = u64_stats_fetch_begin_irq(&priv->tx[idx].statss);
tx_bytes = priv->tx[idx].bytes_done;
- } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ } while (u64_stats_fetch_retry_irq(&priv->tx[idx].statss, start));
stats[stats_idx++] = (struct stats) {
.stat_name = cpu_to_be32(TX_WAKE_CNT),
.value = cpu_to_be64(priv->tx[idx].wake_queue),
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index a866bea65110..e5828a658caf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -74,14 +74,14 @@ void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
stats->pkts = rxq_stats->pkts;
stats->bytes = rxq_stats->bytes;
stats->errors = rxq_stats->csum_errors +
rxq_stats->other_errors;
stats->csum_errors = rxq_stats->csum_errors;
stats->other_errors = rxq_stats->other_errors;
- } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}
/**
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 5051cdff2384..3b6c7b585737 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -99,14 +99,14 @@ void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&txq_stats->syncp);
+ start = u64_stats_fetch_begin_irq(&txq_stats->syncp);
stats->pkts = txq_stats->pkts;
stats->bytes = txq_stats->bytes;
stats->tx_busy = txq_stats->tx_busy;
stats->tx_wake = txq_stats->tx_wake;
stats->tx_dropped = txq_stats->tx_dropped;
stats->big_frags_pkts = txq_stats->big_frags_pkts;
- } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&txq_stats->syncp, start));
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index ea2bb0140a6e..10d7a982a5b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -177,6 +177,10 @@ void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
"Cannot locate client instance close routine\n");
return;
}
+ if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
+ dev_dbg(&pf->pdev->dev, "Client is not open, abort close\n");
+ return;
+ }
cdev->client->ops->close(&cdev->lan_info, cdev->client, reset);
clear_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state);
i40e_client_release_qvlist(&cdev->lan_info);
@@ -429,7 +433,6 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* Remove failed client instance */
clear_bit(__I40E_CLIENT_INSTANCE_OPENED,
&cdev->state);
- i40e_client_del_instance(pf);
return;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 9f1d5de7bf16..10c1e1ea83a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6659,6 +6659,9 @@ static int i40e_configure_queue_channels(struct i40e_vsi *vsi)
vsi->tc_seid_map[i] = ch->seid;
}
}
+
+ /* reset to reconfigure TX queue contexts */
+ i40e_do_reset(vsi->back, I40E_PF_RESET_FLAG, true);
return ret;
err_free:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4226161a3ef..69e67eb6aea7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3688,7 +3688,8 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
u8 prio;
/* is DCB enabled at all? */
- if (vsi->tc_config.numtc == 1)
+ if (vsi->tc_config.numtc == 1 ||
+ i40e_is_tc_mqprio_enabled(vsi->back))
return netdev_pick_tx(netdev, skb, sb_dev);
prio = skb->priority;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index f39440ad5c50..10aa99dfdcdb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -2877,6 +2877,11 @@ static void iavf_reset_task(struct work_struct *work)
int i = 0, err;
bool running;
+ /* Detach interface to avoid subsequent NDO callbacks */
+ rtnl_lock();
+ netif_device_detach(netdev);
+ rtnl_unlock();
+
/* When device is being removed it doesn't make sense to run the reset
* task, just return in such a case.
*/
@@ -2884,7 +2889,7 @@ static void iavf_reset_task(struct work_struct *work)
if (adapter->state != __IAVF_REMOVE)
queue_work(iavf_wq, &adapter->reset_task);
- return;
+ goto reset_finish;
}
while (!mutex_trylock(&adapter->client_lock))
@@ -2954,7 +2959,6 @@ continue_reset:
if (running) {
netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
adapter->link_up = false;
iavf_napi_disable_all(adapter);
}
@@ -3084,7 +3088,7 @@ continue_reset:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
- return;
+ goto reset_finish;
reset_err:
if (running) {
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
@@ -3095,6 +3099,10 @@ reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
+reset_finish:
+ rtnl_lock();
+ netif_device_attach(netdev);
+ rtnl_unlock();
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 136d7911adb4..1e3243808178 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -7,18 +7,6 @@
#include "ice_dcb_lib.h"
#include "ice_sriov.h"
-static bool ice_alloc_rx_buf_zc(struct ice_rx_ring *rx_ring)
-{
- rx_ring->xdp_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->xdp_buf), GFP_KERNEL);
- return !!rx_ring->xdp_buf;
-}
-
-static bool ice_alloc_rx_buf(struct ice_rx_ring *rx_ring)
-{
- rx_ring->rx_buf = kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL);
- return !!rx_ring->rx_buf;
-}
-
/**
* __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
* @qs_cfg: gathered variables needed for PF->VSI queues assignment
@@ -519,11 +507,8 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index, ring->q_vector->napi.napi_id);
- kfree(ring->rx_buf);
ring->xsk_pool = ice_xsk_pool(ring);
if (ring->xsk_pool) {
- if (!ice_alloc_rx_buf_zc(ring))
- return -ENOMEM;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
@@ -538,8 +523,6 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
} else {
- if (!ice_alloc_rx_buf(ring))
- return -ENOMEM;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 173fe6c31341..8c30eea61b6d 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2898,10 +2898,18 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
+ /* reallocate Rx queues that are used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, true);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
+ /* reallocate Rx queues that were used for zero-copy */
+ xdp_ring_err = ice_realloc_zc_buf(vsi, false);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Rx resources failed");
} else {
/* safe to call even when prog == vsi->xdp_prog as
* dev_xdp_install in net/core/dev.c incremented prog's
@@ -3905,7 +3913,7 @@ static int ice_init_pf(struct ice_pf *pf)
pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
if (!pf->avail_rxqs) {
- devm_kfree(ice_pf_to_dev(pf), pf->avail_txqs);
+ bitmap_free(pf->avail_txqs);
pf->avail_txqs = NULL;
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index e48e29258450..03ce85f6e6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -192,6 +192,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
if (err)
return err;
+ ice_clean_rx_ring(rx_ring);
ice_qvec_toggle_napi(vsi, q_vector, false);
ice_qp_clean_rings(vsi, q_idx);
@@ -317,6 +318,62 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
}
/**
+ * ice_realloc_rx_xdp_bufs - reallocate the Rx software ring as either an XSK or a normal buffer
+ * @rx_ring: Rx ring
+ * @pool_present: is pool for XSK present
+ *
+ * Try to allocate a new software ring; return -ENOMEM if the allocation fails.
+ * On success, replace the old buffer with the newly allocated one.
+ * Returns 0 on success, negative on failure
+ */
+static int
+ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
+{
+ size_t elem_size = pool_present ? sizeof(*rx_ring->xdp_buf) :
+ sizeof(*rx_ring->rx_buf);
+ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
+
+ if (!sw_ring)
+ return -ENOMEM;
+
+ if (pool_present) {
+ kfree(rx_ring->rx_buf);
+ rx_ring->rx_buf = NULL;
+ rx_ring->xdp_buf = sw_ring;
+ } else {
+ kfree(rx_ring->xdp_buf);
+ rx_ring->xdp_buf = NULL;
+ rx_ring->rx_buf = sw_ring;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_realloc_zc_buf - reallocate XDP ZC queue pairs
+ * @vsi: Current VSI
+ * @zc: is zero copy set
+ *
+ * Reallocate buffers for the rx_rings that might be used by XSK.
+ * XDP requires more memory than rx_buf provides.
+ * Returns 0 on success, negative on failure
+ */
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
+{
+ struct ice_rx_ring *rx_ring;
+ unsigned long q;
+
+ for_each_set_bit(q, vsi->af_xdp_zc_qps,
+ max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
+ rx_ring = vsi->rx_rings[q];
+ if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
* ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
* @pool: buffer pool to enable/associate to a ring, NULL to disable
@@ -345,11 +402,17 @@ int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
if (if_running) {
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[qid];
+
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
goto xsk_pool_if_up;
}
+
+ ret = ice_realloc_rx_xdp_bufs(rx_ring, pool_present);
+ if (ret)
+ goto xsk_pool_if_up;
}
pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
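ice_realloc_rx_xdp_bufs() above follows the allocate-then-swap idiom: the replacement software ring is allocated before anything is freed, so a failed allocation leaves the current ring intact and the queue still usable. A minimal sketch of the idiom, with hypothetical names:

	void *new_buf = kcalloc(count, elem_size, GFP_KERNEL);

	if (!new_buf)
		return -ENOMEM;		/* old buffer untouched */

	kfree(old_buf);
	old_buf = new_buf;		/* commit only after success */
	return 0;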
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 21faec8e97db..4edbe81eb646 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -27,6 +27,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, u32 budget, int napi_budget);
+int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
static inline bool
ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
@@ -72,5 +73,12 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
+
+static inline int
+ice_realloc_zc_buf(struct ice_vsi __always_unused *vsi,
+ bool __always_unused zc)
+{
+ return 0;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..0eec05d905eb 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -700,10 +700,10 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ static struct dentry *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index dab8f3f771f8..cfe804bc8d20 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -412,7 +412,7 @@ __mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
if (entry->hash != 0xffff) {
ppe->foe_table[entry->hash].ib1 &= ~MTK_FOE_IB1_STATE;
ppe->foe_table[entry->hash].ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE,
- MTK_FOE_STATE_BIND);
+ MTK_FOE_STATE_UNBIND);
dma_wmb();
}
entry->hash = 0xffff;
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 1f5cf1c9a947..69ffce04d630 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -293,6 +293,9 @@ mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
if (!ppe)
return;
+ if (hash > MTK_PPE_HASH_MASK)
+ return;
+
now = (u16)jiffies;
diff = now - ppe->foe_check_time[hash];
if (diff < HZ / 10)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c085b031abfc..89b2d9cea33f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -494,6 +494,24 @@ static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
return err;
}
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+
+ if (!err)
+ return val.vbool;
+
+ mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
+ return MLX5_CAP_GEN(dev, roce);
+}
+EXPORT_SYMBOL(mlx5_is_roce_on);
+
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
@@ -597,7 +615,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
if (MLX5_CAP_GEN(dev, roce_rw_supported))
- MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
+ mlx5_is_roce_on(dev));
max_uc_list = max_uc_list_get_devlink_param(dev);
if (max_uc_list > 0)
@@ -623,7 +642,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
*/
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
- return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
(!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 5fdf9b7179f5..5a1027b07215 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -75,6 +75,7 @@ struct mlxbf_gige {
struct net_device *netdev;
struct platform_device *pdev;
void __iomem *mdio_io;
+ void __iomem *clk_io;
struct mii_bus *mdiobus;
spinlock_t lock; /* for packet processing indices */
u16 rx_q_entries;
@@ -137,7 +138,8 @@ enum mlxbf_gige_res {
MLXBF_GIGE_RES_MDIO9,
MLXBF_GIGE_RES_GPIO0,
MLXBF_GIGE_RES_LLU,
- MLXBF_GIGE_RES_PLU
+ MLXBF_GIGE_RES_PLU,
+ MLXBF_GIGE_RES_CLK
};
/* Version of register data returned by mlxbf_gige_get_regs() */
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
index 2e6c1b7af096..85155cd9405c 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c
@@ -22,10 +22,23 @@
#include <linux/property.h>
#include "mlxbf_gige.h"
+#include "mlxbf_gige_regs.h"
#define MLXBF_GIGE_MDIO_GW_OFFSET 0x0
#define MLXBF_GIGE_MDIO_CFG_OFFSET 0x4
+#define MLXBF_GIGE_MDIO_FREQ_REFERENCE 156250000ULL
+#define MLXBF_GIGE_MDIO_COREPLL_CONST 16384ULL
+#define MLXBF_GIGE_MDC_CLK_NS 400
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG1 0x4
+#define MLXBF_GIGE_MDIO_PLL_I1CLK_REG2 0x8
+#define MLXBF_GIGE_MDIO_CORE_F_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_F_MASK GENMASK(25, 0)
+#define MLXBF_GIGE_MDIO_CORE_R_SHIFT 26
+#define MLXBF_GIGE_MDIO_CORE_R_MASK GENMASK(31, 26)
+#define MLXBF_GIGE_MDIO_CORE_OD_SHIFT 0
+#define MLXBF_GIGE_MDIO_CORE_OD_MASK GENMASK(3, 0)
+
/* Support clause 22 */
#define MLXBF_GIGE_MDIO_CL22_ST1 0x1
#define MLXBF_GIGE_MDIO_CL22_WRITE 0x1
@@ -50,27 +63,76 @@
#define MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK GENMASK(23, 16)
#define MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK GENMASK(31, 24)
+#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
+ FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+
+#define MLXBF_GIGE_BF2_COREPLL_ADDR 0x02800c30
+#define MLXBF_GIGE_BF2_COREPLL_SIZE 0x0000000c
+
+static struct resource corepll_params[] = {
+ [MLXBF_GIGE_VERSION_BF2] = {
+ .start = MLXBF_GIGE_BF2_COREPLL_ADDR,
+ .end = MLXBF_GIGE_BF2_COREPLL_ADDR + MLXBF_GIGE_BF2_COREPLL_SIZE - 1,
+ .name = "COREPLL_RES"
+ },
+};
+
+/* Returns core clock i1clk in Hz */
+static u64 calculate_i1clk(struct mlxbf_gige *priv)
+{
+ u8 core_od, core_r;
+ u64 freq_output;
+ u32 reg1, reg2;
+ u32 core_f;
+
+ reg1 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG1);
+ reg2 = readl(priv->clk_io + MLXBF_GIGE_MDIO_PLL_I1CLK_REG2);
+
+ core_f = (reg1 & MLXBF_GIGE_MDIO_CORE_F_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_F_SHIFT;
+ core_r = (reg1 & MLXBF_GIGE_MDIO_CORE_R_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_R_SHIFT;
+ core_od = (reg2 & MLXBF_GIGE_MDIO_CORE_OD_MASK) >>
+ MLXBF_GIGE_MDIO_CORE_OD_SHIFT;
+
+ /* Compute PLL output frequency as follows:
+ *
+ * CORE_F / 16384
+ * freq_output = freq_reference * ----------------------------
+ * (CORE_R + 1) * (CORE_OD + 1)
+ */
+ freq_output = div_u64((MLXBF_GIGE_MDIO_FREQ_REFERENCE * core_f),
+ MLXBF_GIGE_MDIO_COREPLL_CONST);
+ freq_output = div_u64(freq_output, (core_r + 1) * (core_od + 1));
+
+ return freq_output;
+}
+
/* Formula for encoding the MDIO period. The encoded value is
* passed to the MDIO config register.
*
- * mdc_clk = 2*(val + 1)*i1clk
+ * mdc_clk = 2*(val + 1)*(core clock period in seconds)
*
- * 400 ns = 2*(val + 1)*(((1/430)*1000) ns)
+ * i1clk is in Hz:
+ * 400 ns = 2*(val + 1)*(1/i1clk)
*
- * val = (((400 * 430 / 1000) / 2) - 1)
+ * val = (((400/10^9) / (1/i1clk) / 2) - 1)
+ * val = (400/2 * i1clk)/10^9 - 1
*/
-#define MLXBF_GIGE_I1CLK_MHZ 430
-#define MLXBF_GIGE_MDC_CLK_NS 400
+static u8 mdio_period_map(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u64 i1clk;
-#define MLXBF_GIGE_MDIO_PERIOD (((MLXBF_GIGE_MDC_CLK_NS * MLXBF_GIGE_I1CLK_MHZ / 1000) / 2) - 1)
+ i1clk = calculate_i1clk(priv);
-#define MLXBF_GIGE_MDIO_CFG_VAL (FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_MODE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO3_3_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_FULL_DRIVE_MASK, 1) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, \
- MLXBF_GIGE_MDIO_PERIOD) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_IN_SAMP_MASK, 6) | \
- FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDIO_OUT_SAMP_MASK, 13))
+ mdio_period = div_u64((MLXBF_GIGE_MDC_CLK_NS >> 1) * i1clk, 1000000000) - 1;
+
+ return mdio_period;
+}
static u32 mlxbf_gige_mdio_create_cmd(u16 data, int phy_add,
int phy_reg, u32 opcode)
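As a worked example of the two formulas above (register values hypothetical, picked to land near the old hardcoded 430 MHz): with core_f = 180224, core_r = 1 and core_od = 1,

	i1clk = 156250000 * (180224 / 16384) / ((1 + 1) * (1 + 1))
	      = 156250000 * 11 / 4
	      = 429687500 Hz				/* ~430 MHz */

	mdio_period = (400 / 2) * 429687500 / 10^9 - 1
	            = 85 - 1				/* integer division */
	            = 84

which is within one step of the old fixed MLXBF_GIGE_MDIO_PERIOD of 85, as expected when the PLL really runs near 430 MHz.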
@@ -124,9 +186,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
int phy_reg, u16 val)
{
struct mlxbf_gige *priv = bus->priv;
+ u32 temp;
u32 cmd;
int ret;
- u32 temp;
if (phy_reg & MII_ADDR_C45)
return -EOPNOTSUPP;
@@ -144,18 +206,44 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add,
return ret;
}
+static void mlxbf_gige_mdio_cfg(struct mlxbf_gige *priv)
+{
+ u8 mdio_period;
+ u32 val;
+
+ mdio_period = mdio_period_map(priv);
+
+ val = MLXBF_GIGE_MDIO_CFG_VAL;
+ val |= FIELD_PREP(MLXBF_GIGE_MDIO_CFG_MDC_PERIOD_MASK, mdio_period);
+ writel(val, priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+}
+
int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv)
{
struct device *dev = &pdev->dev;
+ struct resource *res;
int ret;
priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9);
if (IS_ERR(priv->mdio_io))
return PTR_ERR(priv->mdio_io);
- /* Configure mdio parameters */
- writel(MLXBF_GIGE_MDIO_CFG_VAL,
- priv->mdio_io + MLXBF_GIGE_MDIO_CFG_OFFSET);
+ /* The clk resource is shared with other drivers, so
+ * devm_platform_ioremap_resource() cannot be used
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_CLK);
+ if (!res) {
+ /* For backward compatibility with older ACPI tables that lack the
+ * CLK resource, fall back to the copy kept internal to the driver.
+ */
+ res = &corepll_params[MLXBF_GIGE_VERSION_BF2];
+ }
+
+ priv->clk_io = devm_ioremap(dev, res->start, resource_size(res));
+ if (IS_ERR(priv->clk_io))
+ return PTR_ERR(priv->clk_io);
+
+ mlxbf_gige_mdio_cfg(priv);
priv->mdiobus = devm_mdiobus_alloc(dev);
if (!priv->mdiobus) {
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 5fb33c9294bf..7be3a793984d 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -8,6 +8,8 @@
#ifndef __MLXBF_GIGE_REGS_H__
#define __MLXBF_GIGE_REGS_H__
+#define MLXBF_GIGE_VERSION 0x0000
+#define MLXBF_GIGE_VERSION_BF2 0x0
#define MLXBF_GIGE_STATUS 0x0010
#define MLXBF_GIGE_STATUS_READY BIT(0)
#define MLXBF_GIGE_INT_STATUS 0x0028
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 39904dacf4f0..b3472fb94617 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -423,7 +423,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0);
+ 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+ 0);
rt = ip_route_output_key(tun->net, &fl4);
if (IS_ERR(rt))
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
index 6dea7f8c1481..51f8a0816377 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
@@ -425,7 +425,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
lan966x_ifh_get_src_port(skb->data, &src_port);
lan966x_ifh_get_timestamp(skb->data, &timestamp);
- WARN_ON(src_port >= lan966x->num_phys_ports);
+ if (WARN_ON(src_port >= lan966x->num_phys_ports))
+ goto free_skb;
skb->dev = lan966x->ports[src_port]->dev;
skb_pull(skb, IFH_LEN * sizeof(u32));
@@ -449,6 +450,8 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx)
return skb;
+free_skb:
+ kfree_skb(skb);
unmap_page:
dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr,
FDMA_DCB_STATUS_BLOCKL(db->status),
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index 304f84aadc36..21844beba72d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -113,6 +113,8 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
/* This assumes STATUS_WORD_POS == 1, Status
* just after last data
*/
+ if (!byte_swap)
+ val = ntohl((__force __be32)val);
byte_cnt -= (4 - XTR_VALID_BYTES(val));
eof_flag = true;
break;
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 5f9240182351..00d8198072ae 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1465,10 +1465,6 @@ static void mana_gd_shutdown(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-#ifndef PCI_VENDOR_ID_MICROSOFT
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#endif
-
static const struct pci_device_id mana_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_PF_DEVICE_ID) },
{ PCI_DEVICE(PCI_VENDOR_ID_MICROSOFT, MANA_VF_DEVICE_ID) },
diff --git a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
index 4e5df9f2c372..7b92026e1a6f 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/qos_conf.c
@@ -127,10 +127,11 @@ static int nfp_policer_validate(const struct flow_action *action,
return -EOPNOTSUPP;
}
- if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE &&
+ act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
NL_SET_ERR_MSG_MOD(extack,
- "Offload not supported when conform action is not pipe or ok");
+ "Offload not supported when conform action is not continue, pipe or ok");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index cf4d6f1129fa..349a2b1a19a2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1630,21 +1630,21 @@ static void nfp_net_stat64(struct net_device *netdev,
unsigned int start;
do {
- start = u64_stats_fetch_begin(&r_vec->rx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
data[0] = r_vec->rx_pkts;
data[1] = r_vec->rx_bytes;
data[2] = r_vec->rx_drops;
- } while (u64_stats_fetch_retry(&r_vec->rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
stats->rx_packets += data[0];
stats->rx_bytes += data[1];
stats->rx_dropped += data[2];
do {
- start = u64_stats_fetch_begin(&r_vec->tx_sync);
+ start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
data[0] = r_vec->tx_pkts;
data[1] = r_vec->tx_bytes;
data[2] = r_vec->tx_errors;
- } while (u64_stats_fetch_retry(&r_vec->tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
stats->tx_packets += data[0];
stats->tx_bytes += data[1];
stats->tx_errors += data[2];
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index eeb1455a4e5d..b1b1b648e40c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -649,7 +649,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].rx_sync);
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
@@ -657,10 +657,10 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].rx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].rx_sync, start));
do {
- start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync);
+ start = u64_stats_fetch_begin_irq(&nn->r_vecs[i].tx_sync);
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
@@ -670,7 +670,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data)
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
- } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start));
+ } while (u64_stats_fetch_retry_irq(&nn->r_vecs[i].tx_sync, start));
data += NN_RVEC_PER_Q_STATS;
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
index edd300033735..4cc38799eabc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c
@@ -507,6 +507,7 @@ int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state)
if (nfp_nsp_get_abi_ver_minor(nsp) < 32) {
nfp_err(nfp_nsp_cpp(nsp),
"set id mode operation not supported, please update flash\n");
+ nfp_eth_config_cleanup_end(nsp);
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index bc70c6abd6a5..58cf7cc54f40 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -1273,7 +1273,7 @@ static int ofdpa_port_ipv4_neigh(struct ofdpa_port *ofdpa_port,
bool removing;
int err = 0;
- entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 3bf20211cceb..3829c2805b16 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1037,6 +1037,8 @@ static int smsc911x_mii_probe(struct net_device *dev)
return ret;
}
+ /* Indicate that the MAC is responsible for managing PHY PM */
+ phydev->mac_managed_pm = true;
phy_attached_info(phydev);
phy_set_max_speed(phydev, SPEED_100);
@@ -2587,6 +2589,8 @@ static int smsc911x_suspend(struct device *dev)
if (netif_running(ndev)) {
netif_stop_queue(ndev);
netif_device_detach(ndev);
+ if (!device_may_wakeup(dev))
+ phy_stop(ndev->phydev);
}
/* enable wake on LAN, energy detection and the external PME
@@ -2628,6 +2632,8 @@ static int smsc911x_resume(struct device *dev)
if (netif_running(ndev)) {
netif_device_attach(ndev);
netif_start_queue(ndev);
+ if (!device_may_wakeup(dev))
+ phy_start(ndev->phydev);
}
return 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 4f2b82a884b9..9af25be42401 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -1136,8 +1136,6 @@ static void intel_eth_pci_remove(struct pci_dev *pdev)
clk_disable_unprepare(priv->plat->stmmac_clk);
clk_unregister_fixed_rate(priv->plat->stmmac_clk);
-
- pcim_iounmap_regions(pdev, BIT(0));
}
static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 6afdf1622944..5cf218c674a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1310,10 +1310,11 @@ static void adf7242_remove(struct spi_device *spi)
debugfs_remove_recursive(lp->debugfs_root);
+ ieee802154_unregister_hw(lp->hw);
+
cancel_delayed_work_sync(&lp->work);
destroy_workqueue(lp->wqueue);
- ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 42c0b451088d..450b16ad40a4 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2293,7 +2293,7 @@ static int ca8210_set_csma_params(
* @retries: Number of retries
*
* Sets the number of times to retry a transmission if no acknowledgment was
- * was received from the other end when one was requested.
+ * received from the other end when one was requested.
*
* Return: 0 or linux error code
*/
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index 1e1f40f628a0..c69b87d3837d 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -504,6 +504,7 @@ cc2520_tx(struct ieee802154_hw *hw, struct sk_buff *skb)
goto err_tx;
if (status & CC2520_STATUS_TX_UNDERFLOW) {
+ rc = -EINVAL;
dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
goto err_tx;
}
diff --git a/drivers/net/mdio/fwnode_mdio.c b/drivers/net/mdio/fwnode_mdio.c
index 3e79c2c51929..1c1584fca632 100644
--- a/drivers/net/mdio/fwnode_mdio.c
+++ b/drivers/net/mdio/fwnode_mdio.c
@@ -47,7 +47,9 @@ int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
* just fall back to poll mode
*/
if (rc == -EPROBE_DEFER)
- rc = -ENODEV;
+ rc = driver_deferred_probe_check_state(&phy->mdio.dev);
+ if (rc == -EPROBE_DEFER)
+ return rc;
if (rc > 0) {
phy->irq = rc;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index e470e3398abc..9a1a5b203624 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -67,10 +67,10 @@ nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
unsigned int start;
do {
- start = u64_stats_fetch_begin(&ns->syncp);
+ start = u64_stats_fetch_begin_irq(&ns->syncp);
stats->tx_bytes = ns->tx_bytes;
stats->tx_packets = ns->tx_packets;
- } while (u64_stats_fetch_retry(&ns->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
}
static int
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index 73f7962a37d3..c49062ad72c6 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -243,13 +243,7 @@ static irqreturn_t meson_gxl_handle_interrupt(struct phy_device *phydev)
irq_status == INTSRC_ENERGY_DETECT)
return IRQ_HANDLED;
- /* Give PHY some time before MAC starts sending data. This works
- * around an issue where network doesn't come up properly.
- */
- if (!(irq_status & INTSRC_LINK_DOWN))
- phy_queue_state_machine(phydev, msecs_to_jiffies(100));
- else
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev);
return IRQ_HANDLED;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e78d0bf69bc3..6f52b4fb6888 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -2873,12 +2873,18 @@ static int lan8814_config_init(struct phy_device *phydev)
return 0;
}
+/* There is deliberately no 'lan8814_take_coma_mode' counterpart called in
+ * suspend: the coma-mode GPIO line can be shared, so putting one PHY back
+ * into coma mode would drag all the other PHYs into coma mode as well,
+ * which is wrong.
+ */
static int lan8814_release_coma_mode(struct phy_device *phydev)
{
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_optional(&phydev->mdio.dev, "coma-mode",
- GPIOD_OUT_HIGH_OPEN_DRAIN);
+ GPIOD_OUT_HIGH_OPEN_DRAIN |
+ GPIOD_FLAGS_BIT_NONEXCLUSIVE);
if (IS_ERR(gpiod))
return PTR_ERR(gpiod);
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index d4c93d59bc53..8569a545e0a3 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -28,12 +28,16 @@
/* Interrupt Source Register */
#define LAN87XX_INTERRUPT_SOURCE (0x18)
+#define LAN87XX_INTERRUPT_SOURCE_2 (0x08)
/* Interrupt Mask Register */
#define LAN87XX_INTERRUPT_MASK (0x19)
#define LAN87XX_MASK_LINK_UP (0x0004)
#define LAN87XX_MASK_LINK_DOWN (0x0002)
+#define LAN87XX_INTERRUPT_MASK_2 (0x09)
+#define LAN87XX_MASK_COMM_RDY BIT(10)
+
/* MISC Control 1 Register */
#define LAN87XX_CTRL_1 (0x11)
#define LAN87XX_MASK_RGMII_TXC_DLY_EN (0x4000)
@@ -424,17 +428,55 @@ static int lan87xx_phy_config_intr(struct phy_device *phydev)
int rc, val = 0;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
- /* unmask all source and clear them before enable */
- rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, 0x7FFF);
+ /* clear all interrupts */
+ rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
- val = LAN87XX_MASK_LINK_UP | LAN87XX_MASK_LINK_DOWN;
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (rc < 0)
+ return rc;
+
+ /* enable link down and comm ready interrupts */
+ val = LAN87XX_MASK_LINK_DOWN;
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
+ if (rc < 0)
+ return rc;
+
+ val = LAN87XX_MASK_COMM_RDY;
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
} else {
rc = phy_write(phydev, LAN87XX_INTERRUPT_MASK, val);
- if (rc)
+ if (rc < 0)
return rc;
rc = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_MASK_2, val);
+ if (rc < 0)
+ return rc;
+
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
}
return rc < 0 ? rc : 0;
@@ -444,6 +486,14 @@ static irqreturn_t lan87xx_handle_interrupt(struct phy_device *phydev)
{
int irq_status;
+ irq_status = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_MISC,
+ LAN87XX_INTERRUPT_SOURCE_2, 0);
+ if (irq_status < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
irq_status = phy_read(phydev, LAN87XX_INTERRUPT_SOURCE);
if (irq_status < 0) {
phy_error(phydev);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2de09ad5bac0..e11f70911acc 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -777,6 +777,13 @@ static const struct usb_device_id products[] = {
},
#endif
+/* Lenovo ThinkPad OneLink+ Dock (based on Realtek RTL8153) */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3054, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* ThinkPad USB-C Dock (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 709e3c59e340..0cb187def5bc 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1087,6 +1087,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0801)}, /* Quectel RM520N */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d142ac8fcf6e..688905ea0a6d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -770,6 +770,7 @@ enum rtl8152_flags {
RX_EPROTO,
};
+#define DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK 0x3054
#define DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2 0x3082
#define DEVICE_ID_THINKPAD_USB_C_DONGLE 0x720c
#define DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2 0xa387
@@ -9581,6 +9582,7 @@ static bool rtl8152_supports_lenovo_macpassthru(struct usb_device *udev)
if (vendor_id == VENDOR_ID_LENOVO) {
switch (product_id) {
+ case DEVICE_ID_THINKPAD_ONELINK_PLUS_DOCK:
case DEVICE_ID_THINKPAD_THUNDERBOLT3_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN2:
case DEVICE_ID_THINKPAD_USB_C_DOCK_GEN3:
@@ -9828,6 +9830,7 @@ static const struct usb_device_id rtl8152_table[] = {
REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f),
+ REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3069),
REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3082),
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index c62f299b9e0a..d8a5dbf89a02 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -2403,7 +2403,7 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
/* Repeat initial/next rate.
* For legacy IL_NUMBER_TRY == 1, this loop will not execute.
* For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0) {
+ while (repeat_rate > 0 && idx < (LINK_QUAL_MAX_RETRY_NUM - 1)) {
if (is_legacy(tbl_type.lq_type)) {
if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
ant_toggle_cnt++;
@@ -2422,8 +2422,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
cpu_to_le32(new_rate);
repeat_rate--;
idx++;
- if (idx >= LINK_QUAL_MAX_RETRY_NUM)
- goto out;
}
il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2468,7 +2466,6 @@ il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
repeat_rate--;
}
-out:
lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6e55f153ff26..1f301a5fb396 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -5060,6 +5060,10 @@ static int hwsim_virtio_handle_cmd(struct sk_buff *skb)
nlh = nlmsg_hdr(skb);
gnlh = nlmsg_data(nlh);
+
+ if (skb->len < nlh->nlmsg_len)
+ return -EINVAL;
+
err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX,
hwsim_genl_policy, NULL);
if (err) {
@@ -5102,7 +5106,8 @@ static void hwsim_virtio_rx_work(struct work_struct *work)
spin_unlock_irqrestore(&hwsim_virtio_lock, flags);
skb->data = skb->head;
- skb_set_tail_pointer(skb, len);
+ skb_reset_tail_pointer(skb);
+ skb_put(skb, len);
hwsim_virtio_handle_cmd(skb);
spin_lock_irqsave(&hwsim_virtio_lock, flags);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index e1800674089a..576a0149251b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -261,7 +261,7 @@ int mt7921e_mac_reset(struct mt7921_dev *dev)
err = mt7921e_driver_own(dev);
if (err)
- return err;
+ goto out;
err = mt7921_run_firmware(dev);
if (err)
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 43c085c74b7a..bb1a315a7b7e 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -245,6 +245,7 @@ struct wilc {
u8 *rx_buffer;
u32 rx_buffer_offset;
u8 *tx_buffer;
+ u32 *vmm_table;
struct txq_handle txq[NQUEUES];
int txq_entries;
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 600cc57e9da2..7390f94cd4ca 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -28,6 +28,7 @@ struct wilc_sdio {
u32 block_size;
bool isinit;
int has_thrpt_enh3;
+ u8 *cmd53_buf;
};
struct sdio_cmd52 {
@@ -47,6 +48,7 @@ struct sdio_cmd53 {
u32 count: 9;
u8 *buffer;
u32 block_size;
+ bool use_global_buf;
};
static const struct wilc_hif_func wilc_hif_sdio;
@@ -91,6 +93,8 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
{
struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev);
int size, ret;
+ struct wilc_sdio *sdio_priv = wilc->bus_data;
+ u8 *buf = cmd->buffer;
sdio_claim_host(func);
@@ -101,12 +105,23 @@ static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd)
else
size = cmd->count;
+ if (cmd->use_global_buf) {
+ if (size > sizeof(u32))
+ return -EINVAL;
+
+ buf = sdio_priv->cmd53_buf;
+ }
+
if (cmd->read_write) { /* write */
- ret = sdio_memcpy_toio(func, cmd->address,
- (void *)cmd->buffer, size);
+ if (cmd->use_global_buf)
+ memcpy(buf, cmd->buffer, size);
+
+ ret = sdio_memcpy_toio(func, cmd->address, buf, size);
} else { /* read */
- ret = sdio_memcpy_fromio(func, (void *)cmd->buffer,
- cmd->address, size);
+ ret = sdio_memcpy_fromio(func, buf, cmd->address, size);
+
+ if (cmd->use_global_buf)
+ memcpy(cmd->buffer, buf, size);
}
sdio_release_host(func);
@@ -128,6 +143,12 @@ static int wilc_sdio_probe(struct sdio_func *func,
if (!sdio_priv)
return -ENOMEM;
+ sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL);
+ if (!sdio_priv->cmd53_buf) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO,
&wilc_hif_sdio);
if (ret)
@@ -161,6 +182,7 @@ dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
free:
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
return ret;
}
@@ -172,6 +194,7 @@ static void wilc_sdio_remove(struct sdio_func *func)
clk_disable_unprepare(wilc->rtc_clk);
wilc_netdev_cleanup(wilc);
+ kfree(sdio_priv->cmd53_buf);
kfree(sdio_priv);
}
@@ -375,8 +398,9 @@ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)&data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
if (ret)
@@ -414,6 +438,7 @@ static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
@@ -492,8 +517,9 @@ static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data)
cmd.address = WILC_SDIO_FBR_DATA_REG;
cmd.block_mode = 0;
cmd.increment = 1;
- cmd.count = 4;
+ cmd.count = sizeof(u32);
cmd.buffer = (u8 *)data;
+ cmd.use_global_buf = true;
cmd.block_size = sdio_priv->block_size;
ret = wilc_sdio_cmd53(wilc, &cmd);
@@ -535,6 +561,7 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
nblk = size / block_size;
nleft = size % block_size;
+ cmd.use_global_buf = false;
if (nblk > 0) {
cmd.block_mode = 1;
cmd.increment = 1;
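The cmd53 changes above route 4-byte register transfers through the preallocated cmd53_buf, because the caller's buffer may live on the stack and stack memory is not safe for DMA. A minimal sketch of the bounce-buffer idiom, with hypothetical names:

	if (use_bounce) {
		if (size > BOUNCE_SIZE)
			return -EINVAL;
		if (write)
			memcpy(bounce, user_buf, size);	/* stage outgoing data */
	}

	ret = do_dma_io(write, addr, use_bounce ? bounce : user_buf, size);

	if (!write && use_bounce)
		memcpy(user_buf, bounce, size);		/* copy the result back */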
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 947d9a0a494e..58bbf50081e4 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -714,7 +714,7 @@ int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count)
int ret = 0;
int counter;
int timeout;
- u32 vmm_table[WILC_VMM_TBL_SIZE];
+ u32 *vmm_table = wilc->vmm_table;
u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0};
const struct wilc_hif_func *func;
int srcu_idx;
@@ -1252,6 +1252,8 @@ void wilc_wlan_cleanup(struct net_device *dev)
while ((rqe = wilc_wlan_rxq_remove(wilc)))
kfree(rqe);
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
@@ -1489,6 +1491,14 @@ int wilc_wlan_init(struct net_device *dev)
goto fail;
}
+ if (!wilc->vmm_table)
+ wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL);
+
+ if (!wilc->vmm_table) {
+ ret = -ENOBUFS;
+ goto fail;
+ }
+
if (!wilc->tx_buffer)
wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL);
@@ -1513,7 +1523,8 @@ int wilc_wlan_init(struct net_device *dev)
return 0;
fail:
-
+ kfree(wilc->vmm_table);
+ wilc->vmm_table = NULL;
kfree(wilc->rx_buffer);
wilc->rx_buffer = NULL;
kfree(wilc->tx_buffer);
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 990360d75cb6..e85b3c5d4acc 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -256,7 +256,6 @@ static void backend_disconnect(struct backend_info *be)
unsigned int queue_index;
xen_unregister_watchers(vif);
- xenbus_rm(XBT_NIL, be->dev->nodename, "hotplug-status");
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
@@ -984,6 +983,7 @@ static int netback_remove(struct xenbus_device *dev)
struct backend_info *be = dev_get_drvdata(&dev->dev);
unregister_hotplug_status_watch(be);
+ xenbus_rm(XBT_NIL, dev->nodename, "hotplug-status");
if (be->vif) {
kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
backend_disconnect(be);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index af367b22871b..66446f1e06cf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4703,6 +4703,8 @@ static void nvme_fw_act_work(struct work_struct *work)
nvme_start_queues(ctrl);
/* read FW slot information to clear the AER */
nvme_get_fw_slot_info(ctrl);
+
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
static u32 nvme_aer_type(u32 result)
@@ -4715,9 +4717,10 @@ static u32 nvme_aer_subtype(u32 result)
return (result & 0xff00) >> 8;
}
-static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
+static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
{
u32 aer_notice_type = nvme_aer_subtype(result);
+ bool requeue = true;
trace_nvme_async_event(ctrl, aer_notice_type);
@@ -4734,6 +4737,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
*/
if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING)) {
nvme_auth_stop(ctrl);
+ requeue = false;
queue_work(nvme_wq, &ctrl->fw_act_work);
}
break;
@@ -4750,6 +4754,7 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
default:
dev_warn(ctrl->device, "async event result %08x\n", result);
}
+ return requeue;
}
static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
@@ -4765,13 +4770,14 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
u32 result = le32_to_cpu(res->u32);
u32 aer_type = nvme_aer_type(result);
u32 aer_subtype = nvme_aer_subtype(result);
+ bool requeue = true;
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
switch (aer_type) {
case NVME_AER_NOTICE:
- nvme_handle_aen_notice(ctrl, result);
+ requeue = nvme_handle_aen_notice(ctrl, result);
break;
case NVME_AER_ERROR:
/*
@@ -4792,7 +4798,9 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
default:
break;
}
- queue_work(nvme_wq, &ctrl->async_event_work);
+
+ if (requeue)
+ queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3a1c37f32f30..98864b853eef 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3517,6 +3517,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS, },
{ PCI_DEVICE(0xc0a9, 0x540a), /* Crucial P2 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 044da18c06f5..d5871fd6f769 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -121,7 +121,6 @@ struct nvme_tcp_queue {
struct mutex send_mutex;
struct llist_head req_list;
struct list_head send_list;
- bool more_requests;
/* recv state */
void *pdu;
@@ -320,7 +319,7 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
{
return !list_empty(&queue->send_list) ||
- !llist_empty(&queue->req_list) || queue->more_requests;
+ !llist_empty(&queue->req_list);
}
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
@@ -339,9 +338,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
*/
if (queue->io_cpu == raw_smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
- queue->more_requests = !last;
nvme_tcp_send_all(queue);
- queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
}
@@ -1229,7 +1226,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
else if (unlikely(result < 0))
return;
- if (!pending)
+ if (!pending || !queue->rd_enabled)
return;
} while (!time_after(jiffies, deadline)); /* quota is exhausted */
diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c
index cf690df34775..c4113b43dbfe 100644
--- a/drivers/nvme/target/auth.c
+++ b/drivers/nvme/target/auth.c
@@ -196,6 +196,7 @@ int nvmet_setup_auth(struct nvmet_ctrl *ctrl)
if (IS_ERR(ctrl->ctrl_key)) {
ret = PTR_ERR(ctrl->ctrl_key);
ctrl->ctrl_key = NULL;
+ goto out_free_hash;
}
pr_debug("%s: using ctrl hash %s key %*ph\n", __func__,
ctrl->ctrl_key->hash > 0 ?
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a1345790005f..7f4083cf953a 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -735,6 +735,8 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
+ struct nvmet_ns *ns = req->ns;
+
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
req->cqe->sq_id = cpu_to_le16(req->sq->qid);
@@ -745,9 +747,9 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
trace_nvmet_req_complete(req);
- if (req->ns)
- nvmet_put_namespace(req->ns);
req->ops->queue_response(req);
+ if (ns)
+ nvmet_put_namespace(ns);
}
void nvmet_req_complete(struct nvmet_req *req, u16 status)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index dc3b4dc8fe08..a3694a32f6d5 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1506,6 +1506,9 @@ static void nvmet_tcp_state_change(struct sock *sk)
goto done;
switch (sk->sk_state) {
+ case TCP_FIN_WAIT2:
+ case TCP_LAST_ACK:
+ break;
case TCP_FIN_WAIT1:
case TCP_CLOSE_WAIT:
case TCP_CLOSE:
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index c7ef69f29fe4..835bfda86fcf 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -100,6 +100,7 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
struct nvme_id_ns_zns *id_zns;
u64 zsze;
u16 status;
+ u32 mar, mor;
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
req->error_loc = offsetof(struct nvme_identify, nsid);
@@ -130,8 +131,20 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
zsze = (bdev_zone_sectors(req->ns->bdev) << 9) >>
req->ns->blksize_shift;
id_zns->lbafe[0].zsze = cpu_to_le64(zsze);
- id_zns->mor = cpu_to_le32(bdev_max_open_zones(req->ns->bdev));
- id_zns->mar = cpu_to_le32(bdev_max_active_zones(req->ns->bdev));
+
+ mor = bdev_max_open_zones(req->ns->bdev);
+ if (!mor)
+ mor = U32_MAX;
+ else
+ mor--;
+ id_zns->mor = cpu_to_le32(mor);
+
+ mar = bdev_max_active_zones(req->ns->bdev);
+ if (!mar)
+ mar = U32_MAX;
+ else
+ mar--;
+ id_zns->mar = cpu_to_le32(mar);
done:
status = nvmet_copy_to_sgl(req, 0, id_zns, sizeof(*id_zns));
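The hunk above converts the block layer's max-open/max-active zone counts, where 0 means "no limit", into the NVMe ZNS MOR/MAR fields, which are 0's based. A minimal userspace sketch of the same conversion (the helper name is hypothetical, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Convert a 1-based zone limit (0 = unlimited) to a 0's based field. */
static uint32_t zns_0s_based(uint32_t limit)
{
	if (!limit)
		return UINT32_MAX;	/* all ones: no limit reported */
	return limit - 1;
}

int main(void)
{
	printf("limit 0  -> %#x\n", (unsigned)zns_0s_based(0));
	printf("limit 14 -> %u\n", (unsigned)zns_0s_based(14));
	return 0;
}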
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 7bc92923104c..1c573e7a60bc 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -314,7 +314,7 @@ static int unflatten_dt_nodes(const void *blob,
for (offset = 0;
offset >= 0 && depth >= initial_depth;
offset = fdt_next_node(blob, offset, &depth)) {
- if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH))
+ if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
continue;
if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index f223afe47d10..a66386043aa6 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1546,6 +1546,7 @@ static int __init ccio_probe(struct parisc_device *dev)
}
ccio_ioc_init(ioc);
if (ccio_init_resources(ioc)) {
+ iounmap(ioc->ioc_regs);
kfree(ioc);
return -ENOMEM;
}
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 3a8c98615634..bdef7a8d6ab8 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -221,16 +221,7 @@ static size_t irt_num_entry;
static struct irt_entry *iosapic_alloc_irt(int num_entries)
{
- unsigned long a;
-
- /* The IRT needs to be 8-byte aligned for the PDC call.
- * Normally kmalloc would guarantee larger alignment, but
- * if CONFIG_DEBUG_SLAB is enabled, then we can get only
- * 4-byte alignment on 32-bit kernels
- */
- a = (unsigned long)kmalloc(sizeof(struct irt_entry) * num_entries + 8, GFP_KERNEL);
- a = (a + 7UL) & ~7UL;
- return (struct irt_entry *)a;
+ return kcalloc(num_entries, sizeof(struct irt_entry), GFP_KERNEL);
}
/**
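The removed lines implemented manual 8-byte alignment by over-allocating and rounding the pointer up; kcalloc() already returns memory with at least that alignment and zeroes it as well. A userspace sketch of the old trick and its hidden cost (illustrative only):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Over-allocate by 8 bytes, then round up to an 8-byte boundary. */
	unsigned long a = (unsigned long)malloc(100 + 8);
	unsigned long aligned = (a + 7UL) & ~7UL;

	printf("raw %#lx aligned %#lx\n", a, aligned);

	/* The rounded pointer can never be handed back to free(); only
	 * the original may be freed, which is one reason the kcalloc()
	 * version in the patch is both simpler and leak-free. */
	free((void *)a);
	return 0;
}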
diff --git a/drivers/peci/controller/peci-aspeed.c b/drivers/peci/controller/peci-aspeed.c
index 1925ddc13f00..731c5d8f75c6 100644
--- a/drivers/peci/controller/peci-aspeed.c
+++ b/drivers/peci/controller/peci-aspeed.c
@@ -523,7 +523,7 @@ static int aspeed_peci_probe(struct platform_device *pdev)
return PTR_ERR(priv->base);
priv->irq = platform_get_irq(pdev, 0);
- if (!priv->irq)
+ if (priv->irq < 0)
return priv->irq;
ret = devm_request_irq(&pdev->dev, priv->irq, aspeed_peci_irq_handler,
diff --git a/drivers/peci/cpu.c b/drivers/peci/cpu.c
index 68eb61c65d34..de4a7b3e5966 100644
--- a/drivers/peci/cpu.c
+++ b/drivers/peci/cpu.c
@@ -188,8 +188,6 @@ static void adev_release(struct device *dev)
{
struct auxiliary_device *adev = to_auxiliary_dev(dev);
- auxiliary_device_uninit(adev);
-
kfree(adev->name);
kfree(adev);
}
@@ -234,6 +232,7 @@ static void unregister_adev(void *_adev)
struct auxiliary_device *adev = _adev;
auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
}
static int devm_adev_add(struct device *dev, int idx)
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 513de1f54e2d..933b96e243b8 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -117,7 +117,7 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
if (num_irqs == 1) {
int irq = platform_get_irq(pdev, 0);
- if (irq && irq_is_percpu_devid(irq))
+ if ((irq > 0) && irq_is_percpu_devid(irq))
return pmu_parse_percpu_irq(pmu, irq);
}
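Both this hunk and the peci-aspeed one above fix the same mistake: platform_get_irq() reports failure with a negative errno, not 0, so a zero/truthiness test misses errors such as -EPROBE_DEFER. A kernel-style fragment of the correct pattern (not a standalone program):

	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */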
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 6f6681bbfd36..8de4ca2fef21 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -473,7 +473,7 @@ static int pmu_sbi_get_ctrinfo(int nctr)
if (!pmu_ctr_list)
return -ENOMEM;
- for (i = 0; i <= nctr; i++) {
+ for (i = 0; i < nctr; i++) {
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_GET_INFO, i, 0, 0, 0, 0, 0);
if (ret.error)
/* The logical counter ids are not expected to be contiguous */
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index c5fd154990c8..c7df8c5fe585 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -331,6 +331,7 @@ struct ocelot_pinctrl {
const struct ocelot_pincfg_data *pincfg_data;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
+ struct workqueue_struct *wq;
};
struct ocelot_match_data {
@@ -338,6 +339,11 @@ struct ocelot_match_data {
struct ocelot_pincfg_data pincfg_data;
};
+struct ocelot_irq_work {
+ struct work_struct irq_work;
+ struct irq_desc *irq_desc;
+};
+
#define LUTON_P(p, f0, f1) \
static struct ocelot_pin_caps luton_pin_##p = { \
.pin = p, \
@@ -1813,6 +1819,75 @@ static void ocelot_irq_mask(struct irq_data *data)
gpiochip_disable_irq(chip, gpio);
}
+static void ocelot_irq_work(struct work_struct *work)
+{
+ struct ocelot_irq_work *w = container_of(work, struct ocelot_irq_work, irq_work);
+ struct irq_chip *parent_chip = irq_desc_get_chip(w->irq_desc);
+ struct gpio_chip *chip = irq_desc_get_chip_data(w->irq_desc);
+ struct irq_data *data = irq_desc_get_irq_data(w->irq_desc);
+ unsigned int gpio = irqd_to_hwirq(data);
+
+ local_irq_disable();
+ chained_irq_enter(parent_chip, w->irq_desc);
+ generic_handle_domain_irq(chip->irq.domain, gpio);
+ chained_irq_exit(parent_chip, w->irq_desc);
+ local_irq_enable();
+
+ kfree(w);
+}
+
+static void ocelot_irq_unmask_level(struct irq_data *data)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
+ struct ocelot_pinctrl *info = gpiochip_get_data(chip);
+ struct irq_desc *desc = irq_data_to_desc(data);
+ unsigned int gpio = irqd_to_hwirq(data);
+ unsigned int bit = BIT(gpio % 32);
+ bool ack = false, active = false;
+ u8 trigger_level;
+ int val;
+
+ trigger_level = irqd_get_trigger_type(data);
+
+ /* Check if the interrupt line is still active. */
+ regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val);
+ if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) ||
+ (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH))
+ active = true;
+
+ /*
+ * Check if the interrupt controller has seen any changes in the
+ * interrupt line.
+ */
+ regmap_read(info->map, REG(OCELOT_GPIO_INTR, info, gpio), &val);
+ if (val & bit)
+ ack = true;
+
+ /* Enable the interrupt now */
+ gpiochip_enable_irq(chip, gpio);
+ regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio),
+ bit, bit);
+
+ /*
+	 * If the interrupt line is still active and the interrupt
+	 * controller has not seen any change on the line, another
+	 * interrupt arrived while the line was already active. That
+	 * interrupt was missed, so kick the interrupt handler again.
+ */
+ if (active && !ack) {
+ struct ocelot_irq_work *work;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ work->irq_desc = desc;
+ INIT_WORK(&work->irq_work, ocelot_irq_work);
+ queue_work(info->wq, &work->irq_work);
+ }
+}
+
static void ocelot_irq_unmask(struct irq_data *data)
{
struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
@@ -1836,13 +1911,12 @@ static void ocelot_irq_ack(struct irq_data *data)
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type);
-static struct irq_chip ocelot_eoi_irqchip = {
+static struct irq_chip ocelot_level_irqchip = {
.name = "gpio",
.irq_mask = ocelot_irq_mask,
- .irq_eoi = ocelot_irq_ack,
- .irq_unmask = ocelot_irq_unmask,
- .flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
- IRQCHIP_IMMUTABLE,
+ .irq_ack = ocelot_irq_ack,
+ .irq_unmask = ocelot_irq_unmask_level,
+ .flags = IRQCHIP_IMMUTABLE,
.irq_set_type = ocelot_irq_set_type,
GPIOCHIP_IRQ_RESOURCE_HELPERS
};
@@ -1859,14 +1933,9 @@ static struct irq_chip ocelot_irqchip = {
static int ocelot_irq_set_type(struct irq_data *data, unsigned int type)
{
- type &= IRQ_TYPE_SENSE_MASK;
-
- if (!(type & (IRQ_TYPE_EDGE_BOTH | IRQ_TYPE_LEVEL_HIGH)))
- return -EINVAL;
-
- if (type & IRQ_TYPE_LEVEL_HIGH)
- irq_set_chip_handler_name_locked(data, &ocelot_eoi_irqchip,
- handle_fasteoi_irq, NULL);
+ if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ irq_set_chip_handler_name_locked(data, &ocelot_level_irqchip,
+ handle_level_irq, NULL);
if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_chip_handler_name_locked(data, &ocelot_irqchip,
handle_edge_irq, NULL);
@@ -1996,6 +2065,10 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
if (!info->desc)
return -ENOMEM;
+ info->wq = alloc_ordered_workqueue("ocelot_ordered", 0);
+ if (!info->wq)
+ return -ENOMEM;
+
info->pincfg_data = &data->pincfg_data;
reset = devm_reset_control_get_optional_shared(dev, "switch");
@@ -2018,7 +2091,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
dev_err(dev, "Failed to create regmap\n");
return PTR_ERR(info->map);
}
- dev_set_drvdata(dev, info->map);
+ dev_set_drvdata(dev, info);
info->dev = dev;
/* Pinconf registers */
@@ -2043,6 +2116,15 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static int ocelot_pinctrl_remove(struct platform_device *pdev)
+{
+ struct ocelot_pinctrl *info = platform_get_drvdata(pdev);
+
+ destroy_workqueue(info->wq);
+
+ return 0;
+}
+
static struct platform_driver ocelot_pinctrl_driver = {
.driver = {
.name = "pinctrl-ocelot",
@@ -2050,6 +2132,7 @@ static struct platform_driver ocelot_pinctrl_driver = {
.suppress_bind_attrs = true,
},
.probe = ocelot_pinctrl_probe,
+ .remove = ocelot_pinctrl_remove,
};
module_platform_driver(ocelot_pinctrl_driver);
MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index 6bec7f143134..704a99d2f93c 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -530,10 +530,10 @@ DECLARE_MSM_GPIO_PINS(187);
DECLARE_MSM_GPIO_PINS(188);
DECLARE_MSM_GPIO_PINS(189);
-static const unsigned int sdc2_clk_pins[] = { 190 };
-static const unsigned int sdc2_cmd_pins[] = { 191 };
-static const unsigned int sdc2_data_pins[] = { 192 };
-static const unsigned int ufs_reset_pins[] = { 193 };
+static const unsigned int ufs_reset_pins[] = { 190 };
+static const unsigned int sdc2_clk_pins[] = { 191 };
+static const unsigned int sdc2_cmd_pins[] = { 192 };
+static const unsigned int sdc2_data_pins[] = { 193 };
enum sc8180x_functions {
msm_mux_adsp_ext,
@@ -1582,7 +1582,7 @@ static const int sc8180x_acpi_reserved_gpios[] = {
static const struct msm_gpio_wakeirq_map sc8180x_pdc_map[] = {
{ 3, 31 }, { 5, 32 }, { 8, 33 }, { 9, 34 }, { 10, 100 }, { 12, 104 },
{ 24, 37 }, { 26, 38 }, { 27, 41 }, { 28, 42 }, { 30, 39 }, { 36, 43 },
- { 37, 43 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
+ { 37, 44 }, { 38, 45 }, { 39, 118 }, { 39, 125 }, { 41, 47 },
{ 42, 48 }, { 46, 50 }, { 47, 49 }, { 48, 51 }, { 49, 53 }, { 50, 52 },
{ 51, 116 }, { 51, 123 }, { 53, 54 }, { 54, 55 }, { 55, 56 },
{ 56, 57 }, { 58, 58 }, { 60, 60 }, { 68, 62 }, { 70, 63 }, { 76, 86 },
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
index afc1f5df7545..b82ad135bf2a 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-a100-r.c
@@ -99,7 +99,7 @@ MODULE_DEVICE_TABLE(of, a100_r_pinctrl_match);
static struct platform_driver a100_r_pinctrl_driver = {
.probe = a100_r_pinctrl_probe,
.driver = {
- .name = "sun50iw10p1-r-pinctrl",
+ .name = "sun50i-a100-r-pinctrl",
.of_match_table = a100_r_pinctrl_match,
},
};
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
index 55834ccb4ac7..1e071df4c9f5 100644
--- a/drivers/platform/mellanox/mlxreg-lc.c
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -460,8 +460,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
u32 regval;
int err;
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
if (err)
goto regmap_read_fail;
@@ -474,7 +472,6 @@ static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -491,8 +488,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
 * line card which has already been enabled. Disabling does not affect the disabled line
* card.
*/
- mutex_lock(&mlxreg_lc->lock);
-
err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, &regval);
if (err)
goto regmap_read_fail;
@@ -505,7 +500,6 @@ static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
regmap_read_fail:
- mutex_unlock(&mlxreg_lc->lock);
return err;
}
@@ -538,6 +532,15 @@ mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
static void
mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
{
+ if (action)
+ mlxreg_lc->state |= state;
+ else
+ mlxreg_lc->state &= ~state;
+}
+
+static void
+mlxreg_lc_state_update_locked(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
+{
mutex_lock(&mlxreg_lc->lock);
if (action)
@@ -560,8 +563,11 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
- if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
+ mutex_lock(&mlxreg_lc->lock);
+ if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED)) {
+ mutex_unlock(&mlxreg_lc->lock);
return 0;
+ }
switch (kind) {
case MLXREG_HOTPLUG_LC_SYNCED:
@@ -574,7 +580,7 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
- return err;
+ goto mlxreg_lc_power_on_off_fail;
}
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
@@ -588,12 +594,13 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
/* In case line card is configured - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+ mutex_unlock(&mlxreg_lc->lock);
return err;
}
err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
mlxreg_lc->main_devs_num);
if (err)
- return err;
+ goto mlxreg_lc_create_static_devices_fail;
/* In case line card is already in ready state - enable it. */
if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
@@ -620,6 +627,10 @@ static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind,
break;
}
+mlxreg_lc_power_on_off_fail:
+mlxreg_lc_create_static_devices_fail:
+ mutex_unlock(&mlxreg_lc->lock);
+
return err;
}
@@ -665,7 +676,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
if (err)
goto mlxreg_lc_create_static_devices_failed;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_POWERED, 1);
}
/* Verify if line card is synchronized. */
@@ -676,7 +687,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
/* Power on line card if necessary. */
if (regval & mlxreg_lc->data->mask) {
mlxreg_lc->state |= MLXREG_LC_SYNCED;
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1);
if (mlxreg_lc->state & ~MLXREG_LC_POWERED) {
err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
if (err)
@@ -684,7 +695,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
}
}
- mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
return 0;
@@ -814,10 +825,9 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
mutex_init(&mlxreg_lc->lock);
/* Set event notification callback. */
- if (data->notifier) {
- data->notifier->user_handler = mlxreg_lc_event_handler;
- data->notifier->handle = mlxreg_lc;
- }
+ data->notifier->user_handler = mlxreg_lc_event_handler;
+ data->notifier->handle = mlxreg_lc;
+
data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
if (!data->hpdev.adapter) {
dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
@@ -863,7 +873,6 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err) {
dev_err(&pdev->dev, "Failed to sync regmap for client %s at bus %d at addr 0x%02x\n",
data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
- err = PTR_ERR(regmap);
goto regcache_sync_fail;
}
@@ -878,16 +887,14 @@ static int mlxreg_lc_probe(struct platform_device *pdev)
if (err)
goto mlxreg_lc_config_init_fail;
- return err;
+ return 0;
mlxreg_lc_config_init_fail:
regcache_sync_fail:
regmap_write_fail:
devm_regmap_init_i2c_fail:
- if (data->hpdev.client) {
- i2c_unregister_device(data->hpdev.client);
- data->hpdev.client = NULL;
- }
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
i2c_new_device_fail:
i2c_put_adapter(data->hpdev.adapter);
data->hpdev.adapter = NULL;
@@ -905,6 +912,8 @@ static int mlxreg_lc_remove(struct platform_device *pdev)
struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
+ mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_INITIALIZED, 0);
+
/*
* Probing and removing are invoked by hotplug events raised upon line card insertion and
* removing. If probing procedure fails all data is cleared. However, hotplug event still
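The driver now splits state updates into a bare helper that assumes the lock is held and a *_locked() wrapper that takes it, so paths that already hold mlxreg_lc->lock (such as the event handler) avoid self-deadlock. A userspace sketch of the same split, with illustrative names rather than the driver's:

#include <pthread.h>

struct lc {
	pthread_mutex_t lock;
	unsigned int state;
};

/* Caller must hold lc->lock. */
static void lc_state_update(struct lc *lc, unsigned int bit, int set)
{
	if (set)
		lc->state |= bit;
	else
		lc->state &= ~bit;
}

/* For callers that do not already hold the lock. */
static void lc_state_update_locked(struct lc *lc, unsigned int bit, int set)
{
	pthread_mutex_lock(&lc->lock);
	lc_state_update(lc, bit, set);
	pthread_mutex_unlock(&lc->lock);
}

int main(void)
{
	struct lc lc = { PTHREAD_MUTEX_INITIALIZER, 0 };

	lc_state_update_locked(&lc, 0x1, 1);
	return lc.state != 0x1;
}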
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index d5655f6a4a41..585911020cea 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -86,38 +86,38 @@ static const struct software_node ssam_node_bas_dtx = {
.parent = &ssam_node_root,
};
-/* HID keyboard (TID1). */
-static const struct software_node ssam_node_hid_tid1_keyboard = {
+/* HID keyboard (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_keyboard = {
.name = "ssam:01:15:01:01:00",
.parent = &ssam_node_root,
};
-/* HID pen stash (TID1; pen taken / stashed away evens). */
-static const struct software_node ssam_node_hid_tid1_penstash = {
+/* HID pen stash (SAM, TID=1; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_sam_penstash = {
.name = "ssam:01:15:01:02:00",
.parent = &ssam_node_root,
};
-/* HID touchpad (TID1). */
-static const struct software_node ssam_node_hid_tid1_touchpad = {
+/* HID touchpad (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_touchpad = {
.name = "ssam:01:15:01:03:00",
.parent = &ssam_node_root,
};
-/* HID device instance 6 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid6 = {
+/* HID device instance 6 (SAM, TID=1, HID sensor collection). */
+static const struct software_node ssam_node_hid_sam_sensors = {
.name = "ssam:01:15:01:06:00",
.parent = &ssam_node_root,
};
-/* HID device instance 7 (TID1, unknown HID device). */
-static const struct software_node ssam_node_hid_tid1_iid7 = {
+/* HID device instance 7 (SAM, TID=1, UCM UCSI HID client). */
+static const struct software_node ssam_node_hid_sam_ucm_ucsi = {
.name = "ssam:01:15:01:07:00",
.parent = &ssam_node_root,
};
-/* HID system controls (TID1). */
-static const struct software_node ssam_node_hid_tid1_sysctrl = {
+/* HID system controls (SAM, TID=1). */
+static const struct software_node ssam_node_hid_sam_sysctrl = {
.name = "ssam:01:15:01:08:00",
.parent = &ssam_node_root,
};
@@ -182,8 +182,8 @@ static const struct software_node ssam_node_hid_kip_touchpad = {
.parent = &ssam_node_hub_kip,
};
-/* HID device instance 5 (KIP hub, unknown HID device). */
-static const struct software_node ssam_node_hid_kip_iid5 = {
+/* HID device instance 5 (KIP hub, type-cover firmware update). */
+static const struct software_node ssam_node_hid_kip_fwupd = {
.name = "ssam:01:15:02:05:00",
.parent = &ssam_node_hub_kip,
};
@@ -241,12 +241,12 @@ static const struct software_node *ssam_node_group_sls[] = {
&ssam_node_bat_main,
&ssam_node_tmp_pprof,
&ssam_node_pos_tablet_switch,
- &ssam_node_hid_tid1_keyboard,
- &ssam_node_hid_tid1_penstash,
- &ssam_node_hid_tid1_touchpad,
- &ssam_node_hid_tid1_iid6,
- &ssam_node_hid_tid1_iid7,
- &ssam_node_hid_tid1_sysctrl,
+ &ssam_node_hid_sam_keyboard,
+ &ssam_node_hid_sam_penstash,
+ &ssam_node_hid_sam_touchpad,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
+ &ssam_node_hid_sam_sysctrl,
NULL,
};
@@ -278,7 +278,9 @@ static const struct software_node *ssam_node_group_sp8[] = {
&ssam_node_hid_kip_keyboard,
&ssam_node_hid_kip_penstash,
&ssam_node_hid_kip_touchpad,
- &ssam_node_hid_kip_iid5,
+ &ssam_node_hid_kip_fwupd,
+ &ssam_node_hid_sam_sensors,
+ &ssam_node_hid_sam_ucm_ucsi,
NULL,
};
@@ -325,6 +327,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+ /* Surface Laptop Go 2 */
+ { "MSHW0290", (unsigned long)ssam_node_group_slg1 },
+
/* Surface Laptop Studio */
{ "MSHW0123", (unsigned long)ssam_node_group_sls },
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index e0230ea0cb7e..f1259d81d86d 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -99,6 +99,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */
{KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */
{KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */
+ {KE_KEY, 0x27, {KEY_HELP} },
{KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */
{KE_IGNORE, 0x41, {KEY_MUTE} },
{KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} },
@@ -112,7 +113,13 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
{KE_IGNORE, 0x48, {KEY_VOLUMEUP} },
{KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} },
{KE_IGNORE, 0x4a, {KEY_VOLUMEDOWN} },
- {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} },
+ /*
+	 * 0x61 is KEY_SWITCHVIDEOMODE. Usually this duplicates the events
+	 * of the "Video Bus" input device, but sometimes it is not a
+	 * dup. Map it to KEY_UNKNOWN instead of using KE_IGNORE so that
+ * udev/hwdb can override it on systems where it is not a dup.
+ */
+ {KE_KEY, 0x61, {KEY_UNKNOWN} },
{KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} },
{KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} },
{KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 89b604e04d7f..eec7d0ed7cf2 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -107,7 +107,7 @@ module_param(fnlock_default, bool, 0444);
#define WMI_EVENT_MASK 0xFFFF
#define FAN_CURVE_POINTS 8
-#define FAN_CURVE_BUF_LEN (FAN_CURVE_POINTS * 2)
+#define FAN_CURVE_BUF_LEN 32
#define FAN_CURVE_DEV_CPU 0x00
#define FAN_CURVE_DEV_GPU 0x01
/* Mask to determine if setting temperature or percentage */
@@ -1118,7 +1118,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
}
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MICMUTE_LED)) {
- asus->micmute_led.name = "asus::micmute";
+ asus->micmute_led.name = "platform::micmute";
asus->micmute_led.max_brightness = 1;
asus->micmute_led.brightness = ledtrig_audio_get(LED_AUDIO_MICMUTE);
asus->micmute_led.brightness_set_blocking = micmute_led_set;
@@ -2233,8 +2233,10 @@ static int fan_curve_get_factory_default(struct asus_wmi *asus, u32 fan_dev)
curves = &asus->custom_fan_curves[fan_idx];
err = asus_wmi_evaluate_method_buf(asus->dsts_id, fan_dev, mode, buf,
FAN_CURVE_BUF_LEN);
- if (err)
+ if (err) {
+ pr_warn("%s (0x%08x) failed: %d\n", __func__, fan_dev, err);
return err;
+ }
fan_curve_copy_from_buf(curves, buf);
curves->device_id = fan_dev;
@@ -2252,9 +2254,6 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- pr_debug("fan_curve_get_factory_default(0x%08x) failed: %d\n",
- fan_dev, err);
- /* Don't cause probe to fail on devices without fan-curves */
return 0;
}
diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c
index fb2e141f3eb8..384d0962ae93 100644
--- a/drivers/platform/x86/p2sb.c
+++ b/drivers/platform/x86/p2sb.c
@@ -42,10 +42,24 @@ static int p2sb_get_devfn(unsigned int *devfn)
return 0;
}
+/* Copy resource from the first BAR of the device in question */
static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
{
- /* Copy resource from the first BAR of the device in question */
- *mem = pdev->resource[0];
+ struct resource *bar0 = &pdev->resource[0];
+
+ /* Make sure we have no dangling pointers in the output */
+ memset(mem, 0, sizeof(*mem));
+
+ /*
+ * We copy only selected fields from the original resource.
+	 * Because the PCI device will be removed soon, we must not
+	 * rely on any of its allocated data, hence we copy no pointers.
+ */
+ mem->start = bar0->start;
+ mem->end = bar0->end;
+ mem->flags = bar0->flags;
+ mem->desc = bar0->desc;
+
return 0;
}
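Copying struct resource wholesale would also copy its name/parent/sibling/child pointers, which point into the PCI device that is about to be removed. A userspace sketch of the field-by-field copy idea (types and names are made up for illustration):

#include <stdio.h>
#include <string.h>

struct res {
	unsigned long start, end, flags;
	const char *name;	/* owned by the source object */
};

static void copy_selected(struct res *dst, const struct res *src)
{
	memset(dst, 0, sizeof(*dst));	/* no dangling pointers */
	dst->start = src->start;
	dst->end = src->end;
	dst->flags = src->flags;
	/* dst->name deliberately left NULL */
}

int main(void)
{
	struct res bar0 = { 0xfd000000, 0xfdffffff, 0x200, "p2sb" };
	struct res mem;

	copy_selected(&mem, &bar0);
	printf("%#lx-%#lx name=%p\n", mem.start, mem.end, (void *)mem.name);
	return 0;
}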
diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
index 154317e9910d..5c757c7f64de 100644
--- a/drivers/platform/x86/pmc_atom.c
+++ b/drivers/platform/x86/pmc_atom.c
@@ -232,7 +232,7 @@ static void pmc_power_off(void)
pm1_cnt_port = acpi_base_addr + PM1_CNT;
pm1_cnt_value = inl(pm1_cnt_port);
- pm1_cnt_value &= SLEEP_TYPE_MASK;
+ pm1_cnt_value &= ~SLEEP_TYPE_MASK;
pm1_cnt_value |= SLEEP_TYPE_S5;
pm1_cnt_value |= SLEEP_ENABLE;
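The one-character fix above is the classic read-modify-write masking bug: clearing a register field requires ANDing with the inverted mask before ORing in the new value. A userspace sketch with made-up register values:

#include <stdint.h>
#include <stdio.h>

#define SLEEP_TYPE_MASK	0x1c00u	/* illustrative field mask */
#define SLEEP_TYPE_S5	0x1c00u
#define SLEEP_ENABLE	0x2000u

int main(void)
{
	uint32_t reg = 0x0123u;	/* pretend this came from inl() */

	uint32_t wrong = (reg & SLEEP_TYPE_MASK) | SLEEP_TYPE_S5 | SLEEP_ENABLE;
	uint32_t right = (reg & ~SLEEP_TYPE_MASK) | SLEEP_TYPE_S5 | SLEEP_ENABLE;

	/* 'wrong' throws away every bit outside the sleep-type field */
	printf("wrong %#x right %#x\n", (unsigned)wrong, (unsigned)right);
	return 0;
}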
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 22d4e8633e30..2dbb9fc011a7 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -10592,10 +10592,9 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
/* Ensure initial values are correct */
dytc_profile_refresh();
- /* Set AMT correctly now we know current profile */
- if ((dytc_capabilities & BIT(DYTC_FC_PSC)) &&
- (dytc_capabilities & BIT(DYTC_FC_AMT)))
- dytc_control_amt(dytc_current_profile == PLATFORM_PROFILE_BALANCED);
+ /* Workaround for https://bugzilla.kernel.org/show_bug.cgi?id=216347 */
+ if (dytc_capabilities & BIT(DYTC_FC_PSC))
+ dytc_profile_set(NULL, PLATFORM_PROFILE_BALANCED);
return 0;
}
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 480375977435..4acd6fa8d43b 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -663,9 +663,23 @@ static const struct x86_i2c_client_info chuwi_hi8_i2c_clients[] __initconst = {
},
};
+static int __init chuwi_hi8_init(void)
+{
+ /*
+ * Avoid the acpi_unregister_gsi() call in x86_acpi_irq_helper_get()
+	 * breaking the touchscreen and logging various errors when the Windows
+ * BIOS is used.
+ */
+ if (acpi_dev_present("MSSL0001", NULL, 1))
+ return -ENODEV;
+
+ return 0;
+}
+
static const struct x86_dev_info chuwi_hi8_info __initconst = {
.i2c_client_info = chuwi_hi8_i2c_clients,
.i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
+ .init = chuwi_hi8_init,
};
#define CZC_EC_EXTRA_PORT 0x68
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d8373cb04f90..d3e8dc32832d 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2733,13 +2733,18 @@ static int _regulator_do_enable(struct regulator_dev *rdev)
*/
static int _regulator_handle_consumer_enable(struct regulator *regulator)
{
+ int ret;
struct regulator_dev *rdev = regulator->rdev;
lockdep_assert_held_once(&rdev->mutex.base);
regulator->enable_count++;
- if (regulator->uA_load && regulator->enable_count == 1)
- return drms_uA_update(rdev);
+ if (regulator->uA_load && regulator->enable_count == 1) {
+ ret = drms_uA_update(rdev);
+ if (ret)
+ regulator->enable_count--;
+ return ret;
+ }
return 0;
}
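The pattern here is state rollback on a failed side effect: the enable count is bumped before drms_uA_update() and must be undone if that call fails, or a later disable would unbalance the bookkeeping. A small userspace sketch of the idiom (stand-in names, not the regulator API):

#include <stdio.h>

struct consumer {
	int enable_count;
	int uA_load;
};

/* Stand-in for the fallible update; always fails to show the rollback. */
static int update_load(struct consumer *c)
{
	(void)c;
	return -1;
}

static int handle_enable(struct consumer *c)
{
	int ret;

	c->enable_count++;
	if (c->uA_load && c->enable_count == 1) {
		ret = update_load(c);
		if (ret)
			c->enable_count--;	/* undo on failure */
		return ret;
	}
	return 0;
}

int main(void)
{
	struct consumer c = { 0, 100 };

	printf("ret=%d count=%d\n", handle_enable(&c), c.enable_count);
	return 0;
}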
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 6b617024a67d..d899d6e98fb8 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -766,7 +766,7 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
((pfuze_chip->chip_id == PFUZE3000) ? "3000" : "3001"))));
memcpy(pfuze_chip->regulator_descs, pfuze_chip->pfuze_regulators,
- sizeof(pfuze_chip->regulator_descs));
+ regulator_num * sizeof(struct pfuze_regulator));
ret = pfuze_parse_regulators_dt(pfuze_chip);
if (ret)
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 0738238ed6cc..9857dba09c95 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -182,6 +182,15 @@ void scsi_remove_host(struct Scsi_Host *shost)
mutex_unlock(&shost->scan_mutex);
scsi_proc_host_rm(shost);
+ /*
+ * New SCSI devices cannot be attached anymore because of the SCSI host
+	 * state, so drop the tag set refcnt. Wait until the tag set refcnt drops
+ * to zero because .exit_cmd_priv implementations may need the host
+ * pointer.
+ */
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
+ wait_for_completion(&shost->tagset_freed);
+
spin_lock_irqsave(shost->host_lock, flags);
if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
@@ -190,15 +199,6 @@ void scsi_remove_host(struct Scsi_Host *shost)
transport_unregister_device(&shost->shost_gendev);
device_unregister(&shost->shost_dev);
device_del(&shost->shost_gendev);
-
- /*
- * After scsi_remove_host() has returned the scsi LLD module can be
- * unloaded and/or the host resources can be released. Hence wait until
- * the dependent SCSI targets and devices are gone before returning.
- */
- wait_event(shost->targets_wq, atomic_read(&shost->target_count) == 0);
-
- scsi_mq_destroy_tags(shost);
}
EXPORT_SYMBOL(scsi_remove_host);
@@ -254,6 +254,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
if (error)
goto fail;
+ kref_init(&shost->tagset_refcnt);
+ init_completion(&shost->tagset_freed);
+
/*
* Increase usage count temporarily here so that calling
* scsi_autopm_put_host() will trigger runtime idle if there is
@@ -309,8 +312,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
return error;
/*
- * Any resources associated with the SCSI host in this function except
- * the tag set will be freed by scsi_host_dev_release().
+ * Any host allocation in this function will be freed in
+ * scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
@@ -326,7 +329,7 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
- scsi_mq_destroy_tags(shost);
+ kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
fail:
return error;
}
@@ -406,7 +409,6 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
INIT_LIST_HEAD(&shost->starved_list);
init_waitqueue_head(&shost->host_wait);
mutex_init(&shost->scan_mutex);
- init_waitqueue_head(&shost->targets_wq);
index = ida_alloc(&host_index_ida, GFP_KERNEL);
if (index < 0) {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c69c5a0979ec..55a1ad6eed03 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -8053,7 +8053,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
- return -ENOMEM;
+ goto out_destroy_workqueue;
/* IF Type 2 ports get initialized now. */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
@@ -8481,6 +8481,9 @@ out_free_bsmbx:
lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
lpfc_mem_free(phba);
+out_destroy_workqueue:
+ destroy_workqueue(phba->wq);
+ phba->wq = NULL;
return rc;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 084c0f9fdc3a..938a5e435943 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4272,7 +4272,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
@@ -4562,7 +4562,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
- cmd->result = DID_REQUEUE << 16;
+ cmd->result = DID_TRANSPORT_DISRUPTED << 16;
break;
}
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index def37a7e5980..bd6a5f1bd532 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3670,6 +3670,7 @@ static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
fw_event = list_first_entry(&ioc->fw_event_list,
struct fw_event_work, list);
list_del_init(&fw_event->list);
+ fw_event_work_put(fw_event);
}
spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
@@ -3751,7 +3752,6 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
if (cancel_work_sync(&fw_event->work))
fw_event_work_put(fw_event);
- fw_event_work_put(fw_event);
}
ioc->fw_events_cleanup = 0;
}
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 086ec5b5862d..c59eac7a32f2 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -586,13 +586,10 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- /*
- * Decreasing the module reference count before the device reference
- * count is safe since scsi_remove_host() only returns after all
- * devices have been removed.
- */
- module_put(sdev->host->hostt->module);
+ struct module *mod = sdev->host->hostt->module;
+
put_device(&sdev->sdev_gendev);
+ module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ef08029a0079..96e7e3eaca29 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1983,9 +1983,13 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost)
return blk_mq_alloc_tag_set(tag_set);
}
-void scsi_mq_destroy_tags(struct Scsi_Host *shost)
+void scsi_mq_free_tags(struct kref *kref)
{
+ struct Scsi_Host *shost = container_of(kref, typeof(*shost),
+ tagset_refcnt);
+
blk_mq_free_tag_set(&shost->tag_set);
+ complete(&shost->tagset_freed);
}
/**
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 429663bd78ec..f385b3f04d6e 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -94,7 +94,7 @@ extern void scsi_run_host_queues(struct Scsi_Host *shost);
extern void scsi_requeue_run_queue(struct work_struct *work);
extern void scsi_start_queue(struct scsi_device *sdev);
extern int scsi_mq_setup_tags(struct Scsi_Host *shost);
-extern void scsi_mq_destroy_tags(struct Scsi_Host *shost);
+extern void scsi_mq_free_tags(struct kref *kref);
extern void scsi_exit_queue(void);
extern void scsi_evt_thread(struct work_struct *work);
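Across these SCSI hunks the tag set's lifetime moves from an open-coded wait to a kref: every scsi_device takes a reference, the release callback frees the tag set and signals a completion, and scsi_remove_host() drops its reference and waits. A kernel-style sketch of the shape of that pattern (not standalone; details elided):

#include <linux/kref.h>
#include <linux/completion.h>

struct host {
	struct kref tagset_refcnt;
	struct completion tagset_freed;
};

static void free_tags(struct kref *kref)
{
	struct host *h = container_of(kref, struct host, tagset_refcnt);

	/* blk_mq_free_tag_set(&h->tag_set); */
	complete(&h->tagset_freed);
}

/* Setup:    kref_init(&h->tagset_refcnt); init_completion(&h->tagset_freed);
 * Per dev:  kref_get() on create, kref_put(..., free_tags) on destroy.
 * Teardown: kref_put(&h->tagset_refcnt, free_tags);
 *           wait_for_completion(&h->tagset_freed); */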
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index ac6059702d13..5d27f5196de6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -340,6 +340,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
+ kref_get(&sdev->host->tagset_refcnt);
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
@@ -406,14 +407,9 @@ static void scsi_target_destroy(struct scsi_target *starget)
static void scsi_target_dev_release(struct device *dev)
{
struct device *parent = dev->parent;
- struct Scsi_Host *shost = dev_to_shost(parent);
struct scsi_target *starget = to_scsi_target(dev);
kfree(starget);
-
- if (atomic_dec_return(&shost->target_count) == 0)
- wake_up(&shost->targets_wq);
-
put_device(parent);
}
@@ -526,10 +522,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
starget->state = STARGET_CREATED;
starget->scsi_level = SCSI_2;
starget->max_target_blocked = SCSI_DEFAULT_TARGET_BLOCKED;
- init_waitqueue_head(&starget->sdev_wq);
-
- atomic_inc(&shost->target_count);
-
retry:
spin_lock_irqsave(shost->host_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 9dad2fd5297f..5d61f58399dc 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -443,15 +443,18 @@ static void scsi_device_cls_release(struct device *class_dev)
static void scsi_device_dev_release_usercontext(struct work_struct *work)
{
- struct scsi_device *sdev = container_of(work, struct scsi_device,
- ew.work);
- struct scsi_target *starget = sdev->sdev_target;
+ struct scsi_device *sdev;
struct device *parent;
struct list_head *this, *tmp;
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
unsigned long flags;
+ struct module *mod;
+
+ sdev = container_of(work, struct scsi_device, ew.work);
+
+ mod = sdev->host->hostt->module;
scsi_dh_release_device(sdev);
@@ -513,16 +516,19 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(sdev->inquiry);
kfree(sdev);
- if (starget && atomic_dec_return(&starget->sdev_count) == 0)
- wake_up(&starget->sdev_wq);
-
if (parent)
put_device(parent);
+ module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+
+	/* Set the module pointer to NULL if the module is being unloaded */
+ if (!try_module_get(sdp->host->hostt->module))
+ sdp->host->hostt->module = NULL;
+
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
@@ -1470,6 +1476,7 @@ void __scsi_remove_device(struct scsi_device *sdev)
mutex_unlock(&sdev->state_mutex);
blk_mq_destroy_queue(sdev->request_queue);
+ kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
cancel_work_sync(&sdev->requeue_work);
if (sdev->host->hostt->slave_destroy)
@@ -1529,14 +1536,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
-
- /*
- * After scsi_remove_target() returns its caller can remove resources
- * associated with @starget, e.g. an rport or session. Wait until all
- * devices associated with @starget have been removed to prevent that
- * a SCSI error handling callback function triggers a use-after-free.
- */
- wait_event(starget->sdev_wq, atomic_read(&starget->sdev_count) == 0);
}
/**
@@ -1647,9 +1646,6 @@ void scsi_sysfs_device_initialize(struct scsi_device *sdev)
list_add_tail(&sdev->same_target_siblings, &starget->devices);
list_add_tail(&sdev->siblings, &shost->__devices);
spin_unlock_irqrestore(shost->host_lock, flags);
-
- atomic_inc(&starget->sdev_count);
-
/*
* device can now only be removed via __scsi_remove_device() so hold
* the target. Target will be held in CREATED state until something
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index d6b30d521307..775da69b8efa 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -684,13 +684,14 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
const struct of_device_id *of_id = NULL;
struct device_node *dn;
void __iomem *base;
- int ret, i;
+ int ret, i, s;
/* AON ctrl registers */
base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
if (IS_ERR(base)) {
pr_err("error mapping AON_CTRL\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto aon_err;
}
ctrl.aon_ctrl_base = base;
@@ -700,8 +701,10 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
/* Assume standard offset */
ctrl.aon_sram = ctrl.aon_ctrl_base +
AON_CTRL_SYSTEM_DATA_RAM_OFS;
+ s = 0;
} else {
ctrl.aon_sram = base;
+ s = 1;
}
writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
@@ -711,7 +714,8 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
(const void **)&ddr_phy_data);
if (IS_ERR(base)) {
pr_err("error mapping DDR PHY\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_phy_err;
}
ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
@@ -731,17 +735,20 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, ddr_shimphy_dt_ids) {
i = ctrl.num_memc;
if (i >= MAX_NUM_MEMC) {
+ of_node_put(dn);
pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
break;
}
base = of_io_request_and_map(dn, 0, dn->full_name);
if (IS_ERR(base)) {
+ of_node_put(dn);
if (!ctrl.support_warm_boot)
break;
pr_err("error mapping DDR SHIMPHY %d\n", i);
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto ddr_shimphy_err;
}
ctrl.memcs[i].ddr_shimphy_base = base;
ctrl.num_memc++;
@@ -752,14 +759,18 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
for_each_matching_node(dn, brcmstb_memc_of_match) {
base = of_iomap(dn, 0);
if (!base) {
+ of_node_put(dn);
pr_err("error mapping DDR Sequencer %d\n", i);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto brcmstb_memc_err;
}
of_id = of_match_node(brcmstb_memc_of_match, dn);
if (!of_id) {
iounmap(base);
- return -EINVAL;
+ of_node_put(dn);
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ddr_seq_data = of_id->data;
@@ -779,21 +790,24 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
dn = of_find_matching_node(NULL, sram_dt_ids);
if (!dn) {
pr_err("SRAM not found\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto brcmstb_memc_err;
}
ret = brcmstb_init_sram(dn);
of_node_put(dn);
if (ret) {
pr_err("error setting up SRAM for PM\n");
- return ret;
+ goto brcmstb_memc_err;
}
ctrl.pdev = pdev;
ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
- if (!ctrl.s3_params)
- return -ENOMEM;
+ if (!ctrl.s3_params) {
+ ret = -ENOMEM;
+ goto s3_params_err;
+ }
ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
sizeof(*ctrl.s3_params),
DMA_TO_DEVICE);
@@ -813,7 +827,21 @@ static int brcmstb_pm_probe(struct platform_device *pdev)
out:
kfree(ctrl.s3_params);
-
+s3_params_err:
+ iounmap(ctrl.boot_sram);
+brcmstb_memc_err:
+ for (i--; i >= 0; i--)
+ iounmap(ctrl.memcs[i].ddr_ctrl);
+ddr_shimphy_err:
+ for (i = 0; i < ctrl.num_memc; i++)
+ iounmap(ctrl.memcs[i].ddr_shimphy_base);
+
+ iounmap(ctrl.memcs[0].ddr_phy_base);
+ddr_phy_err:
+ iounmap(ctrl.aon_ctrl_base);
+ if (s)
+ iounmap(ctrl.aon_sram);
+aon_err:
pr_warn("PM: initialization failed with code %d\n", ret);
return ret;
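The probe function gains a goto unwind ladder here: each failure jumps to a label that releases everything acquired so far, in reverse order, instead of returning and leaking the earlier mappings. A userspace sketch of the idiom using plain allocations:

#include <stdlib.h>

static int probe(void)
{
	void *a, *b, *c;
	int ret;

	a = malloc(16);
	if (!a)
		return -1;

	b = malloc(16);
	if (!b) {
		ret = -1;
		goto err_a;
	}

	c = malloc(16);
	if (!c) {
		ret = -1;
		goto err_b;
	}

	/* Success: a real probe would keep these; freed here for tidiness. */
	free(c);
	free(b);
	free(a);
	return 0;

err_b:
	free(b);
err_a:
	free(a);
	return ret;
}

int main(void)
{
	return probe();
}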
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 07d52cafbb31..fcec6ed83d5e 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select FSL_GUTS
select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 6383a4edc360..88aee59730e3 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -335,6 +335,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
}
+ reset_control_assert(domain->reset);
+
/* Enable reset clocks for all devices in the domain */
ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
if (ret) {
@@ -342,7 +344,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
goto out_regulator_disable;
}
- reset_control_assert(domain->reset);
+	/* Delay to let the reset propagate */
+ udelay(5);
if (domain->bits.pxx) {
/* request the domain to power up */
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
index dff7529268e4..972f289d300a 100644
--- a/drivers/soc/imx/imx8m-blk-ctrl.c
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -243,7 +243,6 @@ static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
ret = PTR_ERR(domain->power_dev);
goto cleanup_pds;
}
- dev_set_name(domain->power_dev, "%s", data->name);
domain->genpd.name = data->name;
domain->genpd.power_on = imx8m_blk_ctrl_power_on;
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index 9df970eeca45..3a992a6478c3 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -169,7 +169,7 @@ struct qcom_swrm_ctrl {
u8 wcmd_id;
struct qcom_swrm_port_config pconfig[QCOM_SDW_MAX_PORTS];
struct sdw_stream_runtime *sruntime[SWRM_MAX_DAIS];
- enum sdw_slave_status status[SDW_MAX_DEVICES];
+ enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
int (*reg_read)(struct qcom_swrm_ctrl *ctrl, int reg, u32 *val);
int (*reg_write)(struct qcom_swrm_ctrl *ctrl, int reg, int val);
u32 slave_status;
@@ -420,7 +420,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
- for (dev_num = 0; dev_num < SDW_MAX_DEVICES; dev_num++) {
+ for (dev_num = 0; dev_num <= SDW_MAX_DEVICES; dev_num++) {
status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ));
if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) {
@@ -440,7 +440,7 @@ static void qcom_swrm_get_device_status(struct qcom_swrm_ctrl *ctrl)
ctrl->reg_read(ctrl, SWRM_MCP_SLV_STATUS, &val);
ctrl->slave_status = val;
- for (i = 0; i < SDW_MAX_DEVICES; i++) {
+ for (i = 0; i <= SDW_MAX_DEVICES; i++) {
u32 s;
s = (val >> (i * 2));
@@ -1356,10 +1356,6 @@ static int qcom_swrm_probe(struct platform_device *pdev)
ctrl->bus.compute_params = &qcom_swrm_compute_params;
ctrl->bus.clk_stop_timeout = 300;
- ctrl->audio_cgcr = devm_reset_control_get_exclusive(dev, "swr_audio_cgcr");
- if (IS_ERR(ctrl->audio_cgcr))
- dev_err(dev, "Failed to get audio_cgcr reset required for soundwire-v1.6.0\n");
-
ret = qcom_swrm_get_port_config(ctrl);
if (ret)
goto err_clk;
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
index 267342dfa738..2dcbe166df63 100644
--- a/drivers/spi/spi-bitbang-txrx.h
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -116,6 +116,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -135,7 +136,7 @@ bitbang_txrx_le_cpha0(struct spi_device *spi,
/* sample LSB (from slave) on leading edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
setsck(spi, cpol);
}
return word;
@@ -148,6 +149,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
{
/* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+ u8 rxbit = bits - 1;
u32 oldbit = !(word & 1);
/* clock starts at inactive polarity */
for (; likely(bits); bits--) {
@@ -168,7 +170,7 @@ bitbang_txrx_le_cpha1(struct spi_device *spi,
/* sample LSB (from slave) on trailing edge */
word >>= 1;
if ((flags & SPI_MASTER_NO_RX) == 0)
- word |= getmiso(spi) << (bits - 1);
+ word |= getmiso(spi) << rxbit;
}
return word;
}
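The fix captures the receive bit position before the loop starts: the loop counter 'bits' is decremented each iteration, so the old expression getmiso(spi) << (bits - 1) shifted each sample one position less than the previous one. A userspace model of the corrected LSB-first assembly (fake MISO data, illustrative names):

#include <stdint.h>
#include <stdio.h>

static uint32_t rx_lsb_first(const int *miso, unsigned bits)
{
	uint8_t rxbit = bits - 1;	/* fixed position, as in the patch */
	uint32_t word = 0;

	for (unsigned i = 0; bits; bits--, i++) {
		word >>= 1;
		word |= (uint32_t)miso[i] << rxbit;
	}
	return word;
}

int main(void)
{
	int miso[8] = { 1, 0, 1, 1, 0, 0, 1, 0 };	/* LSB first */

	printf("%#x\n", (unsigned)rx_lsb_first(miso, 8));	/* 0x4d */
	return 0;
}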
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 72b1a5a2298c..e12ab5b43f34 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -39,6 +39,7 @@
#define CQSPI_DISABLE_DAC_MODE BIT(1)
#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
#define CQSPI_NO_SUPPORT_WR_COMPLETION BIT(3)
+#define CQSPI_SLOW_SRAM BIT(4)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -87,6 +88,7 @@ struct cqspi_st {
bool use_dma_read;
u32 pd_dev_id;
bool wr_completion;
+ bool slow_sram;
};
struct cqspi_driver_platdata {
@@ -333,7 +335,10 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
}
}
- irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else if (!cqspi->slow_sram)
+ irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
+ else
+ irq_status &= CQSPI_REG_IRQ_WATERMARK | CQSPI_IRQ_MASK_WR;
if (irq_status)
complete(&cqspi->transfer_complete);
@@ -673,7 +678,18 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
/* Clear all interrupts. */
writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
- writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ /*
+	 * On the SoCFPGA platform, reading the SRAM is slow due to a
+	 * hardware limitation and causes a read-interrupt storm on the
+	 * CPU. Enable only the watermark interrupt here; all read
+	 * interrupts are disabled later so that the "bytes to read"
+	 * loop runs with read interrupts off for maximum performance.
+ */
+
+ if (!cqspi->slow_sram)
+ writel(CQSPI_IRQ_MASK_RD, reg_base + CQSPI_REG_IRQMASK);
+ else
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
reinit_completion(&cqspi->transfer_complete);
writel(CQSPI_REG_INDIRECTRD_START_MASK,
@@ -684,6 +700,13 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS)))
ret = -ETIMEDOUT;
+ /*
+	 * Disable all read interrupts until we are done with the
+	 * "bytes to read" loop below.
+ */
+ if (cqspi->slow_sram)
+ writel(0x0, reg_base + CQSPI_REG_IRQMASK);
+
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
if (ret && bytes_to_read == 0) {
@@ -715,8 +738,11 @@ static int cqspi_indirect_read_execute(struct cqspi_flash_pdata *f_pdata,
bytes_to_read = cqspi_get_rd_sram_level(cqspi);
}
- if (remaining > 0)
+ if (remaining > 0) {
reinit_completion(&cqspi->transfer_complete);
+ if (cqspi->slow_sram)
+ writel(CQSPI_REG_IRQ_WATERMARK, reg_base + CQSPI_REG_IRQMASK);
+ }
}
/* Check indirect done status */
@@ -1667,6 +1693,8 @@ static int cqspi_probe(struct platform_device *pdev)
cqspi->use_dma_read = true;
if (ddata->quirks & CQSPI_NO_SUPPORT_WR_COMPLETION)
cqspi->wr_completion = false;
+ if (ddata->quirks & CQSPI_SLOW_SRAM)
+ cqspi->slow_sram = true;
if (of_device_is_compatible(pdev->dev.of_node,
"xlnx,versal-ospi-1.0"))
@@ -1779,7 +1807,9 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
};
static const struct cqspi_driver_platdata socfpga_qspi = {
- .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_NO_SUPPORT_WR_COMPLETION,
+ .quirks = CQSPI_DISABLE_DAC_MODE
+ | CQSPI_NO_SUPPORT_WR_COMPLETION
+ | CQSPI_SLOW_SRAM,
};
static const struct cqspi_driver_platdata versal_ospi = {
diff --git a/drivers/spi/spi-mux.c b/drivers/spi/spi-mux.c
index f5d32ec4634e..0709e987bd5a 100644
--- a/drivers/spi/spi-mux.c
+++ b/drivers/spi/spi-mux.c
@@ -161,6 +161,7 @@ static int spi_mux_probe(struct spi_device *spi)
ctlr->num_chipselect = mux_control_states(priv->mux);
ctlr->bus_num = -1;
ctlr->dev.of_node = spi->dev.of_node;
+ ctlr->must_async = true;
ret = devm_spi_register_controller(&spi->dev, ctlr);
if (ret)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 83da8862b8f2..32c01e684af3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1727,8 +1727,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
- if (!ret)
- kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
ctlr->cur_msg = NULL;
ctlr->fallback = false;
@@ -4033,7 +4032,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
* guard against reentrancy from a different context. The io_mutex
* will catch those cases.
*/
- if (READ_ONCE(ctlr->queue_empty)) {
+ if (READ_ONCE(ctlr->queue_empty) && !ctlr->must_async) {
message->actual_length = 0;
message->status = -EINPROGRESS;
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index cac9553666e6..aa100b5141e1 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -18,6 +18,7 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
MODULE_AUTHOR("Realtek Semiconductor Corp.");
MODULE_VERSION(DRIVERVERSION);
+MODULE_FIRMWARE("rtlwifi/rtl8188eufw.bin");
#define CONFIG_BR_EXT_BRNAME "br0"
#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index cc2b44f60c46..9147d176da4f 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -28,6 +28,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Realtek demoboard ===*/
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill USB-N150 Nano */
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 2326aae6709e..bb7db96ed821 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -117,34 +117,6 @@ static void r871x_internal_cmd_hdl(struct _adapter *padapter, u8 *pbuf)
kfree(pdrvcmd->pbuf);
}
-static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
-static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
-{
- void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
- struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
-
- /* invoke cmd->callback function */
- pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (!pcmd_callback)
- r8712_free_cmd_obj(pcmd);
- else
- pcmd_callback(padapter, pcmd);
- return H2C_SUCCESS;
-}
-
static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
{
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
@@ -213,14 +185,6 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
pcmd_r = NULL;
switch (pcmd->cmdcode) {
- case GEN_CMD_CODE(_Read_MACREG):
- read_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
- case GEN_CMD_CODE(_Write_MACREG):
- write_macreg_hdl(padapter, (u8 *)pcmd);
- pcmd_r = pcmd;
- break;
case GEN_CMD_CODE(_Read_BBREG):
read_bbreg_hdl(padapter, (u8 *)pcmd);
break;
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 1175f3a46859..27295bda3e0b 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -9,6 +9,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
+#include <linux/uaccess.h>
#include <linux/uio.h>
#include "tee_private.h"
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig
index e76a6c173637..f12d0a3ee3e2 100644
--- a/drivers/thunderbolt/Kconfig
+++ b/drivers/thunderbolt/Kconfig
@@ -29,8 +29,7 @@ config USB4_DEBUGFS_WRITE
config USB4_KUNIT_TEST
bool "KUnit tests" if !KUNIT_ALL_TESTS
- depends on (USB4=m || KUNIT=y)
- depends on KUNIT
+ depends on USB4 && KUNIT=y
default KUNIT_ALL_TESTS
config USB4_DMA_TEST
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index e5ede5debfb0..0c661a706160 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -407,7 +407,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
static int tb_async_error(const struct ctl_pkg *pkg)
{
- const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg;
+ const struct cfg_error_pkg *error = pkg->buffer;
if (pkg->frame.eof != TB_CFG_PKG_ERROR)
return false;
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 244f8cd38b25..c63c1f4ff9dc 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -3786,14 +3786,18 @@ int tb_switch_pcie_l1_enable(struct tb_switch *sw)
*/
int tb_switch_xhci_connect(struct tb_switch *sw)
{
- bool usb_port1, usb_port3, xhci_port1, xhci_port3;
struct tb_port *port1, *port3;
int ret;
+ if (sw->generation != 3)
+ return 0;
+
port1 = &sw->ports[1];
port3 = &sw->ports[3];
if (tb_switch_is_alpine_ridge(sw)) {
+ bool usb_port1, usb_port3, xhci_port1, xhci_port3;
+
usb_port1 = tb_lc_is_usb_plugged(port1);
usb_port3 = tb_lc_is_usb_plugged(port3);
xhci_port1 = tb_lc_is_xhci_connected(port1);
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index caa5c14ed57f..01c112e2e214 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -248,7 +248,7 @@ struct gsm_mux {
bool constipated; /* Asked by remote to shut up */
bool has_devices; /* Devices were registered */
- spinlock_t tx_lock;
+ struct mutex tx_mutex;
unsigned int tx_bytes; /* TX data outstanding */
#define TX_THRESH_HI 8192
#define TX_THRESH_LO 2048
@@ -256,7 +256,7 @@ struct gsm_mux {
struct list_head tx_data_list; /* Pending data packets */
/* Control messages */
- struct timer_list kick_timer; /* Kick TX queuing on timeout */
+ struct delayed_work kick_timeout; /* Kick TX queuing on timeout */
struct timer_list t2_timer; /* Retransmit timer for commands */
int cretries; /* Command retry counter */
struct gsm_control *pending_cmd;/* Our current pending command */
@@ -680,7 +680,6 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
struct gsm_msg *msg;
u8 *dp;
int ocr;
- unsigned long flags;
msg = gsm_data_alloc(gsm, addr, 0, control);
if (!msg)
@@ -702,10 +701,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control)
gsm_print_packet("Q->", addr, cr, control, NULL, 0);
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
list_add_tail(&msg->list, &gsm->tx_ctrl_list);
gsm->tx_bytes += msg->len;
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
gsmld_write_trigger(gsm);
return 0;
@@ -730,7 +729,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
spin_unlock_irqrestore(&dlci->lock, flags);
/* Clear data packets in MUX write queue */
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) {
if (msg->addr != addr)
continue;
@@ -738,7 +737,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci)
list_del(&msg->list);
kfree(msg);
}
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
@@ -1009,7 +1008,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
gsm->tx_bytes += msg->len;
gsmld_write_trigger(gsm);
- mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100);
+ schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100);
}
/**
@@ -1024,10 +1023,9 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
{
- unsigned long flags;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
__gsm_data_queue(dlci, msg);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/**
@@ -1039,7 +1037,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg)
* is data. Keep to the MRU of the mux. This path handles the usual tty
* interface which is a byte stream with optional modem data.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
@@ -1099,7 +1097,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
* is data. Keep to the MRU of the mux. This path handles framed data
* queued as skbuffs to the DLCI.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
@@ -1115,7 +1113,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
if (dlci->adaption == 4)
overhead = 1;
- /* dlci->skb is locked by tx_lock */
+ /* dlci->skb is locked by tx_mutex */
if (dlci->skb == NULL) {
dlci->skb = skb_dequeue_tail(&dlci->skb_list);
if (dlci->skb == NULL)
@@ -1169,7 +1167,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm,
* Push an empty frame in to the transmit queue to update the modem status
* bits and to transmit an optional break.
*
- * Caller must hold the tx_lock of the mux.
+ * Caller must hold the tx_mutex of the mux.
*/
static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci,
@@ -1283,13 +1281,12 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm)
static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
{
- unsigned long flags;
int sweep;
if (dlci->constipated)
return;
- spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
+ mutex_lock(&dlci->gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO);
if (dlci->gsm->tx_bytes == 0) {
@@ -1300,7 +1297,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
}
if (sweep)
gsm_dlci_data_sweep(dlci->gsm);
- spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
+ mutex_unlock(&dlci->gsm->tx_mutex);
}
/*
@@ -1984,24 +1981,23 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len)
}
/**
- * gsm_kick_timer - transmit if possible
- * @t: timer contained in our gsm object
+ * gsm_kick_timeout - transmit if possible
+ * @work: work contained in our gsm object
*
* Transmit data from DLCIs if the queue is empty. We can't rely on
* a tty wakeup except when we filled the pipe so we need to fire off
* new data ourselves in other cases.
*/
-static void gsm_kick_timer(struct timer_list *t)
+static void gsm_kick_timeout(struct work_struct *work)
{
- struct gsm_mux *gsm = from_timer(gsm, t, kick_timer);
- unsigned long flags;
+ struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work);
int sent = 0;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
/* If we have nothing running then we need to fire up */
if (gsm->tx_bytes < TX_THRESH_LO)
sent = gsm_dlci_data_sweep(gsm);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
if (sent && debug & 4)
pr_info("%s TX queue stalled\n", __func__);
@@ -2458,7 +2454,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
}
/* Finish outstanding timers, making sure they are done */
- del_timer_sync(&gsm->kick_timer);
+ cancel_delayed_work_sync(&gsm->kick_timeout);
del_timer_sync(&gsm->t2_timer);
/* Finish writing to ldisc */
@@ -2501,13 +2497,6 @@ static int gsm_activate_mux(struct gsm_mux *gsm)
if (dlci == NULL)
return -ENOMEM;
- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- INIT_WORK(&gsm->tx_work, gsmld_write_task);
- init_waitqueue_head(&gsm->event);
- spin_lock_init(&gsm->control_lock);
- spin_lock_init(&gsm->tx_lock);
-
if (gsm->encoding == 0)
gsm->receive = gsm0_receive;
else
@@ -2538,6 +2527,7 @@ static void gsm_free_mux(struct gsm_mux *gsm)
break;
}
}
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2609,9 +2599,15 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_lock_init(&gsm->lock);
mutex_init(&gsm->mutex);
+ mutex_init(&gsm->tx_mutex);
kref_init(&gsm->ref);
INIT_LIST_HEAD(&gsm->tx_ctrl_list);
INIT_LIST_HEAD(&gsm->tx_data_list);
+ INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
+ INIT_WORK(&gsm->tx_work, gsmld_write_task);
+ init_waitqueue_head(&gsm->event);
+ spin_lock_init(&gsm->control_lock);
gsm->t1 = T1;
gsm->t2 = T2;
@@ -2636,6 +2632,7 @@ static struct gsm_mux *gsm_alloc_mux(void)
}
spin_unlock(&gsm_mux_lock);
if (i == MAX_MUX) {
+ mutex_destroy(&gsm->tx_mutex);
mutex_destroy(&gsm->mutex);
kfree(gsm->txframe);
kfree(gsm->buf);
@@ -2791,17 +2788,16 @@ static void gsmld_write_trigger(struct gsm_mux *gsm)
static void gsmld_write_task(struct work_struct *work)
{
struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work);
- unsigned long flags;
int i, ret;
/* All outstanding control channel and control messages and one data
* frame are sent.
*/
ret = -ENODEV;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
if (gsm->tty)
ret = gsm_data_kick(gsm);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
if (ret >= 0)
for (i = 0; i < NUM_DLCI; i++)
@@ -2858,7 +2854,8 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
flags = *fp++;
switch (flags) {
case TTY_NORMAL:
- gsm->receive(gsm, *cp);
+ if (gsm->receive)
+ gsm->receive(gsm, *cp);
break;
case TTY_OVERRUN:
case TTY_BREAK:
@@ -2946,10 +2943,6 @@ static int gsmld_open(struct tty_struct *tty)
gsmld_attach_gsm(tty, gsm);
- timer_setup(&gsm->kick_timer, gsm_kick_timer, 0);
- timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
- INIT_WORK(&gsm->tx_work, gsmld_write_task);
-
return 0;
}
@@ -3012,7 +3005,6 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr)
{
struct gsm_mux *gsm = tty->disc_data;
- unsigned long flags;
int space;
int ret;
@@ -3020,13 +3012,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file,
return -ENODEV;
ret = -ENOBUFS;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
space = tty_write_room(tty);
if (space >= nr)
ret = tty->ops->write(tty, buf, nr);
else
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
return ret;
}
@@ -3323,14 +3315,13 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk)
{
struct gsm_mux *gsm = dlci->gsm;
- unsigned long flags;
if (dlci->state != DLCI_OPEN || dlci->adaption != 2)
return;
- spin_lock_irqsave(&gsm->tx_lock, flags);
+ mutex_lock(&gsm->tx_mutex);
gsm_dlci_modem_output(gsm, dlci, brk);
- spin_unlock_irqrestore(&gsm->tx_lock, flags);
+ mutex_unlock(&gsm->tx_mutex);
}
/**
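The n_gsm conversion above swaps a spinlock-protected timer for a mutex-protected delayed work item: work items run in process context and may sleep, which the TX path now requires, while timer callbacks run in atomic context and must not take a mutex. A minimal standalone sketch of the pattern follows; the demo_* names are illustrative, not driver code.

#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct demo {
	struct mutex lock;		/* replaces the old spinlock */
	struct delayed_work kick;	/* replaces the old timer_list */
};

static void demo_kick(struct work_struct *work)
{
	struct demo *d = container_of(work, struct demo, kick.work);

	mutex_lock(&d->lock);		/* sleeping lock: fine in a work item */
	/* ... push pending frames ... */
	mutex_unlock(&d->lock);
}

static void demo_init(struct demo *d)
{
	mutex_init(&d->lock);
	INIT_DELAYED_WORK(&d->kick, demo_kick);
	schedule_delayed_work(&d->kick, msecs_to_jiffies(100));
}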
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 30ba9eef7b39..7450d3853031 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -294,9 +294,6 @@ static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
mode = atmel_uart_readl(port, ATMEL_US_MR);
- /* Resetting serial mode to RS232 (0x0) */
- mode &= ~ATMEL_US_USMODE;
-
if (rs485conf->flags & SER_RS485_ENABLED) {
dev_dbg(port->dev, "Setting UART to RS485\n");
if (rs485conf->flags & SER_RS485_RX_DURING_TX)
@@ -306,6 +303,7 @@ static int atmel_config_rs485(struct uart_port *port, struct ktermios *termios,
atmel_uart_writel(port, ATMEL_US_TTGR,
rs485conf->delay_rts_after_send);
+ mode &= ~ATMEL_US_USMODE;
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index f6c33cd228c8..b20f6f2fa51c 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1394,9 +1394,9 @@ static int lpuart_config_rs485(struct uart_port *port, struct ktermios *termios,
* Note: UART is assumed to be active high.
*/
if (rs485->flags & SER_RS485_RTS_ON_SEND)
- modem &= ~UARTMODEM_TXRTSPOL;
- else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
modem |= UARTMODEM_TXRTSPOL;
+ else if (rs485->flags & SER_RS485_RTS_AFTER_SEND)
+ modem &= ~UARTMODEM_TXRTSPOL;
}
writeb(modem, sport->port.membase + UARTMODEM);
@@ -2191,6 +2191,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
uart_update_timeout(port, termios->c_cflag, baud);
/* wait for transmit engine to complete */
+ lpuart32_write(&sport->port, 0, UARTMODIR);
lpuart32_wait_bit_set(&sport->port, UARTSTAT, UARTSTAT_TC);
/* disable transmit and receive */
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9fdecc795b6b..5e287dedce01 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -470,7 +470,6 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
while (head) {
struct tty_buffer *next;
- unsigned char *p, *f = NULL;
unsigned int count;
/*
@@ -489,11 +488,16 @@ static void lookahead_bufs(struct tty_port *port, struct tty_buffer *head)
continue;
}
- p = char_buf_ptr(head, head->lookahead);
- if (~head->flags & TTYB_NORMAL)
- f = flag_buf_ptr(head, head->lookahead);
+ if (port->client_ops->lookahead_buf) {
+ unsigned char *p, *f = NULL;
+
+ p = char_buf_ptr(head, head->lookahead);
+ if (~head->flags & TTYB_NORMAL)
+ f = flag_buf_ptr(head, head->lookahead);
+
+ port->client_ops->lookahead_buf(port, p, f, count);
+ }
- port->client_ops->lookahead_buf(port, p, f, count);
head->lookahead += count;
}
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ae9c926acd6f..0b669c82ddc9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -4662,9 +4662,11 @@ static int con_font_set(struct vc_data *vc, struct console_font_op *op)
console_lock();
if (vc->vc_mode != KD_TEXT)
rc = -EINVAL;
- else if (vc->vc_sw->con_font_set)
+ else if (vc->vc_sw->con_font_set) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_set(vc, &font, op->flags);
- else
+ } else
rc = -ENOSYS;
console_unlock();
kfree(font.data);
@@ -4691,9 +4693,11 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op)
console_unlock();
return -EINVAL;
}
- if (vc->vc_sw->con_font_default)
+ if (vc->vc_sw->con_font_default) {
+ if (vc_is_sel(vc))
+ clear_selection();
rc = vc->vc_sw->con_font_default(vc, &font, s);
- else
+ } else
rc = -ENOSYS;
console_unlock();
if (!rc) {
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index d21b69997e75..5adcb349718c 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1530,7 +1530,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
TRB_LEN(le32_to_cpu(trb->length));
if (priv_req->num_of_trb > 1 &&
- le32_to_cpu(trb->control) & TRB_SMM)
+ le32_to_cpu(trb->control) & TRB_SMM &&
+ le32_to_cpu(trb->control) & TRB_CHAIN)
transfer_end = true;
cdns3_ep_inc_deq(priv_ep);
@@ -1690,6 +1691,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep)
ep_cfg &= ~EP_CFG_ENABLE;
writel(ep_cfg, &priv_dev->regs->ep_cfg);
priv_ep->flags &= ~EP_QUIRK_ISO_OUT_EN;
+ priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
}
cdns3_transfer_completed(priv_dev, priv_ep);
} else if (!(priv_ep->flags & EP_STALLED) &&
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 483bcb1213f7..cc637c4599e1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1810,6 +1810,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0c26, 0x0020), /* Icom ICF3400 Series */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2633acde7ac1..d4b1e70d1498 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -6038,6 +6038,11 @@ re_enumerate:
* the reset is over (using their post_reset method).
*
* Return: The same as for usb_reset_and_verify_device().
+ * However, if a reset is already in progress (for instance, if a
+ * driver doesn't have pre_ or post_reset() callbacks, and while
+ * being unbound or re-bound during the ongoing reset its disconnect()
+ * or probe() routine tries to perform a second, nested reset), the
+ * routine returns -EINPROGRESS.
*
* Note:
* The caller must own the device lock. For example, it's safe to use
@@ -6071,6 +6076,10 @@ int usb_reset_device(struct usb_device *udev)
return -EISDIR;
}
+ if (udev->reset_in_progress)
+ return -EINPROGRESS;
+ udev->reset_in_progress = 1;
+
port_dev = hub->ports[udev->portnum - 1];
/*
@@ -6135,6 +6144,7 @@ int usb_reset_device(struct usb_device *udev)
usb_autosuspend_device(udev);
memalloc_noio_restore(noio_flag);
+ udev->reset_in_progress = 0;
return ret;
}
EXPORT_SYMBOL_GPL(usb_reset_device);
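A hypothetical caller showing what the new -EINPROGRESS return means in practice: a disconnect() or probe() routine that runs while an outer reset is re-binding the driver can now detect the nested attempt and back off instead of recursing. demo_try_reset() is illustrative only.

#include <linux/usb.h>

static int demo_try_reset(struct usb_device *udev)
{
	int ret = usb_reset_device(udev);

	/* An outer reset is already running; treat that as success. */
	if (ret == -EINPROGRESS)
		return 0;
	return ret;
}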
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c8ba87df7abe..fd0ccf6f3ec5 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -154,9 +154,9 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_init) {
ret = hsotg->plat->phy_init(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_power_on(hsotg->phy);
+ ret = phy_init(hsotg->phy);
if (ret == 0)
- ret = phy_init(hsotg->phy);
+ ret = phy_power_on(hsotg->phy);
}
return ret;
@@ -188,9 +188,9 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
} else if (hsotg->plat && hsotg->plat->phy_exit) {
ret = hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type);
} else {
- ret = phy_exit(hsotg->phy);
+ ret = phy_power_off(hsotg->phy);
if (ret == 0)
- ret = phy_power_off(hsotg->phy);
+ ret = phy_exit(hsotg->phy);
}
if (ret)
return ret;
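The two hunks above restore the documented generic-PHY ordering: phy_init() before phy_power_on() on the way up, and the mirror image (phy_power_off() before phy_exit()) on the way down. A sketch of the pattern with error unwinding; the demo_* helpers are hypothetical.

#include <linux/phy/phy.h>

static int demo_phy_enable(struct phy *phy)
{
	int ret;

	ret = phy_init(phy);		/* first: initialize */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* then: power on */
	if (ret)
		phy_exit(phy);		/* unwind init on failure */

	return ret;
}

static void demo_phy_disable(struct phy *phy)
{
	phy_power_off(phy);		/* reverse order on teardown */
	phy_exit(phy);
}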
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c5c238ab3083..8c8e32651473 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -833,15 +833,16 @@ static void dwc3_core_exit(struct dwc3 *dwc)
{
dwc3_event_buffers_cleanup(dwc);
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
+ phy_power_off(dwc->usb2_generic_phy);
+ phy_power_off(dwc->usb3_generic_phy);
+
usb_phy_shutdown(dwc->usb2_phy);
usb_phy_shutdown(dwc->usb3_phy);
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
- usb_phy_set_suspend(dwc->usb2_phy, 1);
- usb_phy_set_suspend(dwc->usb3_phy, 1);
- phy_power_off(dwc->usb2_generic_phy);
- phy_power_off(dwc->usb3_generic_phy);
dwc3_clk_disable(dwc);
reset_control_assert(dwc->reset);
}
@@ -1821,7 +1822,6 @@ static int dwc3_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dwc);
dwc3_cache_hwparams(dwc);
- device_init_wakeup(&pdev->dev, of_property_read_bool(dev->of_node, "wakeup-source"));
spin_lock_init(&dwc->lock);
mutex_init(&dwc->mutex);
@@ -1879,16 +1879,16 @@ err5:
dwc3_debugfs_exit(dwc);
dwc3_event_buffers_cleanup(dwc);
- usb_phy_shutdown(dwc->usb2_phy);
- usb_phy_shutdown(dwc->usb3_phy);
- phy_exit(dwc->usb2_generic_phy);
- phy_exit(dwc->usb3_generic_phy);
-
usb_phy_set_suspend(dwc->usb2_phy, 1);
usb_phy_set_suspend(dwc->usb3_phy, 1);
phy_power_off(dwc->usb2_generic_phy);
phy_power_off(dwc->usb3_generic_phy);
+ usb_phy_shutdown(dwc->usb2_phy);
+ usb_phy_shutdown(dwc->usb3_phy);
+ phy_exit(dwc->usb2_generic_phy);
+ phy_exit(dwc->usb3_generic_phy);
+
dwc3_ulpi_exit(dwc);
err4:
@@ -1983,7 +1983,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
dwc3_core_exit(dwc);
break;
}
@@ -2044,7 +2044,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
- if (!PMSG_IS_AUTO(msg) && !device_can_wakeup(dwc->dev)) {
+ if (!PMSG_IS_AUTO(msg) && !device_may_wakeup(dwc->dev)) {
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6b018048fe2e..4ee4ca09873a 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -44,6 +44,7 @@
#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLM 0x54ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_RPL 0x460e
#define PCI_DEVICE_ID_INTEL_RPLS 0x7a61
#define PCI_DEVICE_ID_INTEL_MTLP 0x7ec1
#define PCI_DEVICE_ID_INTEL_MTL 0x7e7e
@@ -456,6 +457,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPLS),
(kernel_ulong_t) &dwc3_pci_intel_swnode, },
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index c5e482f53e9d..d3f3937d7005 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/pm_domain.h>
#include <linux/usb/of.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
@@ -299,11 +298,24 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom)
icc_put(qcom->icc_path_apps);
}
+/* Only usable in contexts where the role can not change. */
+static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom)
+{
+ struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
+
+ return dwc->xhci;
+}
+
static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
{
struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3);
- struct usb_hcd *hcd = platform_get_drvdata(dwc->xhci);
struct usb_device *udev;
+ struct usb_hcd __maybe_unused *hcd;
+
+ /*
+ * FIXME: Fix this layering violation.
+ */
+ hcd = platform_get_drvdata(dwc->xhci);
/*
* It is possible to query the speed of all children of
@@ -311,8 +323,11 @@ static enum usb_device_speed dwc3_qcom_read_usb2_speed(struct dwc3_qcom *qcom)
* currently supports only 1 port per controller. So
* this is sufficient.
*/
+#ifdef CONFIG_USB
udev = usb_hub_find_child(hcd->self.root_hub, 1);
-
+#else
+ udev = NULL;
+#endif
if (!udev)
return USB_SPEED_UNKNOWN;
@@ -387,7 +402,7 @@ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom)
dwc3_qcom_enable_wakeup_irq(qcom->ss_phy_irq, 0);
}
-static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
+static int dwc3_qcom_suspend(struct dwc3_qcom *qcom, bool wakeup)
{
u32 val;
int i, ret;
@@ -406,7 +421,11 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
if (ret)
dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret);
- if (device_may_wakeup(qcom->dev)) {
+ /*
+ * The role is stable during suspend as role switching is done from a
+ * freezable workqueue.
+ */
+ if (dwc3_qcom_is_host(qcom) && wakeup) {
qcom->usb2_speed = dwc3_qcom_read_usb2_speed(qcom);
dwc3_qcom_enable_interrupts(qcom);
}
@@ -416,7 +435,7 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom)
return 0;
}
-static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
+static int dwc3_qcom_resume(struct dwc3_qcom *qcom, bool wakeup)
{
int ret;
int i;
@@ -424,7 +443,7 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom)
if (!qcom->is_suspended)
return 0;
- if (device_may_wakeup(qcom->dev))
+ if (dwc3_qcom_is_host(qcom) && wakeup)
dwc3_qcom_disable_interrupts(qcom);
for (i = 0; i < qcom->num_clocks; i++) {
@@ -458,7 +477,11 @@ static irqreturn_t qcom_dwc3_resume_irq(int irq, void *data)
if (qcom->pm_suspended)
return IRQ_HANDLED;
- if (dwc->xhci)
+ /*
+ * This is safe as role switching is done from a freezable workqueue
+ * and the wakeup interrupts are disabled as part of resume.
+ */
+ if (dwc3_qcom_is_host(qcom))
pm_runtime_resume(&dwc->xhci->dev);
return IRQ_HANDLED;
@@ -757,13 +780,13 @@ dwc3_qcom_create_urs_usb_platdev(struct device *dev)
static int dwc3_qcom_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct device *dev = &pdev->dev;
- struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- int ret, i;
- bool ignore_pipe_clk;
- struct generic_pm_domain *genpd;
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_qcom *qcom;
+ struct resource *res, *parent_res = NULL;
+ int ret, i;
+ bool ignore_pipe_clk;
+ bool wakeup_source;
qcom = devm_kzalloc(&pdev->dev, sizeof(*qcom), GFP_KERNEL);
if (!qcom)
@@ -772,8 +795,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
- genpd = pd_to_genpd(qcom->dev->pm_domain);
-
if (has_acpi_companion(dev)) {
qcom->acpi_pdata = acpi_device_get_match_data(dev);
if (!qcom->acpi_pdata) {
@@ -881,16 +902,9 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ret)
goto interconnect_exit;
- if (device_can_wakeup(&qcom->dwc3->dev)) {
- /*
- * Setting GENPD_FLAG_ALWAYS_ON flag takes care of keeping
- * genpd on in both runtime suspend and system suspend cases.
- */
- genpd->flags |= GENPD_FLAG_ALWAYS_ON;
- device_init_wakeup(&pdev->dev, true);
- } else {
- genpd->flags |= GENPD_FLAG_RPM_ALWAYS_ON;
- }
+ wakeup_source = of_property_read_bool(dev->of_node, "wakeup-source");
+ device_init_wakeup(&pdev->dev, wakeup_source);
+ device_init_wakeup(&qcom->dwc3->dev, wakeup_source);
qcom->is_suspended = false;
pm_runtime_set_active(dev);
@@ -944,39 +958,45 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- int ret = 0;
+ bool wakeup = device_may_wakeup(dev);
+ int ret;
- ret = dwc3_qcom_suspend(qcom);
- if (!ret)
- qcom->pm_suspended = true;
+ ret = dwc3_qcom_suspend(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = true;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
+ bool wakeup = device_may_wakeup(dev);
int ret;
- ret = dwc3_qcom_resume(qcom);
- if (!ret)
- qcom->pm_suspended = false;
+ ret = dwc3_qcom_resume(qcom, wakeup);
+ if (ret)
+ return ret;
- return ret;
+ qcom->pm_suspended = false;
+
+ return 0;
}
static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_suspend(qcom);
+ return dwc3_qcom_suspend(qcom, true);
}
static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
- return dwc3_qcom_resume(qcom);
+ return dwc3_qcom_resume(qcom, true);
}
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
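After the change above, system sleep consults device_may_wakeup() while runtime PM always keeps the wakeup interrupts armed. The resulting callback shape, reduced to a sketch: demo_suspend() stands in for dwc3_qcom_suspend() and is hypothetical.

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

static int demo_suspend(struct device *dev, bool wakeup);	/* hypothetical */

static int demo_pm_suspend(struct device *dev)
{
	/* honour the user-visible wakeup policy for system sleep */
	return demo_suspend(dev, device_may_wakeup(dev));
}

static int demo_runtime_suspend(struct device *dev)
{
	/* runtime PM: wakeup interrupts are always wanted */
	return demo_suspend(dev, true);
}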
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index aeeec751c53c..eca945feeec3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2539,9 +2539,6 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
is_on = !!is_on;
- if (dwc->pullups_connected == is_on)
- return 0;
-
dwc->softconnect = is_on;
/*
@@ -2566,6 +2563,11 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
return 0;
}
+ if (dwc->pullups_connected == is_on) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
+
if (!is_on) {
ret = dwc3_gadget_soft_disconnect(dwc);
} else {
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f56c30cf151e..a7154fe8206d 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,8 +11,13 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include "../host/xhci-plat.h"
#include "core.h"
+static const struct xhci_plat_priv dwc3_xhci_plat_priv = {
+ .quirks = XHCI_SKIP_PHY_INIT,
+};
+
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -92,6 +97,11 @@ int dwc3_host_init(struct dwc3 *dwc)
goto err;
}
+ ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv,
+ sizeof(dwc3_xhci_plat_priv));
+ if (ret)
+ goto err;
+
memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props));
if (dwc->usb3_lpm_capable)
@@ -135,4 +145,5 @@ err:
void dwc3_host_exit(struct dwc3 *dwc)
{
platform_device_unregister(dwc->xhci);
+ dwc->xhci = NULL;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 1905a8d8e0c9..08726e4c68a5 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -291,6 +291,12 @@ static struct usb_endpoint_descriptor ss_ep_int_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor ss_ep_int_desc_comp = {
+ .bLength = sizeof(ss_ep_int_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .wBytesPerInterval = cpu_to_le16(6),
+};
+
/* Audio Streaming OUT Interface - Alt0 */
static struct usb_interface_descriptor std_as_out_if0_desc = {
.bLength = sizeof std_as_out_if0_desc,
@@ -604,7 +610,8 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&in_feature_unit_desc,
(struct usb_descriptor_header *)&io_out_ot_desc,
- (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc,
+ (struct usb_descriptor_header *)&ss_ep_int_desc_comp,
(struct usb_descriptor_header *)&std_as_out_if0_desc,
(struct usb_descriptor_header *)&std_as_out_if1_desc,
@@ -800,6 +807,7 @@ static void setup_headers(struct f_uac2_opts *opts,
struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
+ struct usb_ss_ep_comp_descriptor *ep_int_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
struct usb_endpoint_descriptor *epin_fback_desc;
@@ -827,6 +835,7 @@ static void setup_headers(struct f_uac2_opts *opts,
epin_fback_desc = &ss_epin_fback_desc;
epin_fback_desc_comp = &ss_epin_fback_desc_comp;
ep_int_desc = &ss_ep_int_desc;
+ ep_int_desc_comp = &ss_ep_int_desc_comp;
}
i = 0;
@@ -855,8 +864,11 @@ static void setup_headers(struct f_uac2_opts *opts,
if (EPOUT_EN(opts))
headers[i++] = USBDHDR(&io_out_ot_desc);
- if (FUOUT_EN(opts) || FUIN_EN(opts))
+ if (FUOUT_EN(opts) || FUIN_EN(opts)) {
headers[i++] = USBDHDR(ep_int_desc);
+ if (ep_int_desc_comp)
+ headers[i++] = USBDHDR(ep_int_desc_comp);
+ }
if (EPOUT_EN(opts)) {
headers[i++] = USBDHDR(&std_as_out_if0_desc);
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 03035dbbe97b..208c6a92780a 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -294,8 +294,10 @@ EXPORT_SYMBOL_GPL(fsg_lun_fsync_sub);
void store_cdrom_address(u8 *dest, int msf, u32 addr)
{
if (msf) {
- /* Convert to Minutes-Seconds-Frames */
- addr >>= 2; /* Convert to 2048-byte frames */
+ /*
+ * Convert to Minutes-Seconds-Frames.
+ * Sector size is already set to 2048 bytes.
+ */
addr += 2*75; /* Lead-in occupies 2 seconds */
dest[3] = addr % 75; /* Frames */
addr /= 75;
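A worked example of the Minutes-Seconds-Frames math above, assuming addr already counts 2048-byte frames (the removed "addr >>= 2" was a second, spurious division): 75 frames make a second, and the 2-second lead-in adds 150 frames.

#include <linux/types.h>

static void demo_lba_to_msf(u32 addr, u8 *m, u8 *s, u8 *f)
{
	addr += 2 * 75;		/* lead-in: 2 s * 75 frames/s = 150 */
	*f = addr % 75;		/* frames */
	addr /= 75;
	*s = addr % 60;		/* seconds */
	*m = addr / 60;		/* minutes */
}

/* e.g. addr = 16: 16 + 150 = 166 -> M:S:F = 0:2:16 */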
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index cafcf260394c..c63c0c2cf649 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -736,7 +736,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
ret = gadget->ops->pullup(gadget, 0);
if (!ret) {
gadget->connected = 0;
- gadget->udc->driver->disconnect(gadget);
+ mutex_lock(&udc_lock);
+ if (gadget->udc->driver)
+ gadget->udc->driver->disconnect(gadget);
+ mutex_unlock(&udc_lock);
}
out:
@@ -1489,7 +1492,6 @@ static int gadget_bind_driver(struct device *dev)
usb_gadget_udc_set_speed(udc, driver->max_speed);
- mutex_lock(&udc_lock);
ret = driver->bind(udc->gadget, driver);
if (ret)
goto err_bind;
@@ -1499,7 +1501,6 @@ static int gadget_bind_driver(struct device *dev)
goto err_start;
usb_gadget_enable_async_callbacks(udc);
usb_udc_connect_control(udc);
- mutex_unlock(&udc_lock);
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
return 0;
@@ -1512,6 +1513,7 @@ static int gadget_bind_driver(struct device *dev)
dev_err(&udc->dev, "failed to start %s: %d\n",
driver->function, ret);
+ mutex_lock(&udc_lock);
udc->driver = NULL;
driver->is_bound = false;
mutex_unlock(&udc_lock);
@@ -1529,7 +1531,6 @@ static void gadget_unbind_driver(struct device *dev)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
- mutex_lock(&udc_lock);
usb_gadget_disconnect(gadget);
usb_gadget_disable_async_callbacks(udc);
if (gadget->irq)
@@ -1537,6 +1538,7 @@ static void gadget_unbind_driver(struct device *dev)
udc->driver->unbind(gadget);
usb_gadget_udc_stop(udc);
+ mutex_lock(&udc_lock);
driver->is_bound = false;
udc->driver = NULL;
mutex_unlock(&udc_lock);
@@ -1612,7 +1614,7 @@ static ssize_t soft_connect_store(struct device *dev,
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
ssize_t ret;
- mutex_lock(&udc_lock);
+ device_lock(&udc->gadget->dev);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
ret = -EOPNOTSUPP;
@@ -1633,7 +1635,7 @@ static ssize_t soft_connect_store(struct device *dev,
ret = n;
out:
- mutex_unlock(&udc_lock);
+ device_unlock(&udc->gadget->dev);
return ret;
}
static DEVICE_ATTR_WO(soft_connect);
@@ -1652,11 +1654,15 @@ static ssize_t function_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- struct usb_gadget_driver *drv = udc->driver;
+ struct usb_gadget_driver *drv;
+ int rc = 0;
- if (!drv || !drv->function)
- return 0;
- return scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_lock(&udc_lock);
+ drv = udc->driver;
+ if (drv && drv->function)
+ rc = scnprintf(buf, PAGE_SIZE, "%s\n", drv->function);
+ mutex_unlock(&udc_lock);
+ return rc;
}
static DEVICE_ATTR_RO(function);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0fdc014c9401..4619d5e89d5b 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd)
* It will release and re-acquire the lock while calling ACPI
* method.
*/
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
+static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd,
u16 index, bool on, unsigned long *flags)
__must_hold(&xhci->lock)
{
@@ -1648,6 +1648,17 @@ int xhci_hub_status_data(struct usb_hcd *hcd, char *buf)
status = bus_state->resuming_ports;
+ /*
+ * SS devices are only visible to the roothub after link training completes.
+ * Keep polling the roothubs for a grace period after xHC start.
+ */
+ if (xhci->run_graceperiod) {
+ if (time_before(jiffies, xhci->run_graceperiod))
+ status = 1;
+ else
+ xhci->run_graceperiod = 0;
+ }
+
mask = PORT_CSC | PORT_PEC | PORT_OCC | PORT_PLC | PORT_WRC | PORT_CEC;
/* For each port, did anything change? If so, set that bit in buf. */
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 06a6b19acaae..579899eb24c1 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -425,7 +425,6 @@ static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
{
- u32 extra_cs_count;
u32 start_ss, last_ss;
u32 start_cs, last_cs;
@@ -461,18 +460,12 @@ static int check_sch_tt(struct mu3h_sch_ep_info *sch_ep, u32 offset)
if (last_cs > 7)
return -ESCH_CS_OVERFLOW;
- if (sch_ep->ep_type == ISOC_IN_EP)
- extra_cs_count = (last_cs == 7) ? 1 : 2;
- else /* ep_type : INTR IN / INTR OUT */
- extra_cs_count = 1;
-
- cs_count += extra_cs_count;
if (cs_count > 7)
cs_count = 7; /* HW limit */
sch_ep->cs_count = cs_count;
- /* one for ss, the other for idle */
- sch_ep->num_budget_microframes = cs_count + 2;
+ /* ss, idle are ignored */
+ sch_ep->num_budget_microframes = cs_count;
/*
* if interval=1, maxp > 752, num_budget_microframes is larger
@@ -771,8 +764,8 @@ int xhci_mtk_drop_ep(struct usb_hcd *hcd, struct usb_device *udev,
if (ret)
return ret;
- if (ep->hcpriv)
- drop_ep_quirk(hcd, udev, ep);
+ /* no need to check @ep->hcpriv, xhci_endpoint_disable() sets it to NULL */
+ drop_ep_quirk(hcd, udev, ep);
return 0;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 044855818cb1..a8641b6536ee 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -398,12 +398,17 @@ static int xhci_plat_remove(struct platform_device *dev)
pm_runtime_get_sync(&dev->dev);
xhci->xhc_state |= XHCI_STATE_REMOVING;
- usb_remove_hcd(shared_hcd);
- xhci->shared_hcd = NULL;
+ if (shared_hcd) {
+ usb_remove_hcd(shared_hcd);
+ xhci->shared_hcd = NULL;
+ }
+
usb_phy_shutdown(hcd->usb_phy);
usb_remove_hcd(hcd);
- usb_put_hcd(shared_hcd);
+
+ if (shared_hcd)
+ usb_put_hcd(shared_hcd);
clk_disable_unprepare(clk);
clk_disable_unprepare(reg_clk);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 65858f607437..38649284ff88 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -151,9 +151,11 @@ int xhci_start(struct xhci_hcd *xhci)
xhci_err(xhci, "Host took too long to start, "
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
- if (!ret)
+ if (!ret) {
/* clear state flags. Including dying, halted or removing */
xhci->xhc_state = 0;
+ xhci->run_graceperiod = jiffies + msecs_to_jiffies(500);
+ }
return ret;
}
@@ -791,8 +793,6 @@ static void xhci_stop(struct usb_hcd *hcd)
void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- unsigned long flags;
- int i;
if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev));
@@ -808,21 +808,12 @@ void xhci_shutdown(struct usb_hcd *hcd)
del_timer_sync(&xhci->shared_hcd->rh_timer);
}
- spin_lock_irqsave(&xhci->lock, flags);
+ spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
-
- /* Power off USB2 ports*/
- for (i = 0; i < xhci->usb2_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags);
-
- /* Power off USB3 ports*/
- for (i = 0; i < xhci->usb3_rhub.num_ports; i++)
- xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags);
-
/* Workaround for spurious wakeups at shutdown with HSW */
if (xhci->quirks & XHCI_SPURIOUS_WAKEUP)
xhci_reset(xhci, XHCI_RESET_SHORT_USEC);
- spin_unlock_irqrestore(&xhci->lock, flags);
+ spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 1960b47acfb2..7caa0db5e826 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1826,7 +1826,7 @@ struct xhci_hcd {
/* Host controller watchdog timer structures */
unsigned int xhc_state;
-
+ unsigned long run_graceperiod;
u32 command;
struct s3_save s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
@@ -2196,8 +2196,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);
int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd);
-void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index,
- bool on, unsigned long *flags);
void xhci_hc_died(struct xhci_hcd *xhci);
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index d1df153e7f5a..d63c63942af1 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -71,10 +71,7 @@ static int onboard_hub_power_off(struct onboard_hub *hub)
{
int err;
- if (hub->reset_gpio) {
- gpiod_set_value_cansleep(hub->reset_gpio, 1);
- fsleep(hub->pdata->reset_us);
- }
+ gpiod_set_value_cansleep(hub->reset_gpio, 1);
err = regulator_disable(hub->vdd);
if (err) {
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index f906dfd360d3..6c8f7763e75e 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -86,7 +86,7 @@ config USB_MUSB_TUSB6010
tristate "TUSB6010"
depends on HAS_IOMEM
depends on ARCH_OMAP2PLUS || COMPILE_TEST
- depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
+ depends on NOP_USB_XCEIV!=m || USB_MUSB_HDRC=m
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2798fca71261..af01a462cc43 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -97,7 +97,10 @@ struct ch341_private {
u8 mcr;
u8 msr;
u8 lcr;
+
unsigned long quirks;
+ u8 version;
+
unsigned long break_end;
};
@@ -250,8 +253,12 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
/*
* CH341A buffers data until a full endpoint-size packet (32 bytes)
* has been received unless bit 7 is set.
+ *
+ * At least one device with version 0x27 appears to have this bit
+ * inverted.
*/
- val |= BIT(7);
+ if (priv->version > 0x27)
+ val |= BIT(7);
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_DIVISOR << 8 | CH341_REG_PRESCALER,
@@ -265,6 +272,9 @@ static int ch341_set_baudrate_lcr(struct usb_device *dev,
* (stop bits, parity and word length). Version 0x30 and above use
* CH341_REG_LCR only and CH341_REG_LCR2 is always set to zero.
*/
+ if (priv->version < 0x30)
+ return 0;
+
r = ch341_control_out(dev, CH341_REQ_WRITE_REG,
CH341_REG_LCR2 << 8 | CH341_REG_LCR, lcr);
if (r)
@@ -308,7 +318,9 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
if (r)
return r;
- dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
+
+ priv->version = buffer[0];
+ dev_dbg(&dev->dev, "Chip version: 0x%02x\n", priv->version);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c374620a486f..a34957c4b64c 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -130,6 +130,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x83AA) }, /* Mark-10 Digital Force Gauge */
{ USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */
{ USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */
+ { USB_DEVICE(0x10C4, 0x8414) }, /* Decagon USB Cable Adapter */
{ USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
{ USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index d5a3986dfee7..52d59be92034 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1045,6 +1045,8 @@ static const struct usb_device_id id_table_combined[] = {
/* IDS GmbH devices */
{ USB_DEVICE(IDS_VID, IDS_SI31A_PID) },
{ USB_DEVICE(IDS_VID, IDS_CM31A_PID) },
+ /* Omron devices */
+ { USB_DEVICE(OMRON_VID, OMRON_CS1W_CIF31_PID) },
/* U-Blox devices */
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ZED_PID) },
{ USB_DEVICE(UBLOX_VID, UBLOX_C099F9P_ODIN_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 4e92c165c86b..31c8ccabbbb7 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -662,6 +662,12 @@
#define INFINEON_TRIBOARD_TC2X7_PID 0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
/*
+ * Omron corporation (https://www.omron.com)
+ */
+#define OMRON_VID 0x0590
+#define OMRON_CS1W_CIF31_PID 0x00b2
+
+/*
* Acton Research Corp.
*/
#define ACTON_VID 0x0647 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index de59fa919540..a5e8374a8d71 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -253,6 +253,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM05G 0x030a
+#define QUECTEL_PRODUCT_EM060K 0x030b
#define QUECTEL_PRODUCT_EM12 0x0512
#define QUECTEL_PRODUCT_RM500Q 0x0800
#define QUECTEL_PRODUCT_EC200S_CN 0x6002
@@ -438,6 +439,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9
#define CINTERION_PRODUCT_MV32_WA 0x00f1
#define CINTERION_PRODUCT_MV32_WB 0x00f2
+#define CINTERION_PRODUCT_MV32_WA_RMNET 0x00f3
+#define CINTERION_PRODUCT_MV32_WB_RMNET 0x00f4
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -573,6 +576,10 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
+/* OPPO products */
+#define OPPO_VENDOR_ID 0x22d9
+#define OPPO_PRODUCT_R11 0x276c
+
/* Device flags */
@@ -1138,6 +1145,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
{ USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
.driver_info = RSVD(6) | ZLP },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
@@ -1993,8 +2003,12 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0)},
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff),
.driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB_RMNET, 0xff),
+ .driver_info = RSVD(0) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2155,6 +2169,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
+ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1a05e3dcfec8..4993227ab293 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2294,6 +2294,13 @@ UNUSUAL_DEV( 0x1e74, 0x4621, 0x0000, 0x0000,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BULK_IGNORE_TAG | US_FL_MAX_SECTORS_64 ),
+/* Reported by Witold Lipieta <witold.lipieta@thaumatec.com> */
+UNUSUAL_DEV( 0x1fc9, 0x0117, 0x0100, 0x0100,
+ "NXP Semiconductors",
+ "PN7462AU",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Supplied with some Castlewood ORB removable drives */
UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
"Double-H Technology",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 4051c8cd0cd8..23ab3b048d9b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -62,6 +62,13 @@ UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+/* Reported-by: Tom Hu <huxiaoying@kylinos.cn> */
+UNUSUAL_DEV(0x0b05, 0x1932, 0x0000, 0x9999,
+ "ASUS",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* Reported-by: David Webb <djw@noc.ac.uk> */
UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
"Seagate",
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index c1d8c23baa39..de66a2949e33 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -99,8 +99,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
- pin_assign = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo) &
- DP_CAP_UFP_D_PIN_ASSIGN(dp->port->vdo);
+ pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
+ DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
break;
default:
break;
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebc29ec20e3f..bd5e5dd70431 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2346,6 +2346,7 @@ static void __exit typec_exit(void)
ida_destroy(&typec_index_ida);
bus_unregister(&typec_bus);
class_unregister(&typec_mux_class);
+ class_unregister(&retimer_class);
}
module_exit(typec_exit);
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 47b733f78fb0..a8e273fe204a 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -571,9 +571,11 @@ err_unregister_switch:
static int is_memory(struct acpi_resource *res, void *data)
{
- struct resource r;
+ struct resource_win win = {};
+ struct resource *r = &win.res;
- return !acpi_dev_resource_memory(res, &r);
+ return !(acpi_dev_resource_memory(res, r) ||
+ acpi_dev_resource_address_space(res, &win));
}
/* IOM ACPI IDs and IOM_PORT_STATUS_OFFSET */
@@ -583,6 +585,9 @@ static const struct acpi_device_id iom_acpi_ids[] = {
/* AlderLake */
{ "INTC1079", 0x160, },
+
+ /* Meteor Lake */
+ { "INTC107A", 0x160, },
{}
};
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index ea5a917c51b1..904c7b4ce2f0 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -6320,6 +6320,13 @@ static int tcpm_psy_set_prop(struct power_supply *psy,
struct tcpm_port *port = power_supply_get_drvdata(psy);
int ret;
+ /*
+ * All the properties below are related to USB PD. The check needs to be
+ * property specific when a non-pd related property is added.
+ */
+ if (!port->pd_supported)
+ return -EOPNOTSUPP;
+
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
ret = tcpm_psy_set_online(port, val);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 1aea46493b85..7f2624f42724 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1200,32 +1200,6 @@ out_unlock:
return ret;
}
-static void ucsi_unregister_connectors(struct ucsi *ucsi)
-{
- struct ucsi_connector *con;
- int i;
-
- if (!ucsi->connector)
- return;
-
- for (i = 0; i < ucsi->cap.num_connectors; i++) {
- con = &ucsi->connector[i];
-
- if (!con->wq)
- break;
-
- cancel_work_sync(&con->work);
- ucsi_unregister_partner(con);
- ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(con);
- destroy_workqueue(con->wq);
- typec_unregister_port(con->port);
- }
-
- kfree(ucsi->connector);
- ucsi->connector = NULL;
-}
-
/**
* ucsi_init - Initialize UCSI interface
* @ucsi: UCSI to be initialized
@@ -1234,6 +1208,7 @@ static void ucsi_unregister_connectors(struct ucsi *ucsi)
*/
static int ucsi_init(struct ucsi *ucsi)
{
+ struct ucsi_connector *con;
u64 command;
int ret;
int i;
@@ -1264,7 +1239,7 @@ static int ucsi_init(struct ucsi *ucsi)
}
/* Allocate the connectors. Released in ucsi_unregister() */
- ucsi->connector = kcalloc(ucsi->cap.num_connectors,
+ ucsi->connector = kcalloc(ucsi->cap.num_connectors + 1,
sizeof(*ucsi->connector), GFP_KERNEL);
if (!ucsi->connector) {
ret = -ENOMEM;
@@ -1288,7 +1263,15 @@ static int ucsi_init(struct ucsi *ucsi)
return 0;
err_unregister:
- ucsi_unregister_connectors(ucsi);
+ for (con = ucsi->connector; con->port; con++) {
+ ucsi_unregister_partner(con);
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(con);
+ if (con->wq)
+ destroy_workqueue(con->wq);
+ typec_unregister_port(con->port);
+ con->port = NULL;
+ }
err_reset:
memset(&ucsi->cap, 0, sizeof(ucsi->cap));
@@ -1402,6 +1385,7 @@ EXPORT_SYMBOL_GPL(ucsi_register);
void ucsi_unregister(struct ucsi *ucsi)
{
u64 cmd = UCSI_SET_NOTIFICATION_ENABLE;
+ int i;
/* Make sure that we are not in the middle of driver initialization */
cancel_delayed_work_sync(&ucsi->work);
@@ -1409,7 +1393,18 @@ void ucsi_unregister(struct ucsi *ucsi)
/* Disable notifications */
ucsi->ops->async_write(ucsi, UCSI_CONTROL, &cmd, sizeof(cmd));
- ucsi_unregister_connectors(ucsi);
+ for (i = 0; i < ucsi->cap.num_connectors; i++) {
+ cancel_work_sync(&ucsi->connector[i].work);
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+ if (ucsi->connector[i].wq)
+ destroy_workqueue(ucsi->connector[i].wq);
+ typec_unregister_port(ucsi->connector[i].port);
+ }
+
+ kfree(ucsi->connector);
}
EXPORT_SYMBOL_GPL(ucsi_unregister);
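The "+ 1" in the kcalloc() above is deliberate: kcalloc() zeroes the array, so the extra element acts as a sentinel whose ->port stays NULL, letting the error path walk "for (con = ucsi->connector; con->port; con++)" without tracking a count. The idiom in isolation; the demo_* types are hypothetical.

#include <linux/slab.h>

struct demo_con {
	void *port;	/* NULL in the sentinel entry */
};

static struct demo_con *demo_alloc(size_t n)
{
	/* n real entries plus one zeroed sentinel */
	return kcalloc(n + 1, sizeof(struct demo_con), GFP_KERNEL);
}

static void demo_unwind(struct demo_con *cons)
{
	struct demo_con *c;

	for (c = cons; c->port; c++) {
		/* release whatever c->port references */
		c->port = NULL;
	}
}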
diff --git a/drivers/vfio/pci/vfio_pci_zdev.c b/drivers/vfio/pci/vfio_pci_zdev.c
index e163aa9f6144..0cbdcd14f1c8 100644
--- a/drivers/vfio/pci/vfio_pci_zdev.c
+++ b/drivers/vfio/pci/vfio_pci_zdev.c
@@ -151,7 +151,10 @@ int vfio_pci_zdev_open_device(struct vfio_pci_core_device *vdev)
if (!vdev->vdev.kvm)
return 0;
- return kvm_s390_pci_register_kvm(zdev, vdev->vdev.kvm);
+ if (zpci_kvm_hook.kvm_register)
+ return zpci_kvm_hook.kvm_register(zdev, vdev->vdev.kvm);
+
+ return -ENOENT;
}
void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
@@ -161,5 +164,6 @@ void vfio_pci_zdev_close_device(struct vfio_pci_core_device *vdev)
if (!zdev || !vdev->vdev.kvm)
return;
- kvm_s390_pci_unregister_kvm(zdev);
+ if (zpci_kvm_hook.kvm_unregister)
+ zpci_kvm_hook.kvm_unregister(zdev);
}
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index db516c90a977..8706482665d1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -558,6 +558,18 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr,
ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM,
pages, NULL, NULL);
if (ret > 0) {
+ int i;
+
+ /*
+ * The zero page is always resident; we don't need to pin it,
+ * and it falls into our invalid/reserved test so we don't
+ * unpin it in put_pfn(). Unpin all zero pages in the batch here.
+ */
+ for (i = 0 ; i < ret; i++) {
+ if (unlikely(is_zero_pfn(page_to_pfn(pages[i]))))
+ unpin_user_page(pages[i]);
+ }
+
*pfn = page_to_pfn(pages[0]);
goto done;
}
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 886c564787f1..b58b445bb529 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -74,10 +74,6 @@
#define SYNTHVID_DEPTH_WIN8 32
#define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
-#define PCI_VENDOR_ID_MICROSOFT 0x1414
-#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
-
-
enum pipe_msg_type {
PIPE_MSG_INVALID,
PIPE_MSG_DATA,
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index ce91add81401..dc4d25c26256 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -17,7 +17,7 @@ config NITRO_ENCLAVES
config NITRO_ENCLAVES_MISC_DEV_TEST
bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS
- depends on NITRO_ENCLAVES && KUNIT
+ depends on NITRO_ENCLAVES && KUNIT=y
default KUNIT_ALL_TESTS
help
Enable KUnit tests for the misc device functionality of the Nitro
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 738029de3c67..e1ec725c2819 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1047,6 +1047,9 @@ int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
size_t size;
int i, ret;
+ if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
+ return -ENOMEM;
+
size = args->nr_pages << PAGE_SHIFT;
if (args->coherent)
args->vaddr = dma_alloc_coherent(args->dev, size,
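The new bounds check above guards the shift that follows it: with 4 KiB pages PAGE_SHIFT is 12, so any nr_pages larger than INT_MAX >> 12 would overflow "nr_pages << PAGE_SHIFT" before the result reaches size. A standalone sketch of the guard:

#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/mm.h>	/* PAGE_SHIFT */

static int demo_pages_to_size(int nr_pages, size_t *size)
{
	if (nr_pages < 0 || nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	*size = (size_t)nr_pages << PAGE_SHIFT;
	return 0;
}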
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index c4210a3964d8..bbcc5afd1576 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -76,7 +76,7 @@ void afs_lock_op_done(struct afs_call *call)
if (call->error == 0) {
spin_lock(&vnode->lock);
trace_afs_flock_ev(vnode, NULL, afs_flock_timestamp, 0);
- vnode->locked_at = call->reply_time;
+ vnode->locked_at = call->issue_time;
afs_schedule_lock_extension(vnode);
spin_unlock(&vnode->lock);
}
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 4943413d9c5f..7d37f63ef0f0 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -131,7 +131,7 @@ bad:
static time64_t xdr_decode_expiry(struct afs_call *call, u32 expiry)
{
- return ktime_divns(call->reply_time, NSEC_PER_SEC) + expiry;
+ return ktime_divns(call->issue_time, NSEC_PER_SEC) + expiry;
}
static void xdr_decode_AFSCallBack(const __be32 **_bp,
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 64ad55494349..723d162078a3 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -137,7 +137,6 @@ struct afs_call {
bool need_attention; /* T if RxRPC poked us */
bool async; /* T if asynchronous */
bool upgrade; /* T to request service upgrade */
- bool have_reply_time; /* T if have got reply_time */
bool intr; /* T if interruptible */
bool unmarshalling_error; /* T if an unmarshalling error occurred */
u16 service_id; /* Actual service ID (after upgrade) */
@@ -151,7 +150,7 @@ struct afs_call {
} __attribute__((packed));
__be64 tmp64;
};
- ktime_t reply_time; /* Time of first reply packet */
+ ktime_t issue_time; /* Time of issue of operation */
};
struct afs_call_type {
diff --git a/fs/afs/misc.c b/fs/afs/misc.c
index 933e67fcdab1..805328ca5428 100644
--- a/fs/afs/misc.c
+++ b/fs/afs/misc.c
@@ -69,6 +69,7 @@ int afs_abort_to_error(u32 abort_code)
/* Unified AFS error table */
case UAEPERM: return -EPERM;
case UAENOENT: return -ENOENT;
+ case UAEAGAIN: return -EAGAIN;
case UAEACCES: return -EACCES;
case UAEBUSY: return -EBUSY;
case UAEEXIST: return -EEXIST;
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index d5c4785c862d..eccc3cd0cb70 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -351,6 +351,7 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
if (call->max_lifespan)
rxrpc_kernel_set_max_life(call->net->socket, rxcall,
call->max_lifespan);
+ call->issue_time = ktime_get_real();
/* send the request */
iov[0].iov_base = call->request;
@@ -501,12 +502,6 @@ static void afs_deliver_to_call(struct afs_call *call)
return;
}
- if (!call->have_reply_time &&
- rxrpc_kernel_get_reply_time(call->net->socket,
- call->rxcall,
- &call->reply_time))
- call->have_reply_time = true;
-
ret = call->type->deliver(call);
state = READ_ONCE(call->state);
if (ret == 0 && call->unmarshalling_error)
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index fdc7d675b4b0..11571cca86c1 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -232,8 +232,7 @@ static void xdr_decode_YFSCallBack(const __be32 **_bp,
struct afs_callback *cb = &scb->callback;
ktime_t cb_expiry;
- cb_expiry = call->reply_time;
- cb_expiry = ktime_add(cb_expiry, xdr_to_u64(x->expiration_time) * 100);
+ cb_expiry = ktime_add(call->issue_time, xdr_to_u64(x->expiration_time) * 100);
cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);
scb->have_cb = true;
*_bp += xdr_size(x);
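
Taken together, the fs/afs hunks above stop basing expiry calculations on the first reply packet's timestamp and instead record the time the operation was issued, just before transmission. The issue time is never later than the reply time, so lock extensions and callback promises computed from it expire early rather than late, which is the safe direction. A sketch of the arithmetic as in xdr_decode_YFSCallBack(), where the server-supplied expiration value is taken to be in 100 ns units (names follow the patch):

	/* Recorded in afs_make_call(), immediately before the request is sent. */
	call->issue_time = ktime_get_real();

	/* Later: derive an absolute expiry from the issue time. */
	cb_expiry = ktime_add(call->issue_time, expiration_units * 100);
	cb->expires_at = ktime_divns(cb_expiry, NSEC_PER_SEC);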
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9ef162dbd4bc..df8c99c99df9 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1088,8 +1088,6 @@ struct btrfs_fs_info {
spinlock_t zone_active_bgs_lock;
struct list_head zone_active_bgs;
- /* Waiters when BTRFS_FS_NEED_ZONE_FINISH is set */
- wait_queue_head_t zone_finish_wait;
/* Updates are not protected by any lock */
struct btrfs_commit_stats commit_stats;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 820b1f1e6b67..1af28b066b42 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3068,7 +3068,6 @@ void btrfs_init_fs_info(struct btrfs_fs_info *fs_info)
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
init_waitqueue_head(&fs_info->delayed_iputs_wait);
- init_waitqueue_head(&fs_info->zone_finish_wait);
/* Usable values until the real ones are cached from the superblock */
fs_info->nodesize = 4096;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ad250892028d..1372210869b1 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1644,10 +1644,9 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode,
done_offset = end;
if (done_offset == start) {
- struct btrfs_fs_info *info = inode->root->fs_info;
-
- wait_var_event(&info->zone_finish_wait,
- !test_bit(BTRFS_FS_NEED_ZONE_FINISH, &info->flags));
+ wait_on_bit_io(&inode->root->fs_info->flags,
+ BTRFS_FS_NEED_ZONE_FINISH,
+ TASK_UNINTERRUPTIBLE);
continue;
}
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index d0cbeb7ae81c..435559ba94fa 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -199,7 +199,7 @@ static u64 calc_chunk_size(const struct btrfs_fs_info *fs_info, u64 flags)
ASSERT(flags & BTRFS_BLOCK_GROUP_TYPE_MASK);
if (flags & BTRFS_BLOCK_GROUP_DATA)
- return SZ_1G;
+ return BTRFS_MAX_DATA_CHUNK_SIZE;
else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
return SZ_32M;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 064ab2a79c80..f63ff91e2883 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5267,6 +5267,9 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
ctl->stripe_size);
}
+ /* Stripe size should not go beyond 1G. */
+ ctl->stripe_size = min_t(u64, ctl->stripe_size, SZ_1G);
+
/* Align to BTRFS_STRIPE_LEN */
ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
ctl->chunk_size = ctl->stripe_size * data_stripes;
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index b150b07ba1a7..62e7007a7e46 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -421,10 +421,19 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
* since btrfs adds the pages one by one to a bio, and btrfs cannot
* increase the metadata reservation even if it increases the number of
* extents, it is safe to stick with the limit.
+ *
+ * With zoned emulation, we can have a non-zoned device in zoned
+ * mode. In this case, we don't have a valid max zone append size, so
+ * use max_segments * PAGE_SIZE as the pseudo max_zone_append_size.
*/
- zone_info->max_zone_append_size =
- min_t(u64, (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
- (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+ if (bdev_is_zoned(bdev)) {
+ zone_info->max_zone_append_size = min_t(u64,
+ (u64)bdev_max_zone_append_sectors(bdev) << SECTOR_SHIFT,
+ (u64)bdev_max_segments(bdev) << PAGE_SHIFT);
+ } else {
+ zone_info->max_zone_append_size =
+ (u64)bdev_max_segments(bdev) << PAGE_SHIFT;
+ }
if (!IS_ALIGNED(nr_sectors, zone_sectors))
zone_info->nr_zones++;
@@ -1178,7 +1187,7 @@ int btrfs_ensure_empty_zones(struct btrfs_device *device, u64 start, u64 size)
* offset.
*/
static int calculate_alloc_pointer(struct btrfs_block_group *cache,
- u64 *offset_ret)
+ u64 *offset_ret, bool new)
{
struct btrfs_fs_info *fs_info = cache->fs_info;
struct btrfs_root *root;
@@ -1188,6 +1197,21 @@ static int calculate_alloc_pointer(struct btrfs_block_group *cache,
int ret;
u64 length;
+ /*
+ * Avoid tree lookups for a new block group; there's no use for them.
+ * The alloc pointer must always be 0.
+ *
+ * Also, we have a lock chain of extent buffer lock -> chunk mutex.
+ * For a new block group, this function is called from
+ * btrfs_make_block_group(), which already holds the chunk mutex.
+ * Thus, to avoid deadlock, we cannot call calculate_alloc_pointer(),
+ * which takes extent buffer locks.
+ */
+ if (new) {
+ *offset_ret = 0;
+ return 0;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
@@ -1323,6 +1347,13 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
else
num_conventional++;
+ /*
+ * Consider a zone as active if we can allow any number of
+ * active zones.
+ */
+ if (!device->zone_info->max_active_zones)
+ __set_bit(i, active);
+
if (!is_sequential) {
alloc_offsets[i] = WP_CONVENTIONAL;
continue;
@@ -1389,45 +1420,23 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
__set_bit(i, active);
break;
}
-
- /*
- * Consider a zone as active if we can allow any number of
- * active zones.
- */
- if (!device->zone_info->max_active_zones)
- __set_bit(i, active);
}
if (num_sequential > 0)
cache->seq_zone = true;
if (num_conventional > 0) {
- /*
- * Avoid calling calculate_alloc_pointer() for new BG. It
- * is no use for new BG. It must be always 0.
- *
- * Also, we have a lock chain of extent buffer lock ->
- * chunk mutex. For new BG, this function is called from
- * btrfs_make_block_group() which is already taking the
- * chunk mutex. Thus, we cannot call
- * calculate_alloc_pointer() which takes extent buffer
- * locks to avoid deadlock.
- */
-
/* Zone capacity is always zone size in emulation */
cache->zone_capacity = cache->length;
- if (new) {
- cache->alloc_offset = 0;
- goto out;
- }
- ret = calculate_alloc_pointer(cache, &last_alloc);
- if (ret || map->num_stripes == num_conventional) {
- if (!ret)
- cache->alloc_offset = last_alloc;
- else
- btrfs_err(fs_info,
+ ret = calculate_alloc_pointer(cache, &last_alloc, new);
+ if (ret) {
+ btrfs_err(fs_info,
"zoned: failed to determine allocation offset of bg %llu",
- cache->start);
+ cache->start);
+ goto out;
+ } else if (map->num_stripes == num_conventional) {
+ cache->alloc_offset = last_alloc;
+ cache->zone_is_active = 1;
goto out;
}
}
@@ -1495,13 +1504,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
goto out;
}
- if (cache->zone_is_active) {
- btrfs_get_block_group(cache);
- spin_lock(&fs_info->zone_active_bgs_lock);
- list_add_tail(&cache->active_bg_list, &fs_info->zone_active_bgs);
- spin_unlock(&fs_info->zone_active_bgs_lock);
- }
-
out:
if (cache->alloc_offset > fs_info->zone_size) {
btrfs_err(fs_info,
@@ -1526,10 +1528,16 @@ out:
ret = -EIO;
}
- if (!ret)
+ if (!ret) {
cache->meta_write_pointer = cache->alloc_offset + cache->start;
-
- if (ret) {
+ if (cache->zone_is_active) {
+ btrfs_get_block_group(cache);
+ spin_lock(&fs_info->zone_active_bgs_lock);
+ list_add_tail(&cache->active_bg_list,
+ &fs_info->zone_active_bgs);
+ spin_unlock(&fs_info->zone_active_bgs_lock);
+ }
+ } else {
kfree(cache->physical_map);
cache->physical_map = NULL;
}
@@ -2007,8 +2015,7 @@ static int do_zone_finish(struct btrfs_block_group *block_group, bool fully_writ
/* For active_bg_list */
btrfs_put_block_group(block_group);
- clear_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
- wake_up_all(&fs_info->zone_finish_wait);
+ clear_and_wake_up_bit(BTRFS_FS_NEED_ZONE_FINISH, &fs_info->flags);
return 0;
}
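
The last zoned.c hunk above replaces a dedicated waitqueue with the kernel's generic bit waitqueue: sleepers in run_delalloc_zoned() block on the BTRFS_FS_NEED_ZONE_FINISH bit via wait_on_bit_io(), and do_zone_finish() clears the bit and wakes them with a single helper. A minimal sketch of the pairing, assuming a flags bitmap and bit number of your own:

	unsigned long flags;			/* bitmap holding MY_BIT */

	/* Waiter: sleep (accounted as I/O wait) until MY_BIT is cleared. */
	wait_on_bit_io(&flags, MY_BIT, TASK_UNINTERRUPTIBLE);

	/* Waker: clear MY_BIT with the required barrier, then wake all
	 * waiters sleeping on that bit. */
	clear_and_wake_up_bit(MY_BIT, &flags);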
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 6cba2c6de2f9..2ad58c465208 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -111,6 +111,7 @@ struct cachefiles_cache {
char *tag; /* cache binding tag */
refcount_t unbind_pincount;/* refcount to do daemon unbind */
struct xarray reqs; /* xarray of pending on-demand requests */
+ unsigned long req_id_next;
struct xarray ondemand_ids; /* xarray for ondemand_id allocation */
u32 ondemand_id_next;
};
diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c
index 1fee702d5529..0254ed39f68c 100644
--- a/fs/cachefiles/ondemand.c
+++ b/fs/cachefiles/ondemand.c
@@ -158,9 +158,13 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args)
/* fail OPEN request if daemon reports an error */
if (size < 0) {
- if (!IS_ERR_VALUE(size))
- size = -EINVAL;
- req->error = size;
+ if (!IS_ERR_VALUE(size)) {
+ req->error = -EINVAL;
+ ret = -EINVAL;
+ } else {
+ req->error = size;
+ ret = 0;
+ }
goto out;
}
@@ -238,14 +242,19 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
unsigned long id = 0;
size_t n;
int ret = 0;
- XA_STATE(xas, &cache->reqs, 0);
+ XA_STATE(xas, &cache->reqs, cache->req_id_next);
/*
- * Search for a request that has not ever been processed, to prevent
- * requests from being processed repeatedly.
+ * Cyclically search for a request that has never been processed,
+ * to prevent requests from being processed repeatedly and to keep
+ * request distribution fair.
*/
xa_lock(&cache->reqs);
req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW);
+ if (!req && cache->req_id_next > 0) {
+ xas_set(&xas, 0);
+ req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW);
+ }
if (!req) {
xa_unlock(&cache->reqs);
return 0;
@@ -260,6 +269,7 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache,
}
xas_clear_mark(&xas, CACHEFILES_REQ_NEW);
+ cache->req_id_next = xas.xa_index + 1;
xa_unlock(&cache->reqs);
id = xas.xa_index;
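
The new cursor turns the previous scan-from-zero into a cyclic scan: resume after the last request handed out, and wrap to index 0 only if nothing later is marked, stopping before the cursor so each entry is visited once per cycle. A generic sketch of the pattern over any xarray (names illustrative; the patch uses cache->reqs and CACHEFILES_REQ_NEW):

	XA_STATE(xas, &xa, next_id);		/* resume at the saved cursor */

	entry = xas_find_marked(&xas, ULONG_MAX, XA_MARK_0);
	if (!entry && next_id > 0) {
		xas_set(&xas, 0);		/* wrap around... */
		entry = xas_find_marked(&xas, next_id - 1, XA_MARK_0);
	}
	if (entry)
		next_id = xas.xa_index + 1;	/* advance for the next call */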
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f54d8bf2732a..8042d7280dec 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1248,6 +1248,12 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
lock_two_nondirectories(target_inode, src_inode);
cifs_dbg(FYI, "about to flush pages\n");
+
+ rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
+ off + len - 1);
+ if (rc)
+ goto out;
+
/* should we flush first and last page first */
truncate_inode_pages(&target_inode->i_data, 0);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 81f4c15936d0..5b4a7a32bdc5 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 38
-#define CIFS_VERSION "2.38"
+#define SMB3_PRODUCT_BUILD 39
+#define CIFS_VERSION "2.39"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a0a06b6f252b..7ae6f2c08153 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -702,9 +702,6 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
int length = 0;
int total_read;
- smb_msg->msg_control = NULL;
- smb_msg->msg_controllen = 0;
-
for (total_read = 0; msg_data_left(smb_msg); total_read += length) {
try_to_freeze();
@@ -760,7 +757,7 @@ int
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read)
{
- struct msghdr smb_msg;
+ struct msghdr smb_msg = {};
struct kvec iov = {.iov_base = buf, .iov_len = to_read};
iov_iter_kvec(&smb_msg.msg_iter, READ, &iov, 1, to_read);
@@ -770,15 +767,13 @@ cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
ssize_t
cifs_discard_from_socket(struct TCP_Server_Info *server, size_t to_read)
{
- struct msghdr smb_msg;
+ struct msghdr smb_msg = {};
/*
* iov_iter_discard already sets smb_msg.type, count and iov_offset,
* and cifs_readv_from_socket sets msg_control and msg_controllen,
* so there is little to initialize in struct msghdr
*/
- smb_msg.msg_name = NULL;
- smb_msg.msg_namelen = 0;
iov_iter_discard(&smb_msg.msg_iter, READ, to_read);
return cifs_readv_from_socket(server, &smb_msg);
@@ -788,7 +783,7 @@ int
cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page,
unsigned int page_offset, unsigned int to_read)
{
- struct msghdr smb_msg;
+ struct msghdr smb_msg = {};
struct bio_vec bv = {
.bv_page = page, .bv_len = to_read, .bv_offset = page_offset};
iov_iter_bvec(&smb_msg.msg_iter, READ, &bv, 1, to_read);
@@ -2350,7 +2345,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
ses = tcon->ses;
cifs_dbg(FYI, "%s: tc_count=%d\n", __func__, tcon->tc_count);
spin_lock(&cifs_tcp_ses_lock);
+ spin_lock(&tcon->tc_lock);
if (--tcon->tc_count > 0) {
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
return;
}
@@ -2359,6 +2356,7 @@ cifs_put_tcon(struct cifs_tcon *tcon)
WARN_ON(tcon->tc_count < 0);
list_del_init(&tcon->tcon_list);
+ spin_unlock(&tcon->tc_lock);
spin_unlock(&cifs_tcp_ses_lock);
/* cancel polling of interfaces */
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fa738adc031f..6f38b134a346 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3575,6 +3575,9 @@ static ssize_t __cifs_writev(
ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
{
+ struct file *file = iocb->ki_filp;
+
+ cifs_revalidate_mapping(file->f_inode);
return __cifs_writev(iocb, from, true);
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 4810bd62266a..421be43af425 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1600,17 +1600,8 @@ smb2_copychunk_range(const unsigned int xid,
int chunks_copied = 0;
bool chunk_sizes_updated = false;
ssize_t bytes_written, total_bytes_written = 0;
- struct inode *inode;
pcchunk = kmalloc(sizeof(struct copychunk_ioctl), GFP_KERNEL);
-
- /*
- * We need to flush all unwritten data before we can send the
- * copychunk ioctl to the server.
- */
- inode = d_inode(trgtfile->dentry);
- filemap_write_and_wait(inode->i_mapping);
-
if (pcchunk == NULL)
return -ENOMEM;
@@ -3678,39 +3669,50 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
{
int rc;
unsigned int xid;
- struct inode *inode;
+ struct inode *inode = file_inode(file);
struct cifsFileInfo *cfile = file->private_data;
- struct cifsInodeInfo *cifsi;
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
__le64 eof;
+ loff_t old_eof;
xid = get_xid();
- inode = d_inode(cfile->dentry);
- cifsi = CIFS_I(inode);
+ inode_lock(inode);
- if (off >= i_size_read(inode) ||
- off + len >= i_size_read(inode)) {
+ old_eof = i_size_read(inode);
+ if ((off >= old_eof) ||
+ off + len >= old_eof) {
rc = -EINVAL;
goto out;
}
+ filemap_invalidate_lock(inode->i_mapping);
+ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof - 1);
+ if (rc < 0)
+ goto out_2;
+
+ truncate_pagecache_range(inode, off, old_eof);
+
rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
- i_size_read(inode) - off - len, off);
+ old_eof - off - len, off);
if (rc < 0)
- goto out;
+ goto out_2;
- eof = cpu_to_le64(i_size_read(inode) - len);
+ eof = cpu_to_le64(old_eof - len);
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid, &eof);
if (rc < 0)
- goto out;
+ goto out_2;
rc = 0;
cifsi->server_eof = i_size_read(inode) - len;
truncate_setsize(inode, cifsi->server_eof);
fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
+out_2:
+ filemap_invalidate_unlock(inode->i_mapping);
out:
+ inode_unlock(inode);
free_xid(xid);
return rc;
}
@@ -3721,34 +3723,47 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
int rc;
unsigned int xid;
struct cifsFileInfo *cfile = file->private_data;
+ struct inode *inode = file_inode(file);
__le64 eof;
- __u64 count;
+ __u64 count, old_eof;
xid = get_xid();
- if (off >= i_size_read(file->f_inode)) {
+ inode_lock(inode);
+
+ old_eof = i_size_read(inode);
+ if (off >= old_eof) {
rc = -EINVAL;
goto out;
}
- count = i_size_read(file->f_inode) - off;
- eof = cpu_to_le64(i_size_read(file->f_inode) + len);
+ count = old_eof - off;
+ eof = cpu_to_le64(old_eof + len);
+
+ filemap_invalidate_lock(inode->i_mapping);
+ rc = filemap_write_and_wait_range(inode->i_mapping, off, old_eof + len - 1);
+ if (rc < 0)
+ goto out_2;
+ truncate_pagecache_range(inode, off, old_eof);
rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid, cfile->pid, &eof);
if (rc < 0)
- goto out;
+ goto out_2;
rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
if (rc < 0)
- goto out;
+ goto out_2;
- rc = smb3_zero_range(file, tcon, off, len, 1);
+ rc = smb3_zero_data(file, tcon, off, len, xid);
if (rc < 0)
- goto out;
+ goto out_2;
rc = 0;
+out_2:
+ filemap_invalidate_unlock(inode->i_mapping);
out:
+ inode_unlock(inode);
free_xid(xid);
return rc;
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 128e44e57528..6352ab32c7e7 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -965,16 +965,17 @@ SMB2_negotiate(const unsigned int xid,
} else if (rc != 0)
goto neg_exit;
+ rc = -EIO;
if (strcmp(server->vals->version_string,
SMB3ANY_VERSION_STRING) == 0) {
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
cifs_server_dbg(VFS,
"SMB2 dialect returned but not requested\n");
- return -EIO;
+ goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
cifs_server_dbg(VFS,
"SMB2.1 dialect returned but not requested\n");
- return -EIO;
+ goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB311_PROT_ID)) {
/* ops set to 3.0 by default, so update */
server->ops = &smb311_operations;
@@ -985,7 +986,7 @@ SMB2_negotiate(const unsigned int xid,
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
cifs_server_dbg(VFS,
"SMB2 dialect returned but not requested\n");
- return -EIO;
+ goto neg_exit;
} else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
/* ops set to 3.0 by default, so update */
server->ops = &smb21_operations;
@@ -999,7 +1000,7 @@ SMB2_negotiate(const unsigned int xid,
/* if requested single dialect ensure returned dialect matched */
cifs_server_dbg(VFS, "Invalid 0x%x dialect returned: not requested\n",
le16_to_cpu(rsp->DialectRevision));
- return -EIO;
+ goto neg_exit;
}
cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
@@ -1017,9 +1018,10 @@ SMB2_negotiate(const unsigned int xid,
else {
cifs_server_dbg(VFS, "Invalid dialect returned by server 0x%x\n",
le16_to_cpu(rsp->DialectRevision));
- rc = -EIO;
goto neg_exit;
}
+
+ rc = 0;
server->dialect = le16_to_cpu(rsp->DialectRevision);
/*
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index c2fe035e573b..9a2753e21170 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -194,10 +194,6 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
*sent = 0;
- smb_msg->msg_name = (struct sockaddr *) &server->dstaddr;
- smb_msg->msg_namelen = sizeof(struct sockaddr);
- smb_msg->msg_control = NULL;
- smb_msg->msg_controllen = 0;
if (server->noblocksnd)
smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
@@ -309,7 +305,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
sigset_t mask, oldmask;
size_t total_len = 0, sent, size;
struct socket *ssocket = server->ssocket;
- struct msghdr smb_msg;
+ struct msghdr smb_msg = {};
__be32 rfc1002_marker;
if (cifs_rdma_enabled(server)) {
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 3dcf0b8b4e93..232cfdf095ae 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -745,6 +745,28 @@ void debugfs_remove(struct dentry *dentry)
EXPORT_SYMBOL_GPL(debugfs_remove);
/**
+ * debugfs_lookup_and_remove - lookup a directory or file and recursively remove it
+ * @name: a pointer to a string containing the name of the item to look up.
+ * @parent: a pointer to the parent dentry of the item.
+ *
+ * This is the equivalent of doing something like
+ * debugfs_remove(debugfs_lookup(..)) but with the proper reference counting
+ * handled for the directory being looked up.
+ */
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent)
+{
+ struct dentry *dentry;
+
+ dentry = debugfs_lookup(name, parent);
+ if (!dentry)
+ return;
+
+ debugfs_remove(dentry);
+ dput(dentry);
+}
+EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
+
+/**
* debugfs_rename - rename a file/directory in the debugfs filesystem
* @old_dir: a pointer to the parent dentry for the renamed object. This
* should be a directory dentry.
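
debugfs_lookup() returns a dentry with an extra reference held, so the widespread debugfs_remove(debugfs_lookup(...)) idiom leaked that reference. The new helper performs the lookup, the removal and the dput() together. Usage sketch (names hypothetical):

	/* Leaky idiom being replaced: the lookup reference is never dropped. */
	debugfs_remove(debugfs_lookup("stats", parent));

	/* Equivalent, with the reference handled internally. */
	debugfs_lookup_and_remove("stats", parent);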
diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c
index 8e01d89c3319..b5fd9d71e67f 100644
--- a/fs/erofs/fscache.c
+++ b/fs/erofs/fscache.c
@@ -222,8 +222,10 @@ static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
rreq = erofs_fscache_alloc_request(folio_mapping(folio),
folio_pos(folio), folio_size(folio));
- if (IS_ERR(rreq))
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
goto out;
+ }
return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
rreq, mdev.m_pa);
@@ -301,8 +303,10 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
rreq = erofs_fscache_alloc_request(folio_mapping(folio),
folio_pos(folio), folio_size(folio));
- if (IS_ERR(rreq))
+ if (IS_ERR(rreq)) {
+ ret = PTR_ERR(rreq);
goto out_unlock;
+ }
pstart = mdev.m_pa + (pos - map.m_la);
return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index cfee49d33b95..a01cc82795a2 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -195,7 +195,6 @@ struct erofs_workgroup {
atomic_t refcount;
};
-#if defined(CONFIG_SMP)
static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
int val)
{
@@ -224,34 +223,6 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
return atomic_cond_read_relaxed(&grp->refcount,
VAL != EROFS_LOCKED_MAGIC);
}
-#else
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- /* no need to spin on UP platforms, let's just disable preemption. */
- if (val != atomic_read(&grp->refcount)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
-{
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- int v = atomic_read(&grp->refcount);
-
- /* workgroup is never freezed on uniprocessor systems */
- DBG_BUGON(v == EROFS_LOCKED_MAGIC);
- return v;
-}
-#endif /* !CONFIG_SMP */
#endif /* !CONFIG_EROFS_FS_ZIP */
/* we strictly follow PAGE_SIZE and no buffer head yet */
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 572f0b8151ba..d58549ca1df9 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -141,7 +141,7 @@ struct z_erofs_maprecorder {
u8 type, headtype;
u16 clusterofs;
u16 delta[2];
- erofs_blk_t pblk, compressedlcs;
+ erofs_blk_t pblk, compressedblks;
erofs_off_t nextpackoff;
};
@@ -192,7 +192,7 @@ static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
DBG_BUGON(1);
return -EFSCORRUPTED;
}
- m->compressedlcs = m->delta[0] &
+ m->compressedblks = m->delta[0] &
~Z_EROFS_VLE_DI_D0_CBLKCNT;
m->delta[0] = 1;
}
@@ -293,7 +293,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
DBG_BUGON(1);
return -EFSCORRUPTED;
}
- m->compressedlcs = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
+ m->compressedblks = lo & ~Z_EROFS_VLE_DI_D0_CBLKCNT;
m->delta[0] = 1;
return 0;
} else if (i + 1 != (int)vcnt) {
@@ -497,7 +497,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
return 0;
}
lcn = m->lcn + 1;
- if (m->compressedlcs)
+ if (m->compressedblks)
goto out;
err = z_erofs_load_cluster_from_disk(m, lcn, false);
@@ -506,7 +506,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
/*
* If the 1st NONHEAD lcluster has already been handled initially w/o
- * valid compressedlcs, which means at least it mustn't be CBLKCNT, or
+ * valid compressedblks, which means at least it mustn't be CBLKCNT, or
* an internal implementation error is detected.
*
* The following code can also handle it properly anyway, but let's
@@ -523,12 +523,12 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
* if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
* rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
*/
- m->compressedlcs = 1;
+ m->compressedblks = 1 << (lclusterbits - LOG_BLOCK_SIZE);
break;
case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
if (m->delta[0] != 1)
goto err_bonus_cblkcnt;
- if (m->compressedlcs)
+ if (m->compressedblks)
break;
fallthrough;
default:
@@ -539,7 +539,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
return -EFSCORRUPTED;
}
out:
- map->m_plen = (u64)m->compressedlcs << lclusterbits;
+ map->m_plen = (u64)m->compressedblks << LOG_BLOCK_SIZE;
return 0;
err_bonus_cblkcnt:
erofs_err(m->inode->i_sb,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 27c720d71b4e..898dd95bc7a7 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -606,6 +606,31 @@ static inline gfp_t nfs_io_gfp_mask(void)
return GFP_KERNEL;
}
+/*
+ * Special version of should_remove_suid() that ignores capabilities.
+ */
+static inline int nfs_should_remove_suid(const struct inode *inode)
+{
+ umode_t mode = inode->i_mode;
+ int kill = 0;
+
+ /* suid always must be killed */
+ if (unlikely(mode & S_ISUID))
+ kill = ATTR_KILL_SUID;
+
+ /*
+ * sgid without any exec bits is just a mandatory locking mark; leave
+ * it alone. If some exec bits are set, it's a real sgid; kill it.
+ */
+ if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
+ kill |= ATTR_KILL_SGID;
+
+ if (unlikely(kill && S_ISREG(mode)))
+ return kill;
+
+ return 0;
+}
+
/* unlink.c */
extern struct rpc_task *
nfs_async_rename(struct inode *old_dir, struct inode *new_dir,
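
For reference, the helper's behaviour by example (regular files assumed; a hedged restatement of the code above):

	/*
	 *   mode 04755 (setuid)              -> ATTR_KILL_SUID
	 *   mode 06775 (setuid+setgid, g+x)  -> ATTR_KILL_SUID | ATTR_KILL_SGID
	 *   mode 02755 (setgid, g+x)         -> ATTR_KILL_SGID
	 *   mode 02644 (setgid, no g+x)      -> 0 (mandatory-locking marker)
	 */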
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 068c45b3bc1a..6dab9e408372 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -78,10 +78,15 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
status = nfs4_call_sync(server->client, server, msg,
&args.seq_args, &res.seq_res, 0);
- if (status == 0)
+ if (status == 0) {
+ if (nfs_should_remove_suid(inode)) {
+ spin_lock(&inode->i_lock);
+ nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
+ spin_unlock(&inode->i_lock);
+ }
status = nfs_post_op_update_inode_force_wcc(inode,
res.falloc_fattr);
-
+ }
if (msg->rpc_proc == &nfs4_procedures[NFSPROC4_CLNT_ALLOCATE])
trace_nfs4_fallocate(inode, &args, status);
else
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 82944e14fcea..ee66ffdb985e 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1051,22 +1051,31 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx)
if (ctx->bsize)
sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits);
- if (server->nfs_client->rpc_ops->version != 2) {
- /* The VFS shouldn't apply the umask to mode bits. We will do
- * so ourselves when necessary.
+ switch (server->nfs_client->rpc_ops->version) {
+ case 2:
+ sb->s_time_gran = 1000;
+ sb->s_time_min = 0;
+ sb->s_time_max = U32_MAX;
+ break;
+ case 3:
+ /*
+ * The VFS shouldn't apply the umask to mode bits.
+ * We will do so ourselves when necessary.
*/
sb->s_flags |= SB_POSIXACL;
sb->s_time_gran = 1;
- sb->s_export_op = &nfs_export_ops;
- } else
- sb->s_time_gran = 1000;
-
- if (server->nfs_client->rpc_ops->version != 4) {
sb->s_time_min = 0;
sb->s_time_max = U32_MAX;
- } else {
+ sb->s_export_op = &nfs_export_ops;
+ break;
+ case 4:
+ sb->s_flags |= SB_POSIXACL;
+ sb->s_time_gran = 1;
sb->s_time_min = S64_MIN;
sb->s_time_max = S64_MAX;
+ if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
+ sb->s_export_op = &nfs_export_ops;
+ break;
}
sb->s_magic = NFS_SUPER_MAGIC;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 1843fa235d9b..f41d24b54fd1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1496,31 +1496,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata)
NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}
-/*
- * Special version of should_remove_suid() that ignores capabilities.
- */
-static int nfs_should_remove_suid(const struct inode *inode)
-{
- umode_t mode = inode->i_mode;
- int kill = 0;
-
- /* suid always must be killed */
- if (unlikely(mode & S_ISUID))
- kill = ATTR_KILL_SUID;
-
- /*
- * sgid without any exec bits is just a mandatory locking mark; leave
- * it alone. If some exec bits are set, it's a real sgid; kill it.
- */
- if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
- kill |= ATTR_KILL_SGID;
-
- if (unlikely(kill && S_ISREG(mode)))
- return kill;
-
- return 0;
-}
-
static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
struct nfs_fattr *fattr)
{
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 9f486b788ed0..fc17b0ac8729 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -300,6 +300,10 @@ commit_metadata(struct svc_fh *fhp)
static void
nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap)
{
+ /* Ignore mode updates on symlinks */
+ if (S_ISLNK(inode->i_mode))
+ iap->ia_valid &= ~ATTR_MODE;
+
/* sanitize the mode change */
if (iap->ia_valid & ATTR_MODE) {
iap->ia_mode &= S_IALLUGO;
@@ -353,7 +357,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
int accmode = NFSD_MAY_SATTR;
umode_t ftype = 0;
__be32 err;
- int host_err;
+ int host_err = 0;
bool get_write_count;
bool size_change = (iap->ia_valid & ATTR_SIZE);
@@ -391,13 +395,6 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
dentry = fhp->fh_dentry;
inode = d_inode(dentry);
- /* Ignore any mode updates on symlinks */
- if (S_ISLNK(inode->i_mode))
- iap->ia_valid &= ~ATTR_MODE;
-
- if (!iap->ia_valid)
- return 0;
-
nfsd_sanitize_attrs(inode, iap);
if (check_guard && guardtime != inode->i_ctime.tv_sec)
@@ -448,8 +445,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
goto out_unlock;
}
- iap->ia_valid |= ATTR_CTIME;
- host_err = notify_change(&init_user_ns, dentry, iap, NULL);
+ if (iap->ia_valid) {
+ iap->ia_valid |= ATTR_CTIME;
+ host_err = notify_change(&init_user_ns, dentry, iap, NULL);
+ }
out_unlock:
if (attr->na_seclabel && attr->na_seclabel->len)
@@ -846,10 +845,14 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
struct splice_desc *sd)
{
struct svc_rqst *rqstp = sd->u.data;
-
- svc_rqst_replace_page(rqstp, buf->page);
- if (rqstp->rq_res.page_len == 0)
- rqstp->rq_res.page_base = buf->offset;
+ struct page *page = buf->page; // may be a compound one
+ unsigned offset = buf->offset;
+
+ page += offset / PAGE_SIZE;
+ for (int i = sd->len; i > 0; i -= PAGE_SIZE)
+ svc_rqst_replace_page(rqstp, page++);
+ if (rqstp->rq_res.page_len == 0) // first call
+ rqstp->rq_res.page_base = offset % PAGE_SIZE;
rqstp->rq_res.page_len += sd->len;
return sd->len;
}
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 6e89f0e2fd20..208efd4fa52c 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -115,6 +115,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#endif
show_val_kb(m, "PageTables: ",
global_node_page_state(NR_PAGETABLE));
+ show_val_kb(m, "SecPageTables: ",
+ global_node_page_state(NR_SECONDARY_PAGETABLE));
show_val_kb(m, "NFS_Unstable: ", 0);
show_val_kb(m, "Bounce: ",
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 81d26abf486f..da85b3979195 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -141,6 +141,8 @@ struct tracefs_mount_opts {
kuid_t uid;
kgid_t gid;
umode_t mode;
+ /* Opt_* bitfield. */
+ unsigned int opts;
};
enum {
@@ -241,6 +243,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
kgid_t gid;
char *p;
+ opts->opts = 0;
opts->mode = TRACEFS_DEFAULT_MODE;
while ((p = strsep(&data, ",")) != NULL) {
@@ -275,24 +278,36 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
* but traditionally tracefs has ignored all mount options
*/
}
+
+ opts->opts |= BIT(token);
}
return 0;
}
-static int tracefs_apply_options(struct super_block *sb)
+static int tracefs_apply_options(struct super_block *sb, bool remount)
{
struct tracefs_fs_info *fsi = sb->s_fs_info;
struct inode *inode = d_inode(sb->s_root);
struct tracefs_mount_opts *opts = &fsi->mount_opts;
- inode->i_mode &= ~S_IALLUGO;
- inode->i_mode |= opts->mode;
+ /*
+ * On remount, only reset mode/uid/gid if they were provided as mount
+ * options.
+ */
+
+ if (!remount || opts->opts & BIT(Opt_mode)) {
+ inode->i_mode &= ~S_IALLUGO;
+ inode->i_mode |= opts->mode;
+ }
- inode->i_uid = opts->uid;
+ if (!remount || opts->opts & BIT(Opt_uid))
+ inode->i_uid = opts->uid;
- /* Set all the group ids to the mount option */
- set_gid(sb->s_root, opts->gid);
+ if (!remount || opts->opts & BIT(Opt_gid)) {
+ /* Set all the group ids to the mount option */
+ set_gid(sb->s_root, opts->gid);
+ }
return 0;
}
@@ -307,7 +322,7 @@ static int tracefs_remount(struct super_block *sb, int *flags, char *data)
if (err)
goto fail;
- tracefs_apply_options(sb);
+ tracefs_apply_options(sb, true);
fail:
return err;
@@ -359,7 +374,7 @@ static int trace_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &tracefs_super_operations;
- tracefs_apply_options(sb);
+ tracefs_apply_options(sb, false);
return 0;
diff --git a/include/asm-generic/softirq_stack.h b/include/asm-generic/softirq_stack.h
index d3e2d81656e0..2a67aed9ac52 100644
--- a/include/asm-generic/softirq_stack.h
+++ b/include/asm-generic/softirq_stack.h
@@ -2,7 +2,7 @@
#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
#define __ASM_GENERIC_SOFTIRQ_STACK_H
-#if defined(CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK) && !defined(CONFIG_PREEMPT_RT)
+#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index a1705d6b3fba..7df7876b2ad5 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -319,8 +319,8 @@ enum drm_panel_orientation {
* EDID's detailed monitor range
*/
struct drm_monitor_range_info {
- u8 min_vfreq;
- u8 max_vfreq;
+ u16 min_vfreq;
+ u16 max_vfreq;
};
/**
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 2181977ae683..1ed61e2b30a4 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -92,6 +92,11 @@ struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
+#define DRM_EDID_RANGE_OFFSET_MIN_VFREQ (1 << 0) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_VFREQ (1 << 1) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MIN_HFREQ (1 << 2) /* 1.4 */
+#define DRM_EDID_RANGE_OFFSET_MAX_HFREQ (1 << 3) /* 1.4 */
+
#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00
#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01
#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
diff --git a/include/kunit/test.h b/include/kunit/test.h
index c958855681cc..840a2c375065 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -826,7 +826,7 @@ do { \
#define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \
KUNIT_BINARY_INT_ASSERTION(test, \
- KUNIT_ASSERTION, \
+ KUNIT_EXPECTATION, \
left, <=, right, \
fmt, \
##__VA_ARGS__)
@@ -1116,7 +1116,7 @@ do { \
#define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \
KUNIT_BINARY_INT_ASSERTION(test, \
- KUNIT_EXPECTATION, \
+ KUNIT_ASSERTION, \
left, <, right, \
fmt, \
##__VA_ARGS__)
@@ -1157,7 +1157,7 @@ do { \
#define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \
KUNIT_BINARY_INT_ASSERTION(test, \
- KUNIT_EXPECTATION, \
+ KUNIT_ASSERTION, \
left, >, right, \
fmt, \
##__VA_ARGS__)
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index e94cdf235f1d..5001e14c5c06 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -67,6 +67,7 @@ struct amba_device {
struct clk *pclk;
struct device_dma_parameters dma_parms;
unsigned int periphid;
+ struct mutex periphid_lock;
unsigned int cid;
struct amba_cs_uci_id uci;
unsigned int irq[AMBA_NR_IRQS];
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 089c9ade4325..df518c429667 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -138,6 +138,17 @@ BUFFER_FNS(Defer_Completion, defer_completion)
static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
{
/*
+ * If somebody else already set this uptodate, they will
+ * have done the memory barrier, and a reader will thus
+ * see *some* valid buffer state.
+ *
+ * Any other serialization (with IO errors or whatever that
+ * might clear the bit) has to come from other state (eg BH_Lock).
+ */
+ if (test_bit(BH_Uptodate, &bh->b_state))
+ return;
+
+ /*
* make it consistent with folio_mark_uptodate
* pairs with smp_load_acquire in buffer_uptodate
*/
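
The early return is a test-before-set optimization: test_bit() is a plain read that leaves the cacheline in shared state, while the unconditional atomic set_bit() would take it exclusive on every call to this hot path. Only the thread that actually transitions the bit needs to supply the barrier. A generic sketch of the pattern, with assumed word/bit names:

	static inline void set_flag_once(unsigned long *word, int bit)
	{
		/* Plain read; whoever set the bit already paired the barrier. */
		if (test_bit(bit, word))
			return;

		smp_mb__before_atomic();	/* order prior stores first */
		set_bit(bit, word);
	}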
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 01ce94b58b42..7713d7bcdaea 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -240,6 +240,12 @@ static inline void *offset_to_ptr(const int *off)
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
/*
+ * Whether 'type' is a signed type or an unsigned type. Supports scalar types,
+ * bool and also pointer types.
+ */
+#define is_signed_type(type) (((type)(-1)) < (__force type)1)
+
+/*
* This is needed in functions which generate the stack canary, see
* arch/x86/kernel/smpboot.c::start_secondary() for an example.
*/
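
With the definition centralized in compiler.h, the duplicate copies in overflow.h and trace_events.h can be dropped (see the removals below). The (__force type)1 cast keeps sparse quiet for bitwise types. Illustrative compile-time checks of the semantics (static_assert per linux/build_bug.h):

	static_assert(is_signed_type(int));
	static_assert(is_signed_type(long long));
	static_assert(!is_signed_type(unsigned long));
	static_assert(!is_signed_type(bool));		/* (bool)-1 == 1, not < 1 */
	static_assert(!is_signed_type(char *));	/* pointers compare unsigned */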
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index c869f1e73d75..f60674692d36 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -91,6 +91,8 @@ struct dentry *debugfs_create_automount(const char *name,
void debugfs_remove(struct dentry *dentry);
#define debugfs_remove_recursive debugfs_remove
+void debugfs_lookup_and_remove(const char *name, struct dentry *parent);
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
int debugfs_file_get(struct dentry *dentry);
@@ -225,6 +227,10 @@ static inline void debugfs_remove(struct dentry *dentry)
static inline void debugfs_remove_recursive(struct dentry *dentry)
{ }
+static inline void debugfs_lookup_and_remove(const char *name,
+ struct dentry *parent)
+{ }
+
const struct file_operations *debugfs_real_fops(const struct file *filp);
static inline int debugfs_file_get(struct dentry *dentry)
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
index 7acaabde5396..2114d65b862f 100644
--- a/include/linux/device/driver.h
+++ b/include/linux/device/driver.h
@@ -242,6 +242,7 @@ driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
extern int driver_deferred_probe_timeout;
void driver_deferred_probe_add(struct device *dev);
+int driver_deferred_probe_check_state(struct device *dev);
void driver_init(void);
/**
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 25a30906289d..0ee20b764000 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -139,7 +139,6 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs);
bool dma_can_mmap(struct device *dev);
-int dma_supported(struct device *dev, u64 mask);
bool dma_pci_p2pdma_supported(struct device *dev);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
@@ -248,10 +247,6 @@ static inline bool dma_can_mmap(struct device *dev)
{
return false;
}
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- return 0;
-}
static inline bool dma_pci_p2pdma_supported(struct device *dev)
{
return false;
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index d81a51978d01..8917a32173c4 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -65,6 +65,7 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
+extern int intel_iommu_enabled;
#define for_each_drhd_unit(drhd) \
list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
@@ -88,7 +89,8 @@ extern struct list_head dmar_drhd_units;
static inline bool dmar_rcu_check(void)
{
return rwsem_is_locked(&dmar_global_lock) ||
- system_state == SYSTEM_BOOTING;
+ system_state == SYSTEM_BOOTING ||
+ (IS_ENABLED(CONFIG_INTEL_IOMMU) && !intel_iommu_enabled);
}
#define dmar_rcu_dereference(p) rcu_dereference_check((p), dmar_rcu_check())
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 720874e6ee94..36e5dd84cf59 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -258,7 +258,7 @@ struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
/**
* fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
+ * @cookie: The cookie representing the cache object
* @will_modify: If cache is expected to be modified locally
*
* Request usage of the cookie attached to an object. The caller should tell
@@ -274,7 +274,7 @@ static inline void fscache_use_cookie(struct fscache_cookie *cookie,
/**
* fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
+ * @cookie: The cookie representing the cache object
* @aux_data: Updated auxiliary data (or NULL)
* @object_size: Revised size of the object (or NULL)
*
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h
index 6f1dee7e67e0..9be8704e2d38 100644
--- a/include/linux/hp_sdc.h
+++ b/include/linux/hp_sdc.h
@@ -180,7 +180,7 @@ switch (val) { \
#define HP_SDC_CMD_SET_IM 0x40 /* 010xxxxx == set irq mask */
-/* The documents provided do not explicitly state that all registers betweem
+/* The documents provided do not explicitly state that all registers between
* 0x01 and 0x1f inclusive can be read by sending their register index as a
* command, but this is implied and appears to be the case.
*/
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 55e6f4ad0ca6..b6e6d5b40774 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -310,9 +310,11 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
+ struct_group(addrs,
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ );
__le16 seq_ctrl;
u8 addr4[ETH_ALEN];
} __packed __aligned(2);
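
struct_group() emits an anonymous union containing both the individual fields and a named struct overlaying them, so addr1..addr3 stay addressable as before while also forming the single member addrs. Callers can then copy all three 6-byte addresses in one operation without FORTIFY_SOURCE treating it as an overread of addr1. Hedged usage sketch:

	struct ieee80211_hdr *dst, *src;	/* assumed valid */

	/* The destination object the compiler checks is dst->addrs
	 * (18 bytes), not the 6-byte dst->addr1. */
	memcpy(&dst->addrs, &src->addrs, sizeof(dst->addrs));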
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f4519d3689e1..32f259fa5801 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -151,12 +151,11 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_NO_ACTION BIT(10)
/*
* Architecture-independent vcpu->requests bit members
- * Bits 4-7 are reserved for more arch-independent bits.
+ * Bits 3-7 are reserved for more arch-independent bits.
*/
#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_VM_DEAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK 2
-#define KVM_REQ_UNHALT 3
#define KVM_REQUEST_ARCH_BASE 8
/*
@@ -2248,6 +2247,19 @@ static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
/*
+ * If more than one page is being (un)accounted, @virt must be the address of
+ * the first page of a block of pages that were allocated together (i.e.
+ * accounted together).
+ *
+ * kvm_account_pgtable_pages() is thread-safe because mod_lruvec_page_state()
+ * is thread-safe.
+ */
+static inline void kvm_account_pgtable_pages(void *virt, int nr)
+{
+ mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
+}
+
+/*
* This defines how many reserved entries we want to keep before we
* kick the vcpu to userspace to avoid the dirty ring becoming full.
* This value can be tuned higher if e.g. PML is enabled on the host.
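
Hypothetical call sites for the new accounting helper: bump NR_SECONDARY_PAGETABLE when a secondary-MMU page-table page is allocated, and drop it again on free (the helper names are illustrative, not from the patch):

	static void *alloc_spt_page(void)
	{
		void *virt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);

		if (virt)
			kvm_account_pgtable_pages(virt, 1);
		return virt;
	}

	static void free_spt_page(void *virt)
	{
		kvm_account_pgtable_pages(virt, -1);
		free_page((unsigned long)virt);
	}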
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index 806448173033..60fff133c0b1 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -407,4 +407,5 @@ LSM_HOOK(int, 0, perf_event_write, struct perf_event *event)
#ifdef CONFIG_IO_URING
LSM_HOOK(int, 0, uring_override_creds, const struct cred *new)
LSM_HOOK(int, 0, uring_sqpoll, void)
+LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd)
#endif /* CONFIG_IO_URING */
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 84a0d7e02176..3aa6030302f5 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1582,6 +1582,9 @@
* Check whether the current task is allowed to spawn a io_uring polling
* thread (IORING_SETUP_SQPOLL).
*
+ * @uring_cmd:
+ * Check whether the file_operations uring_cmd is allowed to run.
+ *
*/
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 7b7ce602c808..c32de987fa71 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1280,16 +1280,17 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
-static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
+bool mlx5_is_roce_on(struct mlx5_core_dev *dev);
+
+static inline bool mlx5_get_roce_state(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
- union devlink_param_value val;
- int err;
-
- err = devlink_param_driverinit_value_get(devlink,
- DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
- &val);
- return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ return MLX5_CAP_GEN(dev, roce);
+
+ /* If RoCE cap is read-only in FW, get RoCE state from devlink
+ * in order to support RoCE enable/disable feature
+ */
+ return mlx5_is_roce_on(dev);
}
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e24b40c52468..355d842d2731 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -216,6 +216,7 @@ enum node_stat_item {
NR_KERNEL_SCS_KB, /* measured in KiB */
#endif
NR_PAGETABLE, /* used for pagetables */
+ NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */
#ifdef CONFIG_SWAP
NR_SWAPCACHE,
#endif
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 1d7992a02e36..1a803e4335d3 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -101,8 +101,9 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
}
static inline int of_dma_configure_id(struct device *dev,
- struct device_node *np,
- bool force_dma)
+ struct device_node *np,
+ bool force_dma,
+ const u32 *id)
{
return 0;
}
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index f1221d11f8e5..0eb3b192f07a 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -30,7 +30,6 @@
* https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
* credit to Christian Biere.
*/
-#define is_signed_type(type) (((type)(-1)) < (type)1)
#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
#define type_min(T) ((T)((T)-type_max(T)-(T)1))
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6feade66efdb..15b49e655ce3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2079,6 +2079,9 @@
#define PCI_DEVICE_ID_ICE_1712 0x1712
#define PCI_DEVICE_ID_VT1724 0x1724
+#define PCI_VENDOR_ID_MICROSOFT 0x1414
+#define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
+
#define PCI_VENDOR_ID_OXSEMI 0x1415
#define PCI_DEVICE_ID_OXSEMI_12PCI840 0x8403
#define PCI_DEVICE_ID_OXSEMI_PCIe840 0xC000
diff --git a/include/linux/platform_data/x86/pmc_atom.h b/include/linux/platform_data/x86/pmc_atom.h
index 3edfb6d4e67a..dd81f510e4cf 100644
--- a/include/linux/platform_data/x86/pmc_atom.h
+++ b/include/linux/platform_data/x86/pmc_atom.h
@@ -7,6 +7,8 @@
#ifndef PMC_ATOM_H
#define PMC_ATOM_H
+#include <linux/bits.h>
+
/* ValleyView Power Control Unit PCI Device ID */
#define PCI_DEVICE_ID_VLV_PMC 0x0F1C
/* CherryTrail Power Control Unit PCI Device ID */
@@ -139,9 +141,9 @@
#define ACPI_MMIO_REG_LEN 0x100
#define PM1_CNT 0x4
-#define SLEEP_TYPE_MASK 0xFFFFECFF
+#define SLEEP_TYPE_MASK GENMASK(12, 10)
#define SLEEP_TYPE_S5 0x1C00
-#define SLEEP_ENABLE 0x2000
+#define SLEEP_ENABLE BIT(13)
extern int pmc_atom_read(int offset, u32 *value);
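
The named macros make the register layout explicit: bits 12:10 carry the sleep type and bit 13 is the sleep enable. The old 0xFFFFECFF constant was an inverted mask that does not appear to line up with the 12:10 field, so this looks like a behavioural fix as well as a cleanup. Illustrative compile-time checks of the new values:

	static_assert(GENMASK(12, 10) == 0x1c00);	/* sleep-type field */
	static_assert(BIT(13) == 0x2000);		/* sleep-enable bit */
	static_assert((SLEEP_TYPE_S5 & SLEEP_TYPE_MASK) == SLEEP_TYPE_S5);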
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index bf80adca980b..b89b4b86951f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -41,12 +41,15 @@ struct anon_vma {
atomic_t refcount;
/*
- * Count of child anon_vmas and VMAs which points to this anon_vma.
+ * Count of child anon_vmas. Equal to the count of all anon_vmas that
+ * have ->parent pointing to this one, including itself.
*
* This counter is used for making a decision about reusing an anon_vma
* instead of forking a new one. See the comments in anon_vma_clone().
*/
- unsigned degree;
+ unsigned long num_children;
+ /* Count of VMAs whose ->anon_vma pointer points to this object. */
+ unsigned long num_active_vmas;
struct anon_vma *parent; /* Parent of this anon_vma */
diff --git a/include/linux/security.h b/include/linux/security.h
index 1bc362cb413f..7bd0c490703d 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2060,6 +2060,7 @@ static inline int security_perf_event_write(struct perf_event *event)
#ifdef CONFIG_SECURITY
extern int security_uring_override_creds(const struct cred *new);
extern int security_uring_sqpoll(void);
+extern int security_uring_cmd(struct io_uring_cmd *ioucmd);
#else
static inline int security_uring_override_creds(const struct cred *new)
{
@@ -2069,6 +2070,10 @@ static inline int security_uring_sqpoll(void)
{
return 0;
}
+static inline int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return 0;
+}
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_IO_URING */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index aef3145f2032..6e4f4765d209 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -141,6 +141,14 @@ struct gpio_desc;
* Locking: none.
* Interrupts: caller dependent.
*
+ * @start_rx: ``void ()(struct uart_port *port)``
+ *
+ * Start receiving characters.
+ *
+ * Locking: @port->lock taken.
+ * Interrupts: locally disabled.
+ * This call must not sleep.
+ *
* @stop_rx: ``void ()(struct uart_port *port)``
*
* Stop receiving characters; the @port is in the process of being closed.
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ca8afa382bf2..18e163a3460d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2444,6 +2444,27 @@ static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
skb_shinfo(skb)->nr_frags = i + 1;
}
+/**
+ * skb_fill_page_desc_noacc - initialise a paged fragment in an skb
+ * @skb: buffer containing fragment to be initialised
+ * @i: paged fragment index to initialise
+ * @page: the page to use for this fragment
+ * @off: the offset to the data within @page
+ * @size: the length of the data
+ *
+ * Variant of skb_fill_page_desc() which does not deal with
+ * pfmemalloc, if page is not owned by us.
+ */
+static inline void skb_fill_page_desc_noacc(struct sk_buff *skb, int i,
+ struct page *page, int off,
+ int size)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ __skb_fill_page_desc_noacc(shinfo, i, page, off, size);
+ shinfo->nr_frags = i + 1;
+}
+
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size, unsigned int truesize);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index e6c73d5ff1a8..f089ee1ead58 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -469,6 +469,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
* SPI_TRANS_FAIL_NO_START.
* @queue_empty: signal green light for opportunistically skipping the queue
* for spi_sync transfers.
+ * @must_async: disable all fast paths in the core
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -690,6 +691,7 @@ struct spi_controller {
/* Flag for enabling opportunistic skipping of the queue in spi_sync */
bool queue_empty;
+ bool must_async;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index b18759a673c6..8401dec93c15 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -814,8 +814,6 @@ extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);
-#define is_signed_type(type) (((type)(-1)) < (type)1)
-
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
int trace_array_set_clr_event(struct trace_array *tr, const char *system,
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 254a2654400f..e96da4157d04 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -70,6 +70,7 @@ struct udp_sock {
* For encapsulation sockets.
*/
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+ void (*encap_err_rcv)(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
int (*encap_err_lookup)(struct sock *sk, struct sk_buff *skb);
void (*encap_destroy)(struct sock *sk);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index f7a9914fc97f..9ff1ad4dfad1 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -575,6 +575,7 @@ struct usb3_lpm_parameters {
* @devaddr: device address, XHCI: assigned by HW, others: same as devnum
* @can_submit: URBs may be submitted
* @persist_enabled: USB_PERSIST enabled for this device
+ * @reset_in_progress: the device is being reset
* @have_langid: whether string_langid is valid
* @authorized: policy has said we can use it;
* (user space) policy determines if we authorize this device to be
@@ -662,6 +663,7 @@ struct usb_device {
unsigned can_submit:1;
unsigned persist_enabled:1;
+ unsigned reset_in_progress:1;
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index cfb916cccd31..8d09c2f0a9b8 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -73,6 +73,11 @@ enum {
#define DP_CAP_USB BIT(7)
#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
#define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
+/* Get pin assignment taking plug & receptacle into consideration */
+#define DP_CAP_PIN_ASSIGN_UFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
+ DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index cee5f83c0f11..b69ca695935c 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -66,8 +66,6 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
-bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
- ktime_t *);
bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
unsigned long);
diff --git a/include/net/dropreason.h b/include/net/dropreason.h
index fae9b40e54fa..c1cbcdbaf149 100644
--- a/include/net/dropreason.h
+++ b/include/net/dropreason.h
@@ -3,6 +3,73 @@
#ifndef _LINUX_DROPREASON_H
#define _LINUX_DROPREASON_H
+#define DEFINE_DROP_REASON(FN, FNe) \
+ FN(NOT_SPECIFIED) \
+ FN(NO_SOCKET) \
+ FN(PKT_TOO_SMALL) \
+ FN(TCP_CSUM) \
+ FN(SOCKET_FILTER) \
+ FN(UDP_CSUM) \
+ FN(NETFILTER_DROP) \
+ FN(OTHERHOST) \
+ FN(IP_CSUM) \
+ FN(IP_INHDR) \
+ FN(IP_RPFILTER) \
+ FN(UNICAST_IN_L2_MULTICAST) \
+ FN(XFRM_POLICY) \
+ FN(IP_NOPROTO) \
+ FN(SOCKET_RCVBUFF) \
+ FN(PROTO_MEM) \
+ FN(TCP_MD5NOTFOUND) \
+ FN(TCP_MD5UNEXPECTED) \
+ FN(TCP_MD5FAILURE) \
+ FN(SOCKET_BACKLOG) \
+ FN(TCP_FLAGS) \
+ FN(TCP_ZEROWINDOW) \
+ FN(TCP_OLD_DATA) \
+ FN(TCP_OVERWINDOW) \
+ FN(TCP_OFOMERGE) \
+ FN(TCP_RFC7323_PAWS) \
+ FN(TCP_INVALID_SEQUENCE) \
+ FN(TCP_RESET) \
+ FN(TCP_INVALID_SYN) \
+ FN(TCP_CLOSE) \
+ FN(TCP_FASTOPEN) \
+ FN(TCP_OLD_ACK) \
+ FN(TCP_TOO_OLD_ACK) \
+ FN(TCP_ACK_UNSENT_DATA) \
+ FN(TCP_OFO_QUEUE_PRUNE) \
+ FN(TCP_OFO_DROP) \
+ FN(IP_OUTNOROUTES) \
+ FN(BPF_CGROUP_EGRESS) \
+ FN(IPV6DISABLED) \
+ FN(NEIGH_CREATEFAIL) \
+ FN(NEIGH_FAILED) \
+ FN(NEIGH_QUEUEFULL) \
+ FN(NEIGH_DEAD) \
+ FN(TC_EGRESS) \
+ FN(QDISC_DROP) \
+ FN(CPU_BACKLOG) \
+ FN(XDP) \
+ FN(TC_INGRESS) \
+ FN(UNHANDLED_PROTO) \
+ FN(SKB_CSUM) \
+ FN(SKB_GSO_SEG) \
+ FN(SKB_UCOPY_FAULT) \
+ FN(DEV_HDR) \
+ FN(DEV_READY) \
+ FN(FULL_RING) \
+ FN(NOMEM) \
+ FN(HDR_TRUNC) \
+ FN(TAP_FILTER) \
+ FN(TAP_TXFILTER) \
+ FN(ICMP_CSUM) \
+ FN(INVALID_PROTO) \
+ FN(IP_INADDRERRORS) \
+ FN(IP_INNOROUTES) \
+ FN(PKT_TOO_BIG) \
+ FNe(MAX)
+
/**
* enum skb_drop_reason - the reasons of skb drops
*
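DEFINE_DROP_REASON is an X-macro: each consumer supplies FN (applied to every entry) and FNe (applied only to the final entry, for places where a trailing comma is unwanted) and stamps out its own artifact from the one list. A sketch of one such expansion, building a name table keyed by the enum values this header defines:

#define FN(reason)  [SKB_DROP_REASON_##reason] = #reason,
#define FNe(reason) [SKB_DROP_REASON_##reason] = #reason
static const char * const drop_reason_names[] = {
        DEFINE_DROP_REASON(FN, FNe)
};
#undef FN
#undef FNe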
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 63fac94f9ace..ced80e2f8b58 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -246,7 +246,8 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
__be32 daddr, __be32 saddr,
__be32 key, __u8 tos,
struct net *net, int oif,
- __u32 mark, __u32 tun_inner_hash)
+ __u32 mark, __u32 tun_inner_hash,
+ __u8 flow_flags)
{
memset(fl4, 0, sizeof(*fl4));
@@ -263,6 +264,7 @@ static inline void ip_tunnel_init_flow(struct flowi4 *fl4,
fl4->fl4_gre_key = key;
fl4->flowi4_mark = mark;
fl4->flowi4_multipath_hash = tun_inner_hash;
+ fl4->flowi4_flags = flow_flags;
}
int ip_tunnel_init(struct net_device *dev);
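Every ip_tunnel_init_flow() caller now passes an extra flow_flags value, which lands in fl4->flowi4_flags for the subsequent route lookup. A hedged sketch of an updated call site; the wrapper function and its parameters stand in for a real driver's state:

static void example_tunnel_route(struct flowi4 *fl4, struct sk_buff *skb,
                                 struct net_device *dev, __be32 daddr,
                                 __be32 saddr, __be32 key, __u8 tos,
                                 __u8 flow_flags)
{
        /* the final argument is the new one */
        ip_tunnel_init_flow(fl4, IPPROTO_GRE, daddr, saddr, key, tos,
                            dev_net(dev), dev->ifindex, skb->mark,
                            skb_get_hash(skb), flow_flags);
}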
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index a32be8aa7ed2..6a2019aaa464 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -53,8 +53,6 @@ struct nf_conntrack_net {
/* only used when new connection is allocated: */
atomic_t count;
unsigned int expect_count;
- u8 sysctl_auto_assign_helper;
- bool auto_assign_helper_warned;
/* only used from work queues, configuration plane, and so on: */
unsigned int users4;
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index c396a3862e80..e1290c159184 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -101,7 +101,6 @@ struct netns_ct {
u8 sysctl_log_invalid; /* Log invalid packets */
u8 sysctl_events;
u8 sysctl_acct;
- u8 sysctl_auto_assign_helper;
u8 sysctl_tstamp;
u8 sysctl_checksum;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index c7320ef356d9..6320a76cefdc 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -179,6 +179,8 @@ struct netns_ipv4 {
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;
+ u32 tcp_challenge_timestamp;
+ u32 tcp_challenge_count;
int sysctl_udp_wmem_min;
int sysctl_udp_rmem_min;
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index afc7ce713657..72394f441dad 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -67,6 +67,9 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
struct sk_buff *skb);
+typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
+ struct sk_buff *skb,
+ unsigned int udp_offset);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
struct list_head *head,
@@ -80,6 +83,7 @@ struct udp_tunnel_sock_cfg {
__u8 encap_type;
udp_tunnel_encap_rcv_t encap_rcv;
udp_tunnel_encap_err_lookup_t encap_err_lookup;
+ udp_tunnel_encap_err_rcv_t encap_err_rcv;
udp_tunnel_encap_destroy_t encap_destroy;
udp_tunnel_gro_receive_t gro_receive;
udp_tunnel_gro_complete_t gro_complete;
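A tunnel opts into ICMP error delivery by filling the new encap_err_rcv field when configuring its socket; everything named my_* below is a hypothetical driver, not part of this patch:

static void my_tunnel_err_rcv(struct sock *sk, struct sk_buff *skb,
                              unsigned int udp_offset)
{
        /* look up tunnel state from sk; udp_offset locates the inner
         * UDP header within the ICMP payload carried in skb.
         */
}

static void my_tunnel_sock_init(struct net *net, struct socket *sock)
{
        struct udp_tunnel_sock_cfg cfg = {
                .encap_type    = 1,
                .encap_err_rcv = my_tunnel_err_rcv,  /* new callback */
                /* .encap_rcv and friends as before */
        };

        setup_udp_tunnel_sock(net, sock, &cfg);
}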
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3113471ca375..2493bd65351a 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -309,8 +309,6 @@ struct scsi_target {
struct list_head devices;
struct device dev;
struct kref reap_ref; /* last put renders target invisible */
- atomic_t sdev_count;
- wait_queue_head_t sdev_wq;
unsigned int channel;
unsigned int id; /* target id ... replace
* scsi_device.id eventually */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index aa7b7496c93a..9b0a028bf053 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -557,6 +557,8 @@ struct Scsi_Host {
struct scsi_host_template *hostt;
struct scsi_transport_template *transportt;
+ struct kref tagset_refcnt;
+ struct completion tagset_freed;
/* Area to keep a shared tag map */
struct blk_mq_tag_set tag_set;
@@ -690,9 +692,6 @@ struct Scsi_Host {
/* ldm bits */
struct device shost_gendev, shost_dev;
- atomic_t target_count;
- wait_queue_head_t targets_wq;
-
/*
* Points to the transport data (if any) which is allocated
* separately
diff --git a/include/soc/at91/sama7-ddr.h b/include/soc/at91/sama7-ddr.h
index 9e17247474fa..6ce3bd22f6c6 100644
--- a/include/soc/at91/sama7-ddr.h
+++ b/include/soc/at91/sama7-ddr.h
@@ -38,6 +38,14 @@
#define DDR3PHY_DSGCR_ODTPDD_ODT0 (1 << 20) /* ODT[0] Power Down Driver */
#define DDR3PHY_ZQ0SR0 (0x188) /* ZQ status register 0 */
+#define DDR3PHY_ZQ0SR0_PDO_OFF (0) /* Pull-down output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PUO_OFF (5) /* Pull-up output impedance select offset */
+#define DDR3PHY_ZQ0SR0_PDODT_OFF (10) /* Pull-down on-die termination impedance select offset */
+#define DDR3PHY_ZQ0SRO_PUODT_OFF (15) /* Pull-up on-die termination impedance select offset */
+
+#define DDR3PHY_DX0DLLCR (0x1CC) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DX1DLLCR (0x20C) /* DDR3PHY DATX8 DLL Control Register */
+#define DDR3PHY_DXDLLCR_DLLDIS (1 << 31) /* DLL Disable */
/* UDDRC */
#define UDDRC_STAT (0x04) /* UDDRC Operating Mode Status Register */
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 45264e4bb254..50a974f7dfb4 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -9,6 +9,15 @@
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
+#undef FN
+#define FN(reason) TRACE_DEFINE_ENUM(SKB_DROP_REASON_##reason);
+DEFINE_DROP_REASON(FN, FN)
+
+#undef FN
+#undef FNe
+#define FN(reason) { SKB_DROP_REASON_##reason, #reason },
+#define FNe(reason) { SKB_DROP_REASON_##reason, #reason }
+
/*
* Tracepoint for free an sk_buff:
*/
@@ -35,9 +44,13 @@ TRACE_EVENT(kfree_skb,
TP_printk("skbaddr=%p protocol=%u location=%p reason: %s",
__entry->skbaddr, __entry->protocol, __entry->location,
- drop_reasons[__entry->reason])
+ __print_symbolic(__entry->reason,
+ DEFINE_DROP_REASON(FN, FNe)))
);
+#undef FN
+#undef FNe
+
TRACE_EVENT(consume_skb,
TP_PROTO(struct sk_buff *skb),
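The FN/FNe pair is redefined twice in this header: first so TRACE_DEFINE_ENUM exports every SKB_DROP_REASON_* value to user space, then so DEFINE_DROP_REASON expands into the value/name pairs __print_symbolic() consumes, with FNe dropping the trailing comma after the last pair. After preprocessing, the TP_printk() above effectively reads (first pairs shown, the rest elided):

__print_symbolic(__entry->reason,
                 { SKB_DROP_REASON_NOT_SPECIFIED, "NOT_SPECIFIED" },
                 { SKB_DROP_REASON_NO_SOCKET, "NO_SOCKET" },
                 /* ... one pair per reason ... */
                 { SKB_DROP_REASON_MAX, "MAX" })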
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 7bf9ba1329be..59a217ca2dfd 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -79,7 +79,7 @@ struct bpf_insn {
/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
struct bpf_lpm_trie_key {
__u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
- __u8 data[]; /* Arbitrary size */
+ __u8 data[0]; /* Arbitrary size */
};
struct bpf_cgroup_storage_key {
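Reverting data[] back to data[0] keeps sizeof(struct bpf_lpm_trie_key) at 4 bytes, so existing user-space key allocation is unchanged. A hedged user-space sketch of building a key for an IPv4 /24 prefix:

#include <linux/bpf.h>
#include <stdlib.h>
#include <string.h>

static struct bpf_lpm_trie_key *make_v4_key(const void *addr)
{
        /* header (4 bytes; data[0] adds no size) + 4 address bytes */
        struct bpf_lpm_trie_key *key = malloc(sizeof(*key) + 4);

        if (!key)
                return NULL;
        key->prefixlen = 24;
        memcpy(key->data, addr, 4);
        return key;
}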
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 9e0b5c8d92ce..6b83177fd41d 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -71,8 +71,8 @@ struct io_uring_sqe {
__s32 splice_fd_in;
__u32 file_index;
struct {
- __u16 notification_idx;
__u16 addr_len;
+ __u16 __pad3[1];
};
};
union {
@@ -178,8 +178,7 @@ enum io_uring_op {
IORING_OP_FALLOCATE,
IORING_OP_OPENAT,
IORING_OP_CLOSE,
- IORING_OP_RSRC_UPDATE,
- IORING_OP_FILES_UPDATE = IORING_OP_RSRC_UPDATE,
+ IORING_OP_FILES_UPDATE,
IORING_OP_STATX,
IORING_OP_READ,
IORING_OP_WRITE,
@@ -206,7 +205,7 @@ enum io_uring_op {
IORING_OP_GETXATTR,
IORING_OP_SOCKET,
IORING_OP_URING_CMD,
- IORING_OP_SENDZC_NOTIF,
+ IORING_OP_SEND_ZC,
/* this goes last, obviously */
IORING_OP_LAST,
@@ -228,7 +227,6 @@ enum io_uring_op {
#define IORING_TIMEOUT_ETIME_SUCCESS (1U << 5)
#define IORING_TIMEOUT_CLOCK_MASK (IORING_TIMEOUT_BOOTTIME | IORING_TIMEOUT_REALTIME)
#define IORING_TIMEOUT_UPDATE_MASK (IORING_TIMEOUT_UPDATE | IORING_LINK_TIMEOUT_UPDATE)
-
/*
* sqe->splice_flags
* extends splice(2) flags
@@ -281,29 +279,16 @@ enum io_uring_op {
*
* IORING_RECVSEND_FIXED_BUF Use registered buffers, the index is stored in
* the buf_index field.
- *
- * IORING_RECVSEND_NOTIF_FLUSH Flush a notification after a successful
- * successful. Only for zerocopy sends.
*/
#define IORING_RECVSEND_POLL_FIRST (1U << 0)
#define IORING_RECV_MULTISHOT (1U << 1)
#define IORING_RECVSEND_FIXED_BUF (1U << 2)
-#define IORING_RECVSEND_NOTIF_FLUSH (1U << 3)
/*
* accept flags stored in sqe->ioprio
*/
#define IORING_ACCEPT_MULTISHOT (1U << 0)
-
-/*
- * IORING_OP_RSRC_UPDATE flags
- */
-enum {
- IORING_RSRC_UPDATE_FILES,
- IORING_RSRC_UPDATE_NOTIF,
-};
-
/*
* IORING_OP_MSG_RING command types, stored in sqe->addr
*/
@@ -341,10 +326,13 @@ struct io_uring_cqe {
* IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
* IORING_CQE_F_MORE If set, parent SQE will generate more CQE entries
* IORING_CQE_F_SOCK_NONEMPTY If set, more data to read after socket recv
+ * IORING_CQE_F_NOTIF Set for notification CQEs. Can be used to distinguish
+ * them from sends.
*/
#define IORING_CQE_F_BUFFER (1U << 0)
#define IORING_CQE_F_MORE (1U << 1)
#define IORING_CQE_F_SOCK_NONEMPTY (1U << 2)
+#define IORING_CQE_F_NOTIF (1U << 3)
enum {
IORING_CQE_BUFFER_SHIFT = 16,
@@ -485,10 +473,6 @@ enum {
/* register a range of fixed file slots for automatic slot allocation */
IORING_REGISTER_FILE_ALLOC_RANGE = 25,
- /* zerocopy notification API */
- IORING_REGISTER_NOTIFIERS = 26,
- IORING_UNREGISTER_NOTIFIERS = 27,
-
/* this goes last */
IORING_REGISTER_LAST
};
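With the slot API gone, an IORING_OP_SEND_ZC request yields at most two CQEs sharing its user_data: the send completion, carrying IORING_CQE_F_MORE when a notification will follow, and the notification itself, marked IORING_CQE_F_NOTIF once the kernel has released the buffer. A user-space sketch of telling them apart; the surrounding reaping loop is omitted:

#include <linux/io_uring.h>

static void handle_send_zc_cqe(const struct io_uring_cqe *cqe)
{
        if (cqe->flags & IORING_CQE_F_NOTIF) {
                /* notification: the buffer may be reused; res is 0 */
        } else if (cqe->flags & IORING_CQE_F_MORE) {
                /* send result; a notification CQE with the same
                 * user_data will arrive later
                 */
        } else {
                /* send failed before any data went out; no
                 * notification follows
                 */
        }
}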
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 29ced55514d4..6cb842ea8979 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -56,7 +56,7 @@
#define VIRTIO_NET_F_MQ 22 /* Device supports Receive Flow
* Steering */
#define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */
-#define VIRTIO_NET_F_NOTF_COAL 53 /* Guest can handle notifications coalescing */
+#define VIRTIO_NET_F_NOTF_COAL 53 /* Device supports notifications coalescing */
#define VIRTIO_NET_F_HASH_REPORT 57 /* Supports hash report */
#define VIRTIO_NET_F_RSS 60 /* Supports RSS RX steering */
#define VIRTIO_NET_F_RSC_EXT 61 /* extended coalescing info */
@@ -364,24 +364,24 @@ struct virtio_net_hash_config {
*/
#define VIRTIO_NET_CTRL_NOTF_COAL 6
/*
- * Set the tx-usecs/tx-max-packets patameters.
- * tx-usecs - Maximum number of usecs to delay a TX notification.
- * tx-max-packets - Maximum number of packets to send before a TX notification.
+ * Set the tx-usecs/tx-max-packets parameters.
*/
struct virtio_net_ctrl_coal_tx {
+ /* Maximum number of packets to send before a TX notification */
__le32 tx_max_packets;
+ /* Maximum number of usecs to delay a TX notification */
__le32 tx_usecs;
};
#define VIRTIO_NET_CTRL_NOTF_COAL_TX_SET 0
/*
- * Set the rx-usecs/rx-max-packets patameters.
- * rx-usecs - Maximum number of usecs to delay a RX notification.
- * rx-max-frames - Maximum number of packets to receive before a RX notification.
+ * Set the rx-usecs/rx-max-packets parameters.
*/
struct virtio_net_ctrl_coal_rx {
+ /* Maximum number of packets to receive before a RX notification */
__le32 rx_max_packets;
+ /* Maximum number of usecs to delay a RX notification */
__le32 rx_usecs;
};
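The reworded comments tie each field's meaning to its definition. A minimal sketch of a driver filling the TX command before issuing VIRTIO_NET_CTRL_NOTF_COAL_TX_SET; the chosen values are arbitrary:

static void fill_tx_coal(struct virtio_net_ctrl_coal_tx *coal)
{
        /* at most one TX notification per 64 packets or 50 usecs */
        coal->tx_max_packets = cpu_to_le32(64);
        coal->tx_usecs = cpu_to_le32(50);
}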
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 77616279000b..b9640ad5069f 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1728,6 +1728,7 @@ static void io_queue_async(struct io_kiocb *req, int ret)
switch (io_arm_poll_handler(req, 0)) {
case IO_APOLL_READY:
+ io_kbuf_recycle(req, 0);
io_req_task_queue(req);
break;
case IO_APOLL_ABORTED:
@@ -2640,7 +2641,6 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_unregister_personality(ctx, index);
if (ctx->rings)
io_poll_remove_all(ctx, NULL, true);
- io_notif_unregister(ctx);
mutex_unlock(&ctx->uring_lock);
/* failed during ring init, it couldn't have issued any requests */
@@ -3839,15 +3839,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_register_file_alloc_range(ctx, arg);
break;
- case IORING_REGISTER_NOTIFIERS:
- ret = io_notif_register(ctx, arg, nr_args);
- break;
- case IORING_UNREGISTER_NOTIFIERS:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_notif_unregister(ctx);
- break;
default:
ret = -EINVAL;
break;
@@ -3933,8 +3924,8 @@ static int __init io_uring_init(void)
BUILD_BUG_SQE_ELEM(42, __u16, personality);
BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
BUILD_BUG_SQE_ELEM(44, __u32, file_index);
- BUILD_BUG_SQE_ELEM(44, __u16, notification_idx);
- BUILD_BUG_SQE_ELEM(46, __u16, addr_len);
+ BUILD_BUG_SQE_ELEM(44, __u16, addr_len);
+ BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]);
BUILD_BUG_SQE_ELEM(48, __u64, addr3);
BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
BUILD_BUG_SQE_ELEM(56, __u64, __pad2);
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index d6af208d109f..746fbf31a703 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -91,9 +91,13 @@ static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
* buffer data. However if that buffer is recycled the original request
* data stored in addr is lost. Therefore forbid recycling for now.
*/
- if (req->opcode == IORING_OP_READV)
+ if (req->opcode == IORING_OP_READV) {
+ if ((req->flags & REQ_F_BUFFER_RING) && req->buf_list) {
+ req->buf_list->head++;
+ req->buf_list = NULL;
+ }
return;
-
+ }
if (req->flags & REQ_F_BUFFER_SELECTED)
io_kbuf_recycle_legacy(req, issue_flags);
if (req->flags & REQ_F_BUFFER_RING)
diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c
index 976c4ba68ee7..4a7e5d030c78 100644
--- a/io_uring/msg_ring.c
+++ b/io_uring/msg_ring.c
@@ -165,7 +165,8 @@ done:
req_set_fail(req);
io_req_set_res(req, ret, 0);
/* put file to avoid an attempt to IOPOLL the req */
- io_put_file(req->file);
+ if (!(req->flags & REQ_F_FIXED_FILE))
+ io_put_file(req->file);
req->file = NULL;
return IOU_OK;
}
diff --git a/io_uring/net.c b/io_uring/net.c
index 0af8a02df580..60e392f7f2dc 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -65,12 +65,12 @@ struct io_sendzc {
struct file *file;
void __user *buf;
size_t len;
- u16 slot_idx;
unsigned msg_flags;
unsigned flags;
unsigned addr_len;
void __user *addr;
size_t done_io;
+ struct io_kiocb *notif;
};
#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
@@ -879,18 +879,39 @@ out_free:
return ret;
}
+void io_sendzc_cleanup(struct io_kiocb *req)
+{
+ struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+
+ zc->notif->flags |= REQ_F_CQE_SKIP;
+ io_notif_flush(zc->notif);
+ zc->notif = NULL;
+}
+
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *notif;
- if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
+ if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) ||
+ READ_ONCE(sqe->__pad3[0]))
+ return -EINVAL;
+ /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
+ if (req->flags & REQ_F_CQE_SKIP)
return -EINVAL;
zc->flags = READ_ONCE(sqe->ioprio);
if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
- IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_NOTIF_FLUSH))
+ IORING_RECVSEND_FIXED_BUF))
return -EINVAL;
+ notif = zc->notif = io_alloc_notif(ctx);
+ if (!notif)
+ return -ENOMEM;
+ notif->cqe.user_data = req->cqe.user_data;
+ notif->cqe.res = 0;
+ notif->cqe.flags = IORING_CQE_F_NOTIF;
+ req->flags |= REQ_F_NEED_CLEANUP;
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
unsigned idx = READ_ONCE(sqe->buf_index);
@@ -898,13 +919,12 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EFAULT;
idx = array_index_nospec(idx, ctx->nr_user_bufs);
req->imu = READ_ONCE(ctx->user_bufs[idx]);
- io_req_set_rsrc_node(req, ctx, 0);
+ io_req_set_rsrc_node(notif, ctx, 0);
}
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
- zc->slot_idx = READ_ONCE(sqe->notification_idx);
if (zc->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
@@ -956,7 +976,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
shinfo->nr_frags = frag;
from->bvec += bi.bi_idx;
from->nr_segs -= bi.bi_idx;
- from->count = bi.bi_size;
+ from->count -= copied;
from->iov_offset = bi.bi_bvec_done;
skb->data_len += copied;
@@ -976,33 +996,17 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
struct sockaddr_storage __address, *addr = NULL;
- struct io_ring_ctx *ctx = req->ctx;
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
- struct io_notif_slot *notif_slot;
- struct io_kiocb *notif;
struct msghdr msg;
struct iovec iov;
struct socket *sock;
- unsigned msg_flags;
+ unsigned msg_flags, cflags;
int ret, min_ret = 0;
- if (!(req->flags & REQ_F_POLLED) &&
- (zc->flags & IORING_RECVSEND_POLL_FIRST))
- return -EAGAIN;
-
- if (issue_flags & IO_URING_F_UNLOCKED)
- return -EAGAIN;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
- notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
- if (!notif_slot)
- return -EINVAL;
- notif = io_get_notif(ctx, notif_slot);
- if (!notif)
- return -ENOMEM;
-
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
@@ -1023,6 +1027,10 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
msg.msg_namelen = zc->addr_len;
}
+ if (!(req->flags & REQ_F_POLLED) &&
+ (zc->flags & IORING_RECVSEND_POLL_FIRST))
+ return io_setup_async_addr(req, addr, issue_flags);
+
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
ret = io_import_fixed(WRITE, &msg.msg_iter, req->imu,
(u64)(uintptr_t)zc->buf, zc->len);
@@ -1033,7 +1041,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
&msg.msg_iter);
if (unlikely(ret))
return ret;
- ret = io_notif_account_mem(notif, zc->len);
+ ret = io_notif_account_mem(zc->notif, zc->len);
if (unlikely(ret))
return ret;
}
@@ -1045,7 +1053,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
min_ret = iov_iter_count(&msg.msg_iter);
msg.msg_flags = msg_flags;
- msg.msg_ubuf = &io_notif_to_data(notif)->uarg;
+ msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
msg.sg_from_iter = io_sg_from_iter;
ret = sock_sendmsg(sock, &msg);
@@ -1060,18 +1068,22 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_addr(req, addr, issue_flags);
}
+ if (ret < 0 && !zc->done_io)
+ zc->notif->flags |= REQ_F_CQE_SKIP;
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
- } else if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH) {
- io_notif_slot_flush_submit(notif_slot, 0);
}
if (ret >= 0)
ret += zc->done_io;
else if (zc->done_io)
ret = zc->done_io;
- io_req_set_res(req, ret, 0);
+
+ io_notif_flush(zc->notif);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
+ io_req_set_res(req, ret, cflags);
return IOU_OK;
}
diff --git a/io_uring/net.h b/io_uring/net.h
index f91f56c6eeac..d744a0a874e7 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -55,6 +55,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+void io_sendzc_cleanup(struct io_kiocb *req);
void io_netmsg_cache_free(struct io_cache_entry *entry);
#else
diff --git a/io_uring/notif.c b/io_uring/notif.c
index 96f076b175e0..e37c6569d82e 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -21,14 +21,6 @@ static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
io_req_task_complete(notif, locked);
}
-static inline void io_notif_complete(struct io_kiocb *notif)
- __must_hold(&notif->ctx->uring_lock)
-{
- bool locked = true;
-
- __io_notif_complete_tw(notif, &locked);
-}
-
static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
struct ubuf_info *uarg,
bool success)
@@ -42,8 +34,7 @@ static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
}
}
-struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
- struct io_notif_slot *slot)
+struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
struct io_kiocb *notif;
@@ -59,101 +50,23 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
io_get_task_refs(1);
notif->rsrc_node = NULL;
io_req_set_rsrc_node(notif, ctx, 0);
- notif->cqe.user_data = slot->tag;
- notif->cqe.flags = slot->seq++;
- notif->cqe.res = 0;
nd = io_notif_to_data(notif);
nd->account_pages = 0;
nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
nd->uarg.callback = io_uring_tx_zerocopy_callback;
- /* master ref owned by io_notif_slot, will be dropped on flush */
refcount_set(&nd->uarg.refcnt, 1);
return notif;
}
-void io_notif_slot_flush(struct io_notif_slot *slot)
+void io_notif_flush(struct io_kiocb *notif)
__must_hold(&notif->ctx->uring_lock)
{
- struct io_kiocb *notif = slot->notif;
struct io_notif_data *nd = io_notif_to_data(notif);
- slot->notif = NULL;
-
/* drop the master ref */
if (refcount_dec_and_test(&nd->uarg.refcnt)) {
notif->io_task_work.func = __io_notif_complete_tw;
io_req_task_work_add(notif);
}
}
-
-__cold int io_notif_unregister(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- int i;
-
- if (!ctx->notif_slots)
- return -ENXIO;
-
- for (i = 0; i < ctx->nr_notif_slots; i++) {
- struct io_notif_slot *slot = &ctx->notif_slots[i];
- struct io_kiocb *notif = slot->notif;
- struct io_notif_data *nd;
-
- if (!notif)
- continue;
- nd = io_notif_to_data(notif);
- slot->notif = NULL;
- if (!refcount_dec_and_test(&nd->uarg.refcnt))
- continue;
- notif->io_task_work.func = __io_notif_complete_tw;
- io_req_task_work_add(notif);
- }
-
- kvfree(ctx->notif_slots);
- ctx->notif_slots = NULL;
- ctx->nr_notif_slots = 0;
- return 0;
-}
-
-__cold int io_notif_register(struct io_ring_ctx *ctx,
- void __user *arg, unsigned int size)
- __must_hold(&ctx->uring_lock)
-{
- struct io_uring_notification_slot __user *slots;
- struct io_uring_notification_slot slot;
- struct io_uring_notification_register reg;
- unsigned i;
-
- if (ctx->nr_notif_slots)
- return -EBUSY;
- if (size != sizeof(reg))
- return -EINVAL;
- if (copy_from_user(&reg, arg, sizeof(reg)))
- return -EFAULT;
- if (!reg.nr_slots || reg.nr_slots > IORING_MAX_NOTIF_SLOTS)
- return -EINVAL;
- if (reg.resv || reg.resv2 || reg.resv3)
- return -EINVAL;
-
- slots = u64_to_user_ptr(reg.data);
- ctx->notif_slots = kvcalloc(reg.nr_slots, sizeof(ctx->notif_slots[0]),
- GFP_KERNEL_ACCOUNT);
- if (!ctx->notif_slots)
- return -ENOMEM;
-
- for (i = 0; i < reg.nr_slots; i++, ctx->nr_notif_slots++) {
- struct io_notif_slot *notif_slot = &ctx->notif_slots[i];
-
- if (copy_from_user(&slot, &slots[i], sizeof(slot))) {
- io_notif_unregister(ctx);
- return -EFAULT;
- }
- if (slot.resv[0] | slot.resv[1] | slot.resv[2]) {
- io_notif_unregister(ctx);
- return -EINVAL;
- }
- notif_slot->tag = slot.tag;
- }
- return 0;
-}
diff --git a/io_uring/notif.h b/io_uring/notif.h
index 80f6445e0c2b..5b4d710c8ca5 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -8,7 +8,6 @@
#include "rsrc.h"
#define IO_NOTIF_SPLICE_BATCH 32
-#define IORING_MAX_NOTIF_SLOTS (1U << 15)
struct io_notif_data {
struct file *file;
@@ -16,63 +15,14 @@ struct io_notif_data {
unsigned long account_pages;
};
-struct io_notif_slot {
- /*
- * Current/active notifier. A slot holds only one active notifier at a
- * time and keeps one reference to it. Flush releases the reference and
- * lazily replaces it with a new notifier.
- */
- struct io_kiocb *notif;
-
- /*
- * Default ->user_data for this slot notifiers CQEs
- */
- u64 tag;
- /*
- * Notifiers of a slot live in generations, we create a new notifier
- * only after flushing the previous one. Track the sequential number
- * for all notifiers and copy it into notifiers's cqe->cflags
- */
- u32 seq;
-};
-
-int io_notif_register(struct io_ring_ctx *ctx,
- void __user *arg, unsigned int size);
-int io_notif_unregister(struct io_ring_ctx *ctx);
-
-void io_notif_slot_flush(struct io_notif_slot *slot);
-struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
- struct io_notif_slot *slot);
+void io_notif_flush(struct io_kiocb *notif);
+struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
{
return io_kiocb_to_cmd(notif, struct io_notif_data);
}
-static inline struct io_kiocb *io_get_notif(struct io_ring_ctx *ctx,
- struct io_notif_slot *slot)
-{
- if (!slot->notif)
- slot->notif = io_alloc_notif(ctx, slot);
- return slot->notif;
-}
-
-static inline struct io_notif_slot *io_get_notif_slot(struct io_ring_ctx *ctx,
- unsigned idx)
- __must_hold(&ctx->uring_lock)
-{
- if (idx >= ctx->nr_notif_slots)
- return NULL;
- idx = array_index_nospec(idx, ctx->nr_notif_slots);
- return &ctx->notif_slots[idx];
-}
-
-static inline void io_notif_slot_flush_submit(struct io_notif_slot *slot,
- unsigned int issue_flags)
-{
- io_notif_slot_flush(slot);
-}
-
static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
{
struct io_ring_ctx *ctx = notif->ctx;
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 41410126c1c6..c4dddd0fd709 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -246,13 +246,12 @@ const struct io_op_def io_op_defs[] = {
.prep = io_close_prep,
.issue = io_close,
},
- [IORING_OP_RSRC_UPDATE] = {
+ [IORING_OP_FILES_UPDATE] = {
.audit_skip = 1,
.iopoll = 1,
- .name = "RSRC_UPDATE",
- .prep = io_rsrc_update_prep,
- .issue = io_rsrc_update,
- .ioprio = 1,
+ .name = "FILES_UPDATE",
+ .prep = io_files_update_prep,
+ .issue = io_files_update,
},
[IORING_OP_STATX] = {
.audit_skip = 1,
@@ -471,8 +470,8 @@ const struct io_op_def io_op_defs[] = {
.issue = io_uring_cmd,
.prep_async = io_uring_cmd_prep_async,
},
- [IORING_OP_SENDZC_NOTIF] = {
- .name = "SENDZC_NOTIF",
+ [IORING_OP_SEND_ZC] = {
+ .name = "SEND_ZC",
.needs_file = 1,
.unbound_nonreg_file = 1,
.pollout = 1,
@@ -484,6 +483,7 @@ const struct io_op_def io_op_defs[] = {
.prep = io_sendzc_prep,
.issue = io_sendzc,
.prep_async = io_sendzc_prep_async,
+ .cleanup = io_sendzc_cleanup,
#else
.prep = io_eopnotsupp_prep,
#endif
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 71359a4d0bd4..cf3272113214 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -15,14 +15,12 @@
#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"
-#include "notif.h"
struct io_rsrc_update {
struct file *file;
u64 arg;
u32 nr_args;
u32 offset;
- int type;
};
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
@@ -655,7 +653,7 @@ __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
}
-int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
@@ -669,7 +667,6 @@ int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!up->nr_args)
return -EINVAL;
up->arg = READ_ONCE(sqe->addr);
- up->type = READ_ONCE(sqe->ioprio);
return 0;
}
@@ -712,7 +709,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
return ret;
}
-static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
+int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
struct io_ring_ctx *ctx = req->ctx;
@@ -741,54 +738,6 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
-static int io_notif_update(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
- struct io_ring_ctx *ctx = req->ctx;
- unsigned len = up->nr_args;
- unsigned idx_end, idx = up->offset;
- int ret = 0;
-
- io_ring_submit_lock(ctx, issue_flags);
- if (unlikely(check_add_overflow(idx, len, &idx_end))) {
- ret = -EOVERFLOW;
- goto out;
- }
- if (unlikely(idx_end > ctx->nr_notif_slots)) {
- ret = -EINVAL;
- goto out;
- }
-
- for (; idx < idx_end; idx++) {
- struct io_notif_slot *slot = &ctx->notif_slots[idx];
-
- if (!slot->notif)
- continue;
- if (up->arg)
- slot->tag = up->arg;
- io_notif_slot_flush_submit(slot, issue_flags);
- }
-out:
- io_ring_submit_unlock(ctx, issue_flags);
- if (ret < 0)
- req_set_fail(req);
- io_req_set_res(req, ret, 0);
- return IOU_OK;
-}
-
-int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
-
- switch (up->type) {
- case IORING_RSRC_UPDATE_FILES:
- return io_files_update(req, issue_flags);
- case IORING_RSRC_UPDATE_NOTIF:
- return io_notif_update(req, issue_flags);
- }
- return -EINVAL;
-}
-
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
struct io_rsrc_node *node, void *rsrc)
{
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index f3a9a177941f..9bce15665444 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -167,8 +167,8 @@ static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
return &data->tags[table_idx][off];
}
-int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags);
-int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
+int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);
diff --git a/io_uring/rw.c b/io_uring/rw.c
index 1babd77da79c..76ebcfebc9a6 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -206,6 +206,20 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
return false;
}
+static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
+{
+ struct io_async_rw *io = req->async_data;
+
+ /* add previously done IO, if any */
+ if (req_has_async_data(req) && io->bytes_done > 0) {
+ if (res < 0)
+ res = io->bytes_done;
+ else
+ res += io->bytes_done;
+ }
+ return res;
+}
+
static void io_complete_rw(struct kiocb *kiocb, long res)
{
struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
@@ -213,7 +227,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
if (__io_complete_rw_common(req, res))
return;
- io_req_set_res(req, res, 0);
+ io_req_set_res(req, io_fixup_rw_res(req, res), 0);
req->io_task_work.func = io_req_task_complete;
io_req_task_work_add(req);
}
@@ -240,22 +254,14 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
static int kiocb_done(struct io_kiocb *req, ssize_t ret,
unsigned int issue_flags)
{
- struct io_async_rw *io = req->async_data;
struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
-
- /* add previously done IO, if any */
- if (req_has_async_data(req) && io->bytes_done > 0) {
- if (ret < 0)
- ret = io->bytes_done;
- else
- ret += io->bytes_done;
- }
+ unsigned final_ret = io_fixup_rw_res(req, ret);
if (req->flags & REQ_F_CUR_POS)
req->file->f_pos = rw->kiocb.ki_pos;
if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
if (!__io_complete_rw_common(req, ret)) {
- io_req_set_res(req, req->cqe.res,
+ io_req_set_res(req, final_ret,
io_put_kbuf(req, issue_flags));
return IOU_OK;
}
@@ -268,7 +274,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
if (io_resubmit_prep(req))
io_req_task_queue_reissue(req);
else
- io_req_task_queue_fail(req, ret);
+ io_req_task_queue_fail(req, final_ret);
}
return IOU_ISSUE_SKIP_COMPLETE;
}
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index b9989ae7b957..e78b6f980d77 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -3,6 +3,7 @@
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
+#include <linux/security.h>
#include <uapi/linux/io_uring.h>
@@ -88,6 +89,10 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
if (!req->file->f_op->uring_cmd)
return -EOPNOTSUPP;
+ ret = security_uring_cmd(ioucmd);
+ if (ret)
+ return ret;
+
if (ctx->flags & IORING_SETUP_SQE128)
issue_flags |= IO_URING_F_SQE128;
if (ctx->flags & IORING_SETUP_CQE32)
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 59b7eb60d5b4..4a400cd63731 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -921,8 +921,10 @@ static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
pos++;
}
}
+
+ /* no link or prog match, skip the cgroup of this layer */
+ continue;
found:
- BUG_ON(!cg);
progs = rcu_dereference_protected(
desc->bpf.effective[atype],
lockdep_is_held(&cgroup_mutex));
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c1e10d088dbb..3d9eb3ae334c 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -971,7 +971,7 @@ pure_initcall(bpf_jit_charge_init);
int bpf_jit_charge_modmem(u32 size)
{
- if (atomic_long_add_return(size, &bpf_jit_current) > bpf_jit_limit) {
+ if (atomic_long_add_return(size, &bpf_jit_current) > READ_ONCE(bpf_jit_limit)) {
if (!bpf_capable()) {
atomic_long_sub(size, &bpf_jit_current);
return -EPERM;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index a4d40d98428a..27760627370d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -5197,7 +5197,7 @@ syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_sys_bpf:
- return &bpf_sys_bpf_proto;
+ return !perfmon_capable() ? NULL : &bpf_sys_bpf_proto;
case BPF_FUNC_btf_find_by_name_kind:
return &bpf_btf_find_by_name_kind_proto;
case BPF_FUNC_sys_close:
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 096fdac70165..3eadb14e090b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6066,6 +6066,9 @@ skip_type_check:
return -EACCES;
}
meta->mem_size = reg->var_off.value;
+ err = mark_chain_precision(env, regno);
+ if (err)
+ return err;
break;
case ARG_PTR_TO_INT:
case ARG_PTR_TO_LONG:
@@ -7030,8 +7033,7 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx];
struct bpf_reg_state *regs = cur_regs(env), *reg;
struct bpf_map *map = meta->map_ptr;
- struct tnum range;
- u64 val;
+ u64 val, max;
int err;
if (func_id != BPF_FUNC_tail_call)
@@ -7041,10 +7043,11 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
return -EINVAL;
}
- range = tnum_range(0, map->max_entries - 1);
reg = &regs[BPF_REG_3];
+ val = reg->var_off.value;
+ max = map->max_entries;
- if (!register_is_const(reg) || !tnum_in(range, reg->var_off)) {
+ if (!(register_is_const(reg) && val < max)) {
bpf_map_key_store(aux, BPF_MAP_KEY_POISON);
return 0;
}
@@ -7052,8 +7055,6 @@ record_func_key(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
err = mark_chain_precision(env, BPF_REG_3);
if (err)
return err;
-
- val = reg->var_off.value;
if (bpf_map_key_unseen(aux))
bpf_map_key_store(aux, val);
else if (!bpf_map_key_poisoned(aux) &&
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 2caafd13f8aa..18c93c2276ca 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -350,11 +350,10 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
unsigned long *flags)
{
- unsigned int max_range = dma_get_max_seg_size(ref->dev);
struct dma_debug_entry *entry, index = *ref;
- unsigned int range = 0;
+ int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
- while (range <= max_range) {
+ for (int i = 0; i < limit; i++) {
entry = __hash_bucket_find(*bucket, ref, containing_match);
if (entry)
@@ -364,7 +363,6 @@ static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
* Nothing found, go back a hash bucket
*/
put_hash_bucket(*bucket, *flags);
- range += (1 << HASH_FN_SHIFT);
index.dev_addr -= (1 << HASH_FN_SHIFT);
*bucket = get_hash_bucket(&index, flags);
}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 49cbf3e33de7..27f272381cf2 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -707,7 +707,7 @@ int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
-int dma_supported(struct device *dev, u64 mask)
+static int dma_supported(struct device *dev, u64 mask)
{
const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -721,7 +721,6 @@ int dma_supported(struct device *dev, u64 mask)
return 1;
return ops->dma_supported(dev, mask);
}
-EXPORT_SYMBOL(dma_supported);
bool dma_pci_p2pdma_supported(struct device *dev)
{
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index c5a9190b218f..0ef6b12f961d 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -326,9 +326,6 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
swiotlb_adjust_nareas(num_possible_cpus());
nslabs = default_nslabs;
- if (nslabs < IO_TLB_MIN_SLABS)
- panic("%s: nslabs = %lu too small\n", __func__, nslabs);
-
/*
* By default allocate the bounce buffer memory from low memory, but
* allow to pick a location everywhere for hypervisors with guest
@@ -341,8 +338,7 @@ retry:
else
tlb = memblock_alloc_low(bytes, PAGE_SIZE);
if (!tlb) {
- pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
- __func__, bytes);
+ pr_warn("%s: failed to allocate tlb structure\n", __func__);
return;
}
@@ -579,7 +575,10 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
}
}
-#define slot_addr(start, idx) ((start) + ((idx) << IO_TLB_SHIFT))
+static inline phys_addr_t slot_addr(phys_addr_t start, phys_addr_t idx)
+{
+ return start + (idx << IO_TLB_SHIFT);
+}
/*
* Carefully handle integer overflow which can occur when boundary_mask == ~0UL.
@@ -765,7 +764,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
/*
* When dir == DMA_FROM_DEVICE we could omit the copy from the orig
* to the tlb buffer, if we knew for sure the device will
- * overwirte the entire current content. But we don't. Thus
+ * overwrite the entire current content. But we don't. Thus
* unconditional bounce may prevent leaking swiotlb content (i.e.
* kernel memory) to user-space.
*/
diff --git a/kernel/fork.c b/kernel/fork.c
index 90c85b17bf69..8a9e92068b15 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1225,6 +1225,7 @@ void mmput_async(struct mm_struct *mm)
schedule_work(&mm->async_put_work);
}
}
+EXPORT_SYMBOL_GPL(mmput_async);
#endif
/**
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 08350e35aba2..ca9d834d0b84 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1562,6 +1562,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
/* Ensure it is not in reserved area nor out of text */
if (!(core_kernel_text((unsigned long) p->addr) ||
is_module_text_address((unsigned long) p->addr)) ||
+ in_gate_area_no_mm((unsigned long) p->addr) ||
within_kprobe_blacklist((unsigned long) p->addr) ||
jump_label_text_reserved(p->addr, p->addr) ||
static_call_text_reserved(p->addr, p->addr) ||
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index bb3d63bdf4ae..667876da8382 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -416,7 +416,7 @@ void update_sched_domain_debugfs(void)
char buf[32];
snprintf(buf, sizeof(buf), "cpu%d", cpu);
- debugfs_remove(debugfs_lookup(buf, sd_dentry));
+ debugfs_lookup_and_remove(buf, sd_dentry);
d_cpu = debugfs_create_dir(buf, sd_dentry);
i = 0;
diff --git a/kernel/trace/rv/monitors/wip/wip.h b/kernel/trace/rv/monitors/wip/wip.h
index c1c47e2305ef..dacc37b62a2c 100644
--- a/kernel/trace/rv/monitors/wip/wip.h
+++ b/kernel/trace/rv/monitors/wip/wip.h
@@ -27,7 +27,7 @@ struct automaton_wip {
bool final_states[state_max_wip];
};
-struct automaton_wip automaton_wip = {
+static struct automaton_wip automaton_wip = {
.state_names = {
"preemptive",
"non_preemptive"
diff --git a/kernel/trace/rv/monitors/wwnr/wwnr.h b/kernel/trace/rv/monitors/wwnr/wwnr.h
index d1afe55cdd4c..118e576b91b4 100644
--- a/kernel/trace/rv/monitors/wwnr/wwnr.h
+++ b/kernel/trace/rv/monitors/wwnr/wwnr.h
@@ -27,7 +27,7 @@ struct automaton_wwnr {
bool final_states[state_max_wwnr];
};
-struct automaton_wwnr automaton_wwnr = {
+static struct automaton_wwnr automaton_wwnr = {
.state_names = {
"not_running",
"running"
diff --git a/kernel/trace/rv/reactor_panic.c b/kernel/trace/rv/reactor_panic.c
index b698d05dd069..d65f6c25a87c 100644
--- a/kernel/trace/rv/reactor_panic.c
+++ b/kernel/trace/rv/reactor_panic.c
@@ -24,13 +24,13 @@ static struct rv_reactor rv_panic = {
.react = rv_panic_reaction
};
-static int register_react_panic(void)
+static int __init register_react_panic(void)
{
rv_register_reactor(&rv_panic);
return 0;
}
-static void unregister_react_panic(void)
+static void __exit unregister_react_panic(void)
{
rv_unregister_reactor(&rv_panic);
}
diff --git a/kernel/trace/rv/reactor_printk.c b/kernel/trace/rv/reactor_printk.c
index 31899f953af4..4b6b7106a477 100644
--- a/kernel/trace/rv/reactor_printk.c
+++ b/kernel/trace/rv/reactor_printk.c
@@ -23,13 +23,13 @@ static struct rv_reactor rv_printk = {
.react = rv_printk_reaction
};
-static int register_react_printk(void)
+static int __init register_react_printk(void)
{
rv_register_reactor(&rv_printk);
return 0;
}
-static void unregister_react_printk(void)
+static void __exit unregister_react_printk(void)
{
rv_unregister_reactor(&rv_printk);
}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index cb866c3141af..918730d74932 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -142,7 +142,8 @@ static bool check_user_trigger(struct trace_event_file *file)
{
struct event_trigger_data *data;
- list_for_each_entry_rcu(data, &file->triggers, list) {
+ list_for_each_entry_rcu(data, &file->triggers, list,
+ lockdep_is_held(&event_mutex)) {
if (data->flags & EVENT_TRIGGER_FL_PROBE)
continue;
return true;
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 95b58bd757ce..1e130da1b742 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -95,14 +95,14 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
}
lockdep_hardirqs_on_prepare();
- lockdep_hardirqs_on(CALLER_ADDR0);
+ lockdep_hardirqs_on(caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);
NOKPROBE_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
- lockdep_hardirqs_off(CALLER_ADDR0);
+ lockdep_hardirqs_off(caller_addr);
if (!this_cpu_read(tracing_irq_cpu)) {
this_cpu_write(tracing_irq_cpu, 1);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 64ea283f2f86..ef42c1a11920 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -571,7 +571,8 @@ static void for_each_tracepoint_range(
bool trace_module_has_bad_taint(struct module *mod)
{
return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP) |
- (1 << TAINT_UNSIGNED_MODULE));
+ (1 << TAINT_UNSIGNED_MODULE) |
+ (1 << TAINT_TEST));
}
static BLOCKING_NOTIFIER_HEAD(tracepoint_notify_list);
@@ -647,7 +648,7 @@ static int tracepoint_module_coming(struct module *mod)
/*
* We skip modules that taint the kernel, especially those with different
* module headers (for forced load), to make sure we don't cause a crash.
- * Staging, out-of-tree, and unsigned GPL modules are fine.
+ * Staging, out-of-tree, unsigned GPL, and test modules are fine.
*/
if (trace_module_has_bad_taint(mod))
return 0;
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 9ff549f63540..47816af9a9d7 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
config CRYPTO_LIB_CHACHA_GENERIC
tristate
- select XOR_BLOCKS
help
This symbol can be depended upon by arch implementations of the
ChaCha library interface that require the generic code as a
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b69979c9ced5..9d054e3767ce 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1401,6 +1401,7 @@ static const struct memory_stat memory_stats[] = {
{ "kernel", MEMCG_KMEM },
{ "kernel_stack", NR_KERNEL_STACK_KB },
{ "pagetables", NR_PAGETABLE },
+ { "sec_pagetables", NR_SECONDARY_PAGETABLE },
{ "percpu", MEMCG_PERCPU_B },
{ "sock", MEMCG_SOCK },
{ "vmalloc", MEMCG_VMALLOC },
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5486d47406e..90461bd94744 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6039,7 +6039,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
" unevictable:%lu dirty:%lu writeback:%lu\n"
" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
- " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
+ " mapped:%lu shmem:%lu pagetables:%lu\n"
+ " sec_pagetables:%lu bounce:%lu\n"
" kernel_misc_reclaimable:%lu\n"
" free:%lu free_pcp:%lu free_cma:%lu\n",
global_node_page_state(NR_ACTIVE_ANON),
@@ -6056,6 +6057,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
global_node_page_state(NR_FILE_MAPPED),
global_node_page_state(NR_SHMEM),
global_node_page_state(NR_PAGETABLE),
+ global_node_page_state(NR_SECONDARY_PAGETABLE),
global_zone_page_state(NR_BOUNCE),
global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
global_zone_page_state(NR_FREE_PAGES),
@@ -6089,6 +6091,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
" shadow_call_stack:%lukB"
#endif
" pagetables:%lukB"
+ " sec_pagetables:%lukB"
" all_unreclaimable? %s"
"\n",
pgdat->node_id,
@@ -6114,6 +6117,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
K(node_page_state(pgdat, NR_PAGETABLE)),
+ K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
"yes" : "no");
}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 9b3db11a4d1d..fa7a3d21a751 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -110,7 +110,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
do {
again:
next = pmd_addr_end(addr, end);
- if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
+ if (pmd_none(*pmd)) {
if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
@@ -171,7 +171,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
do {
again:
next = pud_addr_end(addr, end);
- if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
+ if (pud_none(*pud)) {
if (ops->pte_hole)
err = ops->pte_hole(addr, next, depth, walk);
if (err)
@@ -366,19 +366,19 @@ static int __walk_page_range(unsigned long start, unsigned long end,
struct vm_area_struct *vma = walk->vma;
const struct mm_walk_ops *ops = walk->ops;
- if (vma && ops->pre_vma) {
+ if (ops->pre_vma) {
err = ops->pre_vma(start, end, walk);
if (err)
return err;
}
- if (vma && is_vm_hugetlb_page(vma)) {
+ if (is_vm_hugetlb_page(vma)) {
if (ops->hugetlb_entry)
err = walk_hugetlb_range(start, end, walk);
} else
err = walk_pgd_range(start, end, walk);
- if (vma && ops->post_vma)
+ if (ops->post_vma)
ops->post_vma(walk);
return err;
@@ -450,9 +450,13 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
if (!vma) { /* after the last vma */
walk.vma = NULL;
next = end;
+ if (ops->pte_hole)
+ err = ops->pte_hole(start, next, -1, &walk);
} else if (start < vma->vm_start) { /* outside vma */
walk.vma = NULL;
next = min(end, vma->vm_start);
+ if (ops->pte_hole)
+ err = ops->pte_hole(start, next, -1, &walk);
} else { /* inside vma */
walk.vma = vma;
next = min(end, vma->vm_end);
@@ -470,9 +474,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
}
if (err < 0)
break;
- }
- if (walk.vma || walk.ops->pte_hole)
err = __walk_page_range(start, next, &walk);
+ }
if (err)
break;
} while (start = next, start < end);
@@ -501,9 +504,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
if (start >= end || !walk.mm)
return -EINVAL;
- mmap_assert_locked(walk.mm);
+ mmap_assert_write_locked(walk.mm);
- return __walk_page_range(start, end, &walk);
+ return walk_pgd_range(start, end, &walk);
}
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
diff --git a/mm/ptdump.c b/mm/ptdump.c
index eea3d28d173c..8adab455a68b 100644
--- a/mm/ptdump.c
+++ b/mm/ptdump.c
@@ -152,13 +152,13 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
{
const struct ptdump_range *range = st->range;
- mmap_read_lock(mm);
+ mmap_write_lock(mm);
while (range->start != range->end) {
walk_page_range_novma(mm, range->start, range->end,
&ptdump_ops, pgd, st);
range++;
}
- mmap_read_unlock(mm);
+ mmap_write_unlock(mm);
/* Flush out the last page */
st->note_page(st, 0, -1, 0);
diff --git a/mm/rmap.c b/mm/rmap.c
index edc06c52bc82..93d5a6f793d2 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -93,7 +93,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
- anon_vma->degree = 1; /* Reference for first vma */
+ anon_vma->num_children = 0;
+ anon_vma->num_active_vmas = 0;
anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
@@ -201,6 +202,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
anon_vma = anon_vma_alloc();
if (unlikely(!anon_vma))
goto out_enomem_free_avc;
+ anon_vma->num_children++; /* self-parent link for new root */
allocated = anon_vma;
}
@@ -210,8 +212,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
- /* vma reference or self-parent link for new root */
- anon_vma->degree++;
+ anon_vma->num_active_vmas++;
allocated = NULL;
avc = NULL;
}
@@ -296,19 +297,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
anon_vma_chain_link(dst, avc, anon_vma);
/*
- * Reuse existing anon_vma if its degree lower than two,
- * that means it has no vma and only one anon_vma child.
+ * Reuse existing anon_vma if it has no vma and only one
+ * anon_vma child.
*
- * Do not choose parent anon_vma, otherwise first child
- * will always reuse it. Root anon_vma is never reused:
+ * Root anon_vma is never reused:
* it has self-parent reference and at least one child.
*/
if (!dst->anon_vma && src->anon_vma &&
- anon_vma != src->anon_vma && anon_vma->degree < 2)
+ anon_vma->num_children < 2 &&
+ anon_vma->num_active_vmas == 0)
dst->anon_vma = anon_vma;
}
if (dst->anon_vma)
- dst->anon_vma->degree++;
+ dst->anon_vma->num_active_vmas++;
unlock_anon_vma_root(root);
return 0;
@@ -358,6 +359,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
anon_vma = anon_vma_alloc();
if (!anon_vma)
goto out_error;
+ anon_vma->num_active_vmas++;
avc = anon_vma_chain_alloc(GFP_KERNEL);
if (!avc)
goto out_error_free_anon_vma;
@@ -378,7 +380,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
- anon_vma->parent->degree++;
+ anon_vma->parent->num_children++;
anon_vma_unlock_write(anon_vma);
return 0;
@@ -410,7 +412,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* to free them outside the lock.
*/
if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
- anon_vma->parent->degree--;
+ anon_vma->parent->num_children--;
continue;
}
@@ -418,7 +420,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
anon_vma_chain_free(avc);
}
if (vma->anon_vma) {
- vma->anon_vma->degree--;
+ vma->anon_vma->num_active_vmas--;
/*
* vma would still be needed after unlink, and anon_vma will be prepared
@@ -436,7 +438,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
- VM_WARN_ON(anon_vma->degree);
+ VM_WARN_ON(anon_vma->num_children);
+ VM_WARN_ON(anon_vma->num_active_vmas);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 17996649cfe3..07b948288f84 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -392,6 +392,28 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
}
EXPORT_SYMBOL(kmem_cache_create);
+#ifdef SLAB_SUPPORTS_SYSFS
+/*
+ * For a given kmem_cache, kmem_cache_destroy() should only be called
+ * once or there will be a use-after-free problem. The actual deletion
+ * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
+ * protection. So they are now done without holding those locks.
+ *
+ * Note that there will be a slight delay in the deletion of sysfs files
+ * if kmem_cache_release() is called indirectly from a work function.
+ */
+static void kmem_cache_release(struct kmem_cache *s)
+{
+ sysfs_slab_unlink(s);
+ sysfs_slab_release(s);
+}
+#else
+static void kmem_cache_release(struct kmem_cache *s)
+{
+ slab_kmem_cache_release(s);
+}
+#endif
+
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
LIST_HEAD(to_destroy);
@@ -418,11 +440,7 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
list_for_each_entry_safe(s, s2, &to_destroy, list) {
debugfs_slab_release(s);
kfence_shutdown_cache(s);
-#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_release(s);
-#else
- slab_kmem_cache_release(s);
-#endif
+ kmem_cache_release(s);
}
}
@@ -437,20 +455,11 @@ static int shutdown_cache(struct kmem_cache *s)
list_del(&s->list);
if (s->flags & SLAB_TYPESAFE_BY_RCU) {
-#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_unlink(s);
-#endif
list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
kfence_shutdown_cache(s);
debugfs_slab_release(s);
-#ifdef SLAB_SUPPORTS_SYSFS
- sysfs_slab_unlink(s);
- sysfs_slab_release(s);
-#else
- slab_kmem_cache_release(s);
-#endif
}
return 0;
@@ -465,14 +474,16 @@ void slab_kmem_cache_release(struct kmem_cache *s)
void kmem_cache_destroy(struct kmem_cache *s)
{
+ int refcnt;
+
if (unlikely(!s) || !kasan_check_byte(s))
return;
cpus_read_lock();
mutex_lock(&slab_mutex);
- s->refcount--;
- if (s->refcount)
+ refcnt = --s->refcount;
+ if (refcnt)
goto out_unlock;
WARN(shutdown_cache(s),
@@ -481,6 +492,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
out_unlock:
mutex_unlock(&slab_mutex);
cpus_read_unlock();
+ if (!refcnt && !(s->flags & SLAB_TYPESAFE_BY_RCU))
+ kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 90af9a8572f5..da264a040c55 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1247,6 +1247,7 @@ const char * const vmstat_text[] = {
"nr_shadow_call_stack",
#endif
"nr_page_table_pages",
+ "nr_sec_page_table_pages",
#ifdef CONFIG_SWAP
"nr_swapcached",
#endif
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 485c814cf44a..6643c9c20fa4 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -4179,6 +4179,17 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data,
}
}
+ if (i == ARRAY_SIZE(hci_cc_table)) {
+ /* Unknown opcode, assume byte 0 contains the status, so
+ * that e.g. __hci_cmd_sync() properly returns errors
+ * for vendor specific commands sent by HCI drivers.
+ * If a vendor doesn't actually follow this convention we may
+ * need to introduce a vendor CC table in order to properly set
+ * the status.
+ */
+ *status = skb->data[0];
+ }
+
handle_cmd_cnt_and_timer(hdev, ev->ncmd);
hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
@@ -5790,7 +5801,7 @@ static void le_conn_complete_evt(struct hci_dev *hdev, u8 status,
*/
hci_dev_clear_flag(hdev, HCI_LE_ADV);
- conn = hci_lookup_le_connect(hdev);
+ conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr);
if (!conn) {
/* In case of error status and there is no connection pending
* just unlock as there is nothing to cleanup.
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index e6d804b82b67..fbd5613eebfc 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -3018,12 +3018,6 @@ static const struct hci_init_stage amp_init2[] = {
/* Read Buffer Size (ACL mtu, max pkt, etc.) */
static int hci_read_buffer_size_sync(struct hci_dev *hdev)
{
- /* Use Read LE Buffer Size V2 if supported */
- if (hdev->commands[41] & 0x20)
- return __hci_cmd_sync_status(hdev,
- HCI_OP_LE_READ_BUFFER_SIZE_V2,
- 0, NULL, HCI_CMD_TIMEOUT);
-
return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
0, NULL, HCI_CMD_TIMEOUT);
}
@@ -3237,6 +3231,12 @@ static const struct hci_init_stage hci_init2[] = {
/* Read LE Buffer Size */
static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
{
+ /* Use Read LE Buffer Size V2 if supported */
+ if (hdev->commands[41] & 0x20)
+ return __hci_cmd_sync_status(hdev,
+ HCI_OP_LE_READ_BUFFER_SIZE_V2,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
0, NULL, HCI_CMD_TIMEOUT);
}
@@ -4773,9 +4773,11 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
/* Cleanup hci_conn object if it cannot be cancelled as it
 * likely means the controller and host stack are out of sync.
*/
- if (err)
+ if (err) {
+ hci_dev_lock(hdev);
hci_conn_failed(conn, err);
-
+ hci_dev_unlock(hdev);
+ }
return err;
case BT_CONNECT2:
return hci_reject_conn_sync(hdev, conn, reason);
@@ -5288,17 +5290,21 @@ int hci_suspend_sync(struct hci_dev *hdev)
/* Prevent disconnects from causing scanning to be re-enabled */
hci_pause_scan_sync(hdev);
- /* Soft disconnect everything (power off) */
- err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
- if (err) {
- /* Set state to BT_RUNNING so resume doesn't notify */
- hdev->suspend_state = BT_RUNNING;
- hci_resume_sync(hdev);
- return err;
- }
+ if (hci_conn_count(hdev)) {
+ /* Soft disconnect everything (power off) */
+ err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+ if (err) {
+ /* Set state to BT_RUNNING so resume doesn't notify */
+ hdev->suspend_state = BT_RUNNING;
+ hci_resume_sync(hdev);
+ return err;
+ }
- /* Update event mask so only the allowed event can wakeup the host */
- hci_set_event_mask_sync(hdev);
+ /* Update event mask so only the allowed event can wakeup the
+ * host.
+ */
+ hci_set_event_mask_sync(hdev);
+ }
/* Only configure accept list if disconnect succeeded and wake
* isn't being prevented.
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 5940744a8cd8..cc20e706c639 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -83,14 +83,14 @@ static void hidp_copy_session(struct hidp_session *session, struct hidp_conninfo
ci->product = session->input->id.product;
ci->version = session->input->id.version;
if (session->input->name)
- strlcpy(ci->name, session->input->name, 128);
+ strscpy(ci->name, session->input->name, 128);
else
- strlcpy(ci->name, "HID Boot Device", 128);
+ strscpy(ci->name, "HID Boot Device", 128);
} else if (session->hid) {
ci->vendor = session->hid->vendor;
ci->product = session->hid->product;
ci->version = session->hid->version;
- strlcpy(ci->name, session->hid->name, 128);
+ strscpy(ci->name, session->hid->name, 128);
}
}
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index ced8ad4fed4f..613039ba5dbf 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -1309,7 +1309,7 @@ static int iso_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
int err = 0;
- BT_DBG("sock %p, sk %p", sock, sk);
+ BT_DBG("sock %p, sk %p, how %d", sock, sk, how);
if (!sk)
return 0;
@@ -1317,17 +1317,32 @@ static int iso_sock_shutdown(struct socket *sock, int how)
sock_hold(sk);
lock_sock(sk);
- if (!sk->sk_shutdown) {
- sk->sk_shutdown = SHUTDOWN_MASK;
- iso_sock_clear_timer(sk);
- __iso_sock_close(sk);
-
- if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
- !(current->flags & PF_EXITING))
- err = bt_sock_wait_state(sk, BT_CLOSED,
- sk->sk_lingertime);
+ switch (how) {
+ case SHUT_RD:
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto unlock;
+ sk->sk_shutdown |= RCV_SHUTDOWN;
+ break;
+ case SHUT_WR:
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto unlock;
+ sk->sk_shutdown |= SEND_SHUTDOWN;
+ break;
+ case SHUT_RDWR:
+ if (sk->sk_shutdown & SHUTDOWN_MASK)
+ goto unlock;
+ sk->sk_shutdown |= SHUTDOWN_MASK;
+ break;
}
+ iso_sock_clear_timer(sk);
+ __iso_sock_close(sk);
+
+ if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
+ !(current->flags & PF_EXITING))
+ err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
+
+unlock:
release_sock(sk);
sock_put(sk);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index cbe0cae73434..2c9de67daadc 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1992,11 +1992,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
src_match = !bacmp(&c->src, src);
dst_match = !bacmp(&c->dst, dst);
if (src_match && dst_match) {
- c = l2cap_chan_hold_unless_zero(c);
- if (c) {
- read_unlock(&chan_list_lock);
- return c;
- }
+ if (!l2cap_chan_hold_unless_zero(c))
+ continue;
+
+ read_unlock(&chan_list_lock);
+ return c;
}
/* Closest match */
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 6e31023b84f5..72e6595a71cc 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -4547,6 +4547,22 @@ static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
MGMT_STATUS_NOT_SUPPORTED);
}
+static u32 get_params_flags(struct hci_dev *hdev,
+ struct hci_conn_params *params)
+{
+ u32 flags = hdev->conn_flags;
+
+ /* Devices using RPAs can only be programmed in the acceptlist if
+ * LL Privacy has been enabled, otherwise they cannot mark
+ * HCI_CONN_FLAG_REMOTE_WAKEUP.
+ */
+ if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
+ hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
+ flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
+
+ return flags;
+}
+
static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
u16 data_len)
{
@@ -4578,10 +4594,10 @@ static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
} else {
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));
-
if (!params)
goto done;
+ supported_flags = get_params_flags(hdev, params);
current_flags = params->flags;
}
@@ -4649,38 +4665,35 @@ static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
&cp->addr.bdaddr, cp->addr.type);
}
- } else {
- params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
- le_addr_type(cp->addr.type));
- if (params) {
- /* Devices using RPAs can only be programmed in the
- * acceptlist LL Privacy has been enable otherwise they
- * cannot mark HCI_CONN_FLAG_REMOTE_WAKEUP.
- */
- if ((current_flags & HCI_CONN_FLAG_REMOTE_WAKEUP) &&
- !use_ll_privacy(hdev) &&
- hci_find_irk_by_addr(hdev, &params->addr,
- params->addr_type)) {
- bt_dev_warn(hdev,
- "Cannot set wakeable for RPA");
- goto unlock;
- }
- params->flags = current_flags;
- status = MGMT_STATUS_SUCCESS;
+ goto unlock;
+ }
- /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
- * has been set.
- */
- if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
- hci_update_passive_scan(hdev);
- } else {
- bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
- &cp->addr.bdaddr,
- le_addr_type(cp->addr.type));
- }
+ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+ le_addr_type(cp->addr.type));
+ if (!params) {
+ bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
+ &cp->addr.bdaddr, le_addr_type(cp->addr.type));
+ goto unlock;
}
+ supported_flags = get_params_flags(hdev, params);
+
+ if ((supported_flags | current_flags) != supported_flags) {
+ bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
+ current_flags, supported_flags);
+ goto unlock;
+ }
+
+ params->flags = current_flags;
+ status = MGMT_STATUS_SUCCESS;
+
+ /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
+ * has been set.
+ */
+ if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY)
+ hci_update_passive_scan(hdev);
+
unlock:
hci_dev_unlock(hdev);
@@ -5054,7 +5067,6 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
else
status = MGMT_STATUS_FAILED;
- mgmt_pending_remove(cmd);
goto unlock;
}
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index ff4779036649..f20f4373ff40 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -384,6 +384,7 @@ static int br_nf_pre_routing_finish(struct net *net, struct sock *sk, struct sk_
/* - Bridged-and-DNAT'ed traffic doesn't
* require ip_forwarding. */
if (rt->dst.dev == dev) {
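+ /* Free the old dst before setting the new one to avoid a refcount leak. */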
+ skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
goto bridged_dnat;
}
@@ -413,6 +414,7 @@ bridged_dnat:
kfree_skb(skb);
return 0;
}
+ skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}
diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
index e4e0c836c3f5..6b07f30675bb 100644
--- a/net/bridge/br_netfilter_ipv6.c
+++ b/net/bridge/br_netfilter_ipv6.c
@@ -197,6 +197,7 @@ static int br_nf_pre_routing_finish_ipv6(struct net *net, struct sock *sk, struc
kfree_skb(skb);
return 0;
}
+ skb_dst_drop(skb);
skb_dst_set_noref(skb, &rt->dst);
}
diff --git a/net/core/.gitignore b/net/core/.gitignore
deleted file mode 100644
index df1e74372cce..000000000000
--- a/net/core/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-dropreason_str.c
diff --git a/net/core/Makefile b/net/core/Makefile
index e8ce3bd283a6..5857cec87b83 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -5,7 +5,7 @@
obj-y := sock.o request_sock.o skbuff.o datagram.o stream.o scm.o \
gen_stats.o gen_estimator.o net_namespace.o secure_seq.o \
- flow_dissector.o dropreason_str.o
+ flow_dissector.o
obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
@@ -40,23 +40,3 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
-
-clean-files := dropreason_str.c
-
-quiet_cmd_dropreason_str = GEN $@
-cmd_dropreason_str = awk -F ',' 'BEGIN{ print "\#include <net/dropreason.h>\n"; \
- print "const char * const drop_reasons[] = {" }\
- /^enum skb_drop/ { dr=1; }\
- /^\};/ { dr=0; }\
- /^\tSKB_DROP_REASON_/ {\
- if (dr) {\
- sub(/\tSKB_DROP_REASON_/, "", $$1);\
- printf "\t[SKB_DROP_REASON_%s] = \"%s\",\n", $$1, $$1;\
- }\
- }\
- END{ print "};" }' $< > $@
-
-$(obj)/dropreason_str.c: $(srctree)/include/net/dropreason.h
- $(call cmd,dropreason_str)
-
-$(obj)/dropreason_str.o: $(obj)/dropreason_str.c
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 7255531f63ae..e4ff2db40c98 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -677,7 +677,7 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk,
page_ref_sub(last_head, refs);
refs = 0;
}
- skb_fill_page_desc(skb, frag++, head, start, size);
+ skb_fill_page_desc_noacc(skb, frag++, head, start, size);
}
if (refs)
page_ref_sub(last_head, refs);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 84bb5e188d0d..417463da4fac 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -91,7 +91,11 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);
-/* The array 'drop_reasons' is auto-generated in dropreason_str.c */
+#undef FN
+#define FN(reason) [SKB_DROP_REASON_##reason] = #reason,
+const char * const drop_reasons[] = {
+ DEFINE_DROP_REASON(FN, FN)
+};
EXPORT_SYMBOL(drop_reasons);
/**
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 59e75ffcc1f4..188f8558d27d 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -461,7 +461,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
if (copied == len)
break;
- } while (!sg_is_last(sge));
+ } while ((i != msg_rx->sg.end) && !sg_is_last(sge));
if (unlikely(peek)) {
msg_rx = sk_psock_next_msg(psock, msg_rx);
@@ -471,7 +471,7 @@ int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
}
msg_rx->sg.start = i;
- if (!sge->length && sg_is_last(sge)) {
+ if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
msg_rx = sk_psock_dequeue_msg(psock);
kfree_sk_msg(msg_rx);
}
diff --git a/net/dsa/tag_hellcreek.c b/net/dsa/tag_hellcreek.c
index eb204ad36eee..846588c0070a 100644
--- a/net/dsa/tag_hellcreek.c
+++ b/net/dsa/tag_hellcreek.c
@@ -45,7 +45,7 @@ static struct sk_buff *hellcreek_rcv(struct sk_buff *skb,
skb->dev = dsa_master_find_slave(dev, 0, port);
if (!skb->dev) {
- netdev_warn(dev, "Failed to get source port: %d\n", port);
+ netdev_warn_once(dev, "Failed to get source port: %d\n", port);
return NULL;
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index f361d3d56be2..943edf4ad4db 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -389,7 +389,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
dev_match = dev_match || (res.type == RTN_LOCAL &&
dev == net->loopback_dev);
if (dev_match) {
- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
return ret;
}
if (no_addr)
@@ -401,7 +401,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
ret = 0;
if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
if (res.type == RTN_UNICAST)
- ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_HOST;
+ ret = FIB_RES_NHC(res)->nhc_scope >= RT_SCOPE_LINK;
}
return ret;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5c58e21f724e..f866d6282b2b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -609,7 +609,7 @@ static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id),
key->tos & ~INET_ECN_MASK, dev_net(dev), 0,
- skb->mark, skb_get_hash(skb));
+ skb->mark, skb_get_hash(skb), key->flow_flags);
rt = ip_route_output_key(dev_net(dev), &fl4);
if (IS_ERR(rt))
return PTR_ERR(rt);
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e65e948cab9f..019f3b0839c5 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -295,7 +295,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
iph->saddr, tunnel->parms.o_key,
RT_TOS(iph->tos), dev_net(dev),
- tunnel->parms.link, tunnel->fwmark, 0);
+ tunnel->parms.link, tunnel->fwmark, 0, 0);
rt = ip_route_output_key(tunnel->net, &fl4);
if (!IS_ERR(rt)) {
@@ -570,7 +570,8 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
- dev_net(dev), 0, skb->mark, skb_get_hash(skb));
+ dev_net(dev), 0, skb->mark, skb_get_hash(skb),
+ key->flow_flags);
if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
goto tx_error;
@@ -729,7 +730,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos),
dev_net(dev), tunnel->parms.link,
- tunnel->fwmark, skb_get_hash(skb));
+ tunnel->fwmark, skb_get_hash(skb), 0);
if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
goto tx_error;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e5011c136fdb..6cdfce6f2867 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1015,7 +1015,7 @@ new_segment:
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
} else {
get_page(page);
- skb_fill_page_desc(skb, i, page, offset, copy);
+ skb_fill_page_desc_noacc(skb, i, page, offset, copy);
}
if (!(flags & MSG_NO_SHARED_FRAGS))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ab5f0ea166f1..bc2ea12221f9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2513,6 +2513,21 @@ static inline bool tcp_may_undo(const struct tcp_sock *tp)
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
+static bool tcp_is_non_sack_preventing_reopen(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
+ /* Hold old state until something *above* high_seq
+ * is ACKed. For Reno it is MUST to prevent false
+ * fast retransmits (RFC2582). SACK TCP is safe. */
+ if (!tcp_any_retrans_done(sk))
+ tp->retrans_stamp = 0;
+ return true;
+ }
+ return false;
+}
+
/* People celebrate: "We love our President!" */
static bool tcp_try_undo_recovery(struct sock *sk)
{
@@ -2535,14 +2550,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
} else if (tp->rack.reo_wnd_persist) {
tp->rack.reo_wnd_persist--;
}
- if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
- /* Hold old state until something *above* high_seq
- * is ACKed. For Reno it is MUST to prevent false
- * fast retransmits (RFC2582). SACK TCP is safe. */
- if (!tcp_any_retrans_done(sk))
- tp->retrans_stamp = 0;
+ if (tcp_is_non_sack_preventing_reopen(sk))
return true;
- }
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
return false;
@@ -2578,6 +2587,8 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPSPURIOUSRTOS);
inet_csk(sk)->icsk_retransmits = 0;
+ if (tcp_is_non_sack_preventing_reopen(sk))
+ return true;
if (frto_undo || tcp_is_sack(tp)) {
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
@@ -3614,12 +3625,9 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
/* RFC 5961 7 [ACK Throttling] */
static void tcp_send_challenge_ack(struct sock *sk)
{
- /* unprotected vars, we dont care of overwrites */
- static u32 challenge_timestamp;
- static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
- u32 count, now;
+ u32 count, now, ack_limit;
/* First check our per-socket dupack rate limit. */
if (__tcp_oow_rate_limited(net,
@@ -3627,18 +3635,22 @@ static void tcp_send_challenge_ack(struct sock *sk)
&tp->last_oow_ack_time))
return;
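+ /* An ack_limit of INT_MAX disables the host-wide rate limit. */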
+ ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+ if (ack_limit == INT_MAX)
+ goto send_ack;
+
/* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
- if (now != challenge_timestamp) {
- u32 ack_limit = READ_ONCE(net->ipv4.sysctl_tcp_challenge_ack_limit);
+ if (now != READ_ONCE(net->ipv4.tcp_challenge_timestamp)) {
u32 half = (ack_limit + 1) >> 1;
- challenge_timestamp = now;
- WRITE_ONCE(challenge_count, half + prandom_u32_max(ack_limit));
+ WRITE_ONCE(net->ipv4.tcp_challenge_timestamp, now);
+ WRITE_ONCE(net->ipv4.tcp_challenge_count, half + prandom_u32_max(ack_limit));
}
- count = READ_ONCE(challenge_count);
+ count = READ_ONCE(net->ipv4.tcp_challenge_count);
if (count > 0) {
- WRITE_ONCE(challenge_count, count - 1);
+ WRITE_ONCE(net->ipv4.tcp_challenge_count, count - 1);
+send_ack:
NET_INC_STATS(net, LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c83780dc9bf..5b019ba2b9d2 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3139,8 +3139,10 @@ static int __net_init tcp_sk_init(struct net *net)
net->ipv4.sysctl_tcp_tso_win_divisor = 3;
/* Default TSQ limit of 16 TSO segments */
net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
- /* rfc5961 challenge ack rate limiting */
- net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
+
+ /* rfc5961 challenge ack rate limiting, per net-ns, disabled by default. */
+ net->ipv4.sysctl_tcp_challenge_ack_limit = INT_MAX;
+
net->ipv4.sysctl_tcp_min_tso_segs = 2;
net->ipv4.sysctl_tcp_tso_rtt_log = 9; /* 2^9 = 512 usec */
net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 34eda973bbf1..cd72158e953a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -783,6 +783,8 @@ int __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
*/
if (tunnel) {
/* ...not for tunnels though: we don't have a sending socket */
+ if (udp_sk(sk)->encap_err_rcv)
+ udp_sk(sk)->encap_err_rcv(sk, skb, iph->ihl << 2);
goto out;
}
if (!inet->recverr) {
diff --git a/net/ipv4/udp_tunnel_core.c b/net/ipv4/udp_tunnel_core.c
index 8efaf8c3fe2a..8242c8947340 100644
--- a/net/ipv4/udp_tunnel_core.c
+++ b/net/ipv4/udp_tunnel_core.c
@@ -72,6 +72,7 @@ void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
udp_sk(sk)->encap_type = cfg->encap_type;
udp_sk(sk)->encap_rcv = cfg->encap_rcv;
+ udp_sk(sk)->encap_err_rcv = cfg->encap_err_rcv;
udp_sk(sk)->encap_err_lookup = cfg->encap_err_lookup;
udp_sk(sk)->encap_destroy = cfg->encap_destroy;
udp_sk(sk)->gro_receive = cfg->gro_receive;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e15f64f22fa8..10ce86bf228e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3557,11 +3557,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
fallthrough;
case NETDEV_UP:
case NETDEV_CHANGE:
- if (dev->flags & IFF_SLAVE)
+ if (idev && idev->cnf.disable_ipv6)
break;
- if (idev && idev->cnf.disable_ipv6)
+ if (dev->flags & IFF_SLAVE) {
+ if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
+ dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
+ ipv6_mc_up(idev);
break;
+ }
if (event == NETDEV_UP) {
/* restore routes for permanent addresses */
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 73aaabf0e966..0b0e34ddc64e 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -191,6 +191,11 @@ static int seg6_genl_sethmac(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
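+ /* The declared secret length must not exceed the attribute payload. */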
+ if (slen > nla_len(info->attrs[SEG6_ATTR_SECRET])) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
if (hinfo) {
err = seg6_hmac_info_del(net, hmackeyid);
if (err)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 16c176e7c69a..3366d6a77ff2 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -616,8 +616,11 @@ int __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
}
/* Tunnels don't have an application socket: don't pass errors back */
- if (tunnel)
+ if (tunnel) {
+ if (udp_sk(sk)->encap_err_rcv)
+ udp_sk(sk)->encap_err_rcv(sk, skb, offset);
goto out;
+ }
if (!np->recverr) {
if (!harderr || sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 71899e5a5a11..1215c863e1c4 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1412,12 +1412,6 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
psock->sk = csk;
psock->bpf_prog = prog;
- err = strp_init(&psock->strp, csk, &cb);
- if (err) {
- kmem_cache_free(kcm_psockp, psock);
- goto out;
- }
-
write_lock_bh(&csk->sk_callback_lock);
/* Check if sk_user_data is already by KCM or someone else.
@@ -1425,13 +1419,18 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
*/
if (csk->sk_user_data) {
write_unlock_bh(&csk->sk_callback_lock);
- strp_stop(&psock->strp);
- strp_done(&psock->strp);
kmem_cache_free(kcm_psockp, psock);
err = -EALREADY;
goto out;
}
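+ /* Only initialise the strparser once the sk_user_data check has passed. */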
+ err = strp_init(&psock->strp, csk, &cb);
+ if (err) {
+ write_unlock_bh(&csk->sk_callback_lock);
+ kmem_cache_free(kcm_psockp, psock);
+ goto out;
+ }
+
psock->save_data_ready = csk->sk_data_ready;
psock->save_write_space = csk->sk_write_space;
psock->save_state_change = csk->sk_state_change;
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index d56890e3fabb..9b283bbc7bb4 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -530,6 +530,10 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
sdata_assert_lock(sdata);
+ /* When not connected/joined, sending CSA doesn't make sense. */
+ if (ifibss->state != IEEE80211_IBSS_MLME_JOINED)
+ return -ENOLINK;
+
/* update cfg80211 bss information with the new channel */
if (!is_zero_ether_addr(ifibss->bssid)) {
cbss = cfg80211_get_bss(sdata->local->hw.wiphy,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 3d4ab711f0d1..5265d2b6db12 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3420,11 +3420,11 @@ static void ieee80211_destroy_auth_data(struct ieee80211_sub_if_data *sdata,
ieee80211_link_info_change_notify(sdata, &sdata->deflink,
BSS_CHANGED_BSSID);
sdata->u.mgd.flags = 0;
+
mutex_lock(&sdata->local->mtx);
ieee80211_link_release_channel(&sdata->deflink);
- mutex_unlock(&sdata->local->mtx);
-
ieee80211_vif_set_links(sdata, 0);
+ mutex_unlock(&sdata->local->mtx);
}
cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
@@ -3462,10 +3462,6 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
sdata->u.mgd.flags = 0;
sdata->vif.bss_conf.mu_mimo_owner = false;
- mutex_lock(&sdata->local->mtx);
- ieee80211_link_release_channel(&sdata->deflink);
- mutex_unlock(&sdata->local->mtx);
-
if (status != ASSOC_REJECTED) {
struct cfg80211_assoc_failure data = {
.timeout = status == ASSOC_TIMEOUT,
@@ -3484,7 +3480,10 @@ static void ieee80211_destroy_assoc_data(struct ieee80211_sub_if_data *sdata,
cfg80211_assoc_failure(sdata->dev, &data);
}
+ mutex_lock(&sdata->local->mtx);
+ ieee80211_link_release_channel(&sdata->deflink);
ieee80211_vif_set_links(sdata, 0);
+ mutex_unlock(&sdata->local->mtx);
}
kfree(assoc_data);
@@ -6509,6 +6508,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
return 0;
out_err:
+ ieee80211_link_release_channel(&sdata->deflink);
ieee80211_vif_set_links(sdata, 0);
return err;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 57df21e2170a..45d7e71661e3 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4074,6 +4074,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
.link_id = -1,
};
struct tid_ampdu_rx *tid_agg_rx;
+ u8 link_id;
tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
if (!tid_agg_rx)
@@ -4093,6 +4094,9 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
};
drv_event_callback(rx.local, rx.sdata, &event);
}
+ /* FIXME: statistics won't be right with this */
+ link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
+ rx.link = rcu_dereference(sta->sdata->link[link_id]);
ieee80211_rx_handlers(&rx, &frames);
}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index fa8ddf576bc1..c4f2aeb31da3 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -469,16 +469,19 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
scan_req = rcu_dereference_protected(local->scan_req,
lockdep_is_held(&local->mtx));
- if (scan_req != local->int_scan_req) {
- local->scan_info.aborted = aborted;
- cfg80211_scan_done(scan_req, &local->scan_info);
- }
RCU_INIT_POINTER(local->scan_req, NULL);
RCU_INIT_POINTER(local->scan_sdata, NULL);
local->scanning = 0;
local->scan_chandef.chan = NULL;
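+ /* Let concurrent RCU readers of scan_req/scan_sdata finish before
+ * the scan is reported as completed.
+ */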
+ synchronize_rcu();
+
+ if (scan_req != local->int_scan_req) {
+ local->scan_info.aborted = aborted;
+ cfg80211_scan_done(scan_req, &local->scan_info);
+ }
+
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index cb23da9aff1e..58998d821778 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -494,7 +494,7 @@ __sta_info_alloc(struct ieee80211_sub_if_data *sdata,
sta->sdata = sdata;
if (sta_info_alloc_link(local, &sta->deflink, gfp))
- return NULL;
+ goto free;
if (link_id >= 0) {
sta_info_add_link(sta, link_id, &sta->deflink,
@@ -2316,9 +2316,9 @@ static inline u64 sta_get_tidstats_msdu(struct ieee80211_sta_rx_stats *rxstats,
u64 value;
do {
- start = u64_stats_fetch_begin(&rxstats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
value = rxstats->msdu[tid];
- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
return value;
}
@@ -2384,9 +2384,9 @@ static inline u64 sta_get_stats_bytes(struct ieee80211_sta_rx_stats *rxstats)
u64 value;
do {
- start = u64_stats_fetch_begin(&rxstats->syncp);
+ start = u64_stats_fetch_begin_irq(&rxstats->syncp);
value = rxstats->bytes;
- } while (u64_stats_fetch_retry(&rxstats->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&rxstats->syncp, start));
return value;
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 45df9932d0ba..bf7fe6cd9dfc 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -5885,6 +5885,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
rcu_read_lock();
err = ieee80211_lookup_ra_sta(sdata, skb, &sta);
if (err) {
+ dev_kfree_skb(skb);
rcu_read_unlock();
return err;
}
@@ -5899,7 +5900,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
* for MLO STA, the SA should be the AP MLD address, but
* the link ID has been selected already
*/
- if (sta->sta.mlo)
+ if (sta && sta->sta.mlo)
memcpy(ehdr->h_source, sdata->vif.addr, ETH_ALEN);
}
rcu_read_unlock();
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 93ec2f349748..20f742b5503b 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -351,7 +351,7 @@ static u8 ccmp_gcmp_aad(struct sk_buff *skb, u8 *aad)
* FC | A1 | A2 | A3 | SC | [A4] | [QC] */
put_unaligned_be16(len_a, &aad[0]);
put_unaligned(mask_fc, (__le16 *)&aad[2]);
- memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN);
+ memcpy(&aad[4], &hdr->addrs, 3 * ETH_ALEN);
/* Mask Seq#, leave Frag# */
aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f;
@@ -792,7 +792,7 @@ static void bip_aad(struct sk_buff *skb, u8 *aad)
IEEE80211_FCTL_MOREDATA);
put_unaligned(mask_fc, (__le16 *) &aad[0]);
/* A1 || A2 || A3 */
- memcpy(aad + 2, &hdr->addr1, 3 * ETH_ALEN);
+ memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN);
}
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index b8ce84618a55..c439125ef2b9 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -44,7 +44,7 @@ ieee802154_subif_frame(struct ieee802154_sub_if_data *sdata,
switch (mac_cb(skb)->dest.mode) {
case IEEE802154_ADDR_NONE:
- if (mac_cb(skb)->dest.mode != IEEE802154_ADDR_NONE)
+ if (hdr->source.mode != IEEE802154_ADDR_NONE)
/* FIXME: check if we are PAN coordinator */
skb->pkt_type = PACKET_OTHERHOST;
else
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 35b5f806fdda..b52afe316dc4 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1079,9 +1079,9 @@ static void mpls_get_stats(struct mpls_dev *mdev,
p = per_cpu_ptr(mdev->stats, i);
do {
- start = u64_stats_fetch_begin(&p->syncp);
+ start = u64_stats_fetch_begin_irq(&p->syncp);
local = p->stats;
- } while (u64_stats_fetch_retry(&p->syncp, start));
+ } while (u64_stats_fetch_retry_irq(&p->syncp, start));
stats->rx_packets += local.rx_packets;
stats->rx_bytes += local.rx_bytes;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 71c2f4f95d36..1357a2729a4b 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1782,7 +1782,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
}
spin_unlock_bh(&nf_conntrack_expect_lock);
}
- if (!exp)
+ if (!exp && tmpl)
__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
/* Other CPU might have obtained a pointer to this object before it was
@@ -2068,10 +2068,6 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
if (ct->master || (help && !hlist_empty(&help->expectations)))
return;
-
- rcu_read_lock();
- __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
@@ -2797,7 +2793,6 @@ int nf_conntrack_init_net(struct net *net)
nf_conntrack_acct_pernet_init(net);
nf_conntrack_tstamp_pernet_init(net);
nf_conntrack_ecache_pernet_init(net);
- nf_conntrack_helper_pernet_init(net);
nf_conntrack_proto_pernet_init(net);
return 0;
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index e96b32221444..ff737a76052e 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -35,11 +35,6 @@ unsigned int nf_ct_helper_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
static unsigned int nf_ct_helper_count __read_mostly;
-static bool nf_ct_auto_assign_helper __read_mostly = false;
-module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
-MODULE_PARM_DESC(nf_conntrack_helper,
- "Enable automatic conntrack helper assignment (default 0)");
-
static DEFINE_MUTEX(nf_ct_nat_helpers_mutex);
static struct list_head nf_ct_nat_helpers __read_mostly;
@@ -51,24 +46,6 @@ static unsigned int helper_hash(const struct nf_conntrack_tuple *tuple)
(__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
}
-static struct nf_conntrack_helper *
-__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
-{
- struct nf_conntrack_helper *helper;
- struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
- unsigned int h;
-
- if (!nf_ct_helper_count)
- return NULL;
-
- h = helper_hash(tuple);
- hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
- if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
- return helper;
- }
- return NULL;
-}
-
struct nf_conntrack_helper *
__nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
{
@@ -209,33 +186,11 @@ nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp)
}
EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
-static struct nf_conntrack_helper *
-nf_ct_lookup_helper(struct nf_conn *ct, struct net *net)
-{
- struct nf_conntrack_net *cnet = nf_ct_pernet(net);
-
- if (!cnet->sysctl_auto_assign_helper) {
- if (cnet->auto_assign_helper_warned)
- return NULL;
- if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple))
- return NULL;
- pr_info("nf_conntrack: default automatic helper assignment "
- "has been turned off for security reasons and CT-based "
- "firewall rule not found. Use the iptables CT target "
- "to attach helpers instead.\n");
- cnet->auto_assign_helper_warned = true;
- return NULL;
- }
-
- return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-}
-
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags)
{
struct nf_conntrack_helper *helper = NULL;
struct nf_conn_help *help;
- struct net *net = nf_ct_net(ct);
/* We already got a helper explicitly attached. The function
* nf_conntrack_alter_reply - in case NAT is in use - asks for looking
@@ -246,23 +201,21 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
if (test_bit(IPS_HELPER_BIT, &ct->status))
return 0;
- if (tmpl != NULL) {
- help = nfct_help(tmpl);
- if (help != NULL) {
- helper = rcu_dereference(help->helper);
- set_bit(IPS_HELPER_BIT, &ct->status);
- }
+ if (WARN_ON_ONCE(!tmpl))
+ return 0;
+
+ help = nfct_help(tmpl);
+ if (help != NULL) {
+ helper = rcu_dereference(help->helper);
+ set_bit(IPS_HELPER_BIT, &ct->status);
}
help = nfct_help(ct);
if (helper == NULL) {
- helper = nf_ct_lookup_helper(ct, net);
- if (helper == NULL) {
- if (help)
- RCU_INIT_POINTER(help->helper, NULL);
- return 0;
- }
+ if (help)
+ RCU_INIT_POINTER(help->helper, NULL);
+ return 0;
}
if (help == NULL) {
@@ -545,19 +498,6 @@ void nf_nat_helper_unregister(struct nf_conntrack_nat_helper *nat)
}
EXPORT_SYMBOL_GPL(nf_nat_helper_unregister);
-void nf_ct_set_auto_assign_helper_warned(struct net *net)
-{
- nf_ct_pernet(net)->auto_assign_helper_warned = true;
-}
-EXPORT_SYMBOL_GPL(nf_ct_set_auto_assign_helper_warned);
-
-void nf_conntrack_helper_pernet_init(struct net *net)
-{
- struct nf_conntrack_net *cnet = nf_ct_pernet(net);
-
- cnet->sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
-}
-
int nf_conntrack_helper_init(void)
{
nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index 1796c456ac98..992decbcaa5c 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -194,8 +194,9 @@ static int help(struct sk_buff *skb, unsigned int protoff,
/* dcc_ip can be the internal OR external (NAT'ed) IP */
tuple = &ct->tuplehash[dir].tuple;
- if (tuple->src.u3.ip != dcc_ip &&
- tuple->dst.u3.ip != dcc_ip) {
+ if ((tuple->src.u3.ip != dcc_ip &&
+ ct->tuplehash[!dir].tuple.dst.u3.ip != dcc_ip) ||
+ dcc_port == 0) {
net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
&tuple->src.u3.ip,
&dcc_ip, dcc_port);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 04169b54f2a2..7562b215b932 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2298,11 +2298,6 @@ ctnetlink_create_conntrack(struct net *net,
ct->status |= IPS_HELPER;
RCU_INIT_POINTER(help->helper, helper);
}
- } else {
- /* try an implicit helper assignation */
- err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
- if (err < 0)
- goto err2;
}
err = ctnetlink_setup_nat(ct, cda);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 05895878610c..4ffe84c5a82c 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -561,7 +561,6 @@ enum nf_ct_sysctl_index {
NF_SYSCTL_CT_LOG_INVALID,
NF_SYSCTL_CT_EXPECT_MAX,
NF_SYSCTL_CT_ACCT,
- NF_SYSCTL_CT_HELPER,
#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_SYSCTL_CT_EVENTS,
#endif
@@ -680,14 +679,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- [NF_SYSCTL_CT_HELPER] = {
- .procname = "nf_conntrack_helper",
- .maxlen = sizeof(u8),
- .mode = 0644,
- .proc_handler = proc_dou8vec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
#ifdef CONFIG_NF_CONNTRACK_EVENTS
[NF_SYSCTL_CT_EVENTS] = {
.procname = "nf_conntrack_events",
@@ -1100,7 +1091,6 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
- table[NF_SYSCTL_CT_HELPER].data = &cnet->sysctl_auto_assign_helper;
#ifdef CONFIG_NF_CONNTRACK_EVENTS
table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
#endif
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 2ee50e23c9b7..816052089b33 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2166,8 +2166,10 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
chain->flags |= NFT_CHAIN_BASE | flags;
basechain->policy = NF_ACCEPT;
if (chain->flags & NFT_CHAIN_HW_OFFLOAD &&
- !nft_chain_offload_support(basechain))
+ !nft_chain_offload_support(basechain)) {
+ list_splice_init(&basechain->hook_list, &hook->list);
return -EOPNOTSUPP;
+ }
flow_block_init(&basechain->flow_block);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index b04995c3e17f..a3f01f209a53 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -1089,9 +1089,6 @@ static int nft_ct_helper_obj_init(const struct nft_ctx *ctx,
if (err < 0)
goto err_put_helper;
- /* Avoid the bogus warning, helper will be assigned after CT init */
- nf_ct_set_auto_assign_helper_warned(ctx->net);
-
return 0;
err_put_helper:
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 7e8a39a35627..6c9d153afbee 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1802,7 +1802,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_dp_reset_user_features(skb, info);
}
- goto err_unlock_and_destroy_meters;
+ goto err_destroy_portids;
}
err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
@@ -1817,6 +1817,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
ovs_notify(&dp_datapath_genl_family, reply, info);
return 0;
+err_destroy_portids:
+ kfree(rcu_dereference_raw(dp->upcall_portids));
err_unlock_and_destroy_meters:
ovs_unlock();
ovs_meters_exit(dp);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 571436064cd6..62c70709d798 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -982,6 +982,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *);
/*
* peer_event.c
*/
+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, unsigned int udp_offset);
void rxrpc_error_report(struct sock *);
void rxrpc_peer_keepalive_worker(struct work_struct *);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index f8ecad2b730e..2a93e7b5fbd0 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -166,7 +166,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
now = ktime_get_real();
- max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
+ max_age = ktime_sub_us(now, jiffies_to_usecs(call->peer->rto_j));
spin_lock_bh(&call->lock);
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 96ecb7356c0f..38ea98ff426b 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -137,6 +137,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
tuncfg.encap_type = UDP_ENCAP_RXRPC;
tuncfg.encap_rcv = rxrpc_input_packet;
+ tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
tuncfg.sk_user_data = local;
setup_udp_tunnel_sock(net, local->socket, &tuncfg);
@@ -405,6 +406,9 @@ static void rxrpc_local_processor(struct work_struct *work)
container_of(work, struct rxrpc_local, processor);
bool again;
+ if (local->dead)
+ return;
+
trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
refcount_read(&local->ref), NULL);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index be032850ae8c..32561e9567fe 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -16,22 +16,105 @@
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
+#include <net/icmp.h>
#include "ar-internal.h"
+static void rxrpc_adjust_mtu(struct rxrpc_peer *, unsigned int);
static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
enum rxrpc_call_completion);
/*
- * Find the peer associated with an ICMP packet.
+ * Find the peer associated with an ICMPv4 packet.
*/
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
- const struct sk_buff *skb,
+ struct sk_buff *skb,
+ unsigned int udp_offset,
+ unsigned int *info,
struct sockaddr_rxrpc *srx)
{
- struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
+ struct iphdr *ip, *ip0 = ip_hdr(skb);
+ struct icmphdr *icmp = icmp_hdr(skb);
+ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
- _enter("");
+ _enter("%u,%u,%u", ip0->protocol, icmp->type, icmp->code);
+
+ switch (icmp->type) {
+ case ICMP_DEST_UNREACH:
+ *info = ntohs(icmp->un.frag.mtu);
+ fallthrough;
+ case ICMP_TIME_EXCEEDED:
+ case ICMP_PARAMETERPROB:
+ ip = (struct iphdr *)((void *)icmp + 8);
+ break;
+ default:
+ return NULL;
+ }
+
+ memset(srx, 0, sizeof(*srx));
+ srx->transport_type = local->srx.transport_type;
+ srx->transport_len = local->srx.transport_len;
+ srx->transport.family = local->srx.transport.family;
+
+ /* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
+ * versa?
+ */
+ switch (srx->transport.family) {
+ case AF_INET:
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.family = AF_INET;
+ srx->transport.sin.sin_port = udp->dest;
+ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
+ sizeof(struct in_addr));
+ break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case AF_INET6:
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.family = AF_INET;
+ srx->transport.sin.sin_port = udp->dest;
+ memcpy(&srx->transport.sin.sin_addr, &ip->daddr,
+ sizeof(struct in_addr));
+ break;
+#endif
+
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ _net("ICMP {%pISp}", &srx->transport);
+ return rxrpc_lookup_peer_rcu(local, srx);
+}
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+/*
+ * Find the peer associated with an ICMPv6 packet.
+ */
+static struct rxrpc_peer *rxrpc_lookup_peer_icmp6_rcu(struct rxrpc_local *local,
+ struct sk_buff *skb,
+ unsigned int udp_offset,
+ unsigned int *info,
+ struct sockaddr_rxrpc *srx)
+{
+ struct icmp6hdr *icmp = icmp6_hdr(skb);
+ struct ipv6hdr *ip, *ip0 = ipv6_hdr(skb);
+ struct udphdr *udp = (struct udphdr *)(skb->data + udp_offset);
+
+ _enter("%u,%u,%u", ip0->nexthdr, icmp->icmp6_type, icmp->icmp6_code);
+
+ switch (icmp->icmp6_type) {
+ case ICMPV6_DEST_UNREACH:
+ *info = ntohl(icmp->icmp6_mtu);
+ fallthrough;
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
+ ip = (struct ipv6hdr *)((void *)icmp + 8);
+ break;
+ default:
+ return NULL;
+ }
memset(srx, 0, sizeof(*srx));
srx->transport_type = local->srx.transport_type;
@@ -43,6 +126,165 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
*/
switch (srx->transport.family) {
case AF_INET:
+ _net("Rx ICMP6 on v4 sock");
+ srx->transport_len = sizeof(srx->transport.sin);
+ srx->transport.family = AF_INET;
+ srx->transport.sin.sin_port = udp->dest;
+ memcpy(&srx->transport.sin.sin_addr,
+ &ip->daddr.s6_addr32[3], sizeof(struct in_addr));
+ break;
+ case AF_INET6:
+ _net("Rx ICMP6");
+ srx->transport.sin.sin_port = udp->dest;
+ memcpy(&srx->transport.sin6.sin6_addr, &ip->daddr,
+ sizeof(struct in6_addr));
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+
+ _net("ICMP {%pISp}", &srx->transport);
+ return rxrpc_lookup_peer_rcu(local, srx);
+}
+#endif /* CONFIG_AF_RXRPC_IPV6 */
+
+/*
+ * Handle an error received on the local endpoint as a tunnel.
+ */
+void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb,
+ unsigned int udp_offset)
+{
+ struct sock_extended_err ee;
+ struct sockaddr_rxrpc srx;
+ struct rxrpc_local *local;
+ struct rxrpc_peer *peer;
+ unsigned int info = 0;
+ int err;
+ u8 version = ip_hdr(skb)->version;
+ u8 type = icmp_hdr(skb)->type;
+ u8 code = icmp_hdr(skb)->code;
+
+ rcu_read_lock();
+ local = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!local)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ rxrpc_new_skb(skb, rxrpc_skb_received);
+
+ switch (ip_hdr(skb)->version) {
+ case IPVERSION:
+ peer = rxrpc_lookup_peer_icmp_rcu(local, skb, udp_offset,
+ &info, &srx);
+ break;
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case 6:
+ peer = rxrpc_lookup_peer_icmp6_rcu(local, skb, udp_offset,
+ &info, &srx);
+ break;
+#endif
+ default:
+ rcu_read_unlock();
+ return;
+ }
+
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (!peer) {
+ rcu_read_unlock();
+ return;
+ }
+
+ memset(&ee, 0, sizeof(ee));
+
+ switch (version) {
+ case IPVERSION:
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ switch (code) {
+ case ICMP_FRAG_NEEDED:
+ rxrpc_adjust_mtu(peer, info);
+ rcu_read_unlock();
+ rxrpc_put_peer(peer);
+ return;
+ default:
+ break;
+ }
+
+ err = EHOSTUNREACH;
+ if (code <= NR_ICMP_UNREACH) {
+ /* Might want to do something different with
+ * non-fatal errors
+ */
+ //harderr = icmp_err_convert[code].fatal;
+ err = icmp_err_convert[code].errno;
+ }
+ break;
+
+ case ICMP_TIME_EXCEEDED:
+ err = EHOSTUNREACH;
+ break;
+ default:
+ err = EPROTO;
+ break;
+ }
+
+ ee.ee_origin = SO_EE_ORIGIN_ICMP;
+ ee.ee_type = type;
+ ee.ee_code = code;
+ ee.ee_errno = err;
+ break;
+
+#ifdef CONFIG_AF_RXRPC_IPV6
+ case 6:
+ switch (type) {
+ case ICMPV6_PKT_TOOBIG:
+ rxrpc_adjust_mtu(peer, info);
+ rcu_read_unlock();
+ rxrpc_put_peer(peer);
+ return;
+ }
+
+ icmpv6_err_convert(type, code, &err);
+
+ if (err == EACCES)
+ err = EHOSTUNREACH;
+
+ ee.ee_origin = SO_EE_ORIGIN_ICMP6;
+ ee.ee_type = type;
+ ee.ee_code = code;
+ ee.ee_errno = err;
+ break;
+#endif
+ }
+
+ trace_rxrpc_rx_icmp(peer, &ee, &srx);
+
+ rxrpc_distribute_error(peer, err, RXRPC_CALL_NETWORK_ERROR);
+ rcu_read_unlock();
+ rxrpc_put_peer(peer);
+}
+
+/*
+ * Find the peer associated with a local error.
+ */
+static struct rxrpc_peer *rxrpc_lookup_peer_local_rcu(struct rxrpc_local *local,
+ const struct sk_buff *skb,
+ struct sockaddr_rxrpc *srx)
+{
+ struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
+
+ _enter("");
+
+ memset(srx, 0, sizeof(*srx));
+ srx->transport_type = local->srx.transport_type;
+ srx->transport_len = local->srx.transport_len;
+ srx->transport.family = local->srx.transport.family;
+
+ switch (srx->transport.family) {
+ case AF_INET:
srx->transport_len = sizeof(srx->transport.sin);
srx->transport.family = AF_INET;
srx->transport.sin.sin_port = serr->port;
@@ -104,10 +346,8 @@ static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
/*
* Handle an MTU/fragmentation problem.
*/
-static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
+static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, unsigned int mtu)
{
- u32 mtu = serr->ee.ee_info;
-
_net("Rx ICMP Fragmentation Needed (%d)", mtu);
/* wind down the local interface MTU */
@@ -148,7 +388,7 @@ void rxrpc_error_report(struct sock *sk)
struct sock_exterr_skb *serr;
struct sockaddr_rxrpc srx;
struct rxrpc_local *local;
- struct rxrpc_peer *peer;
+ struct rxrpc_peer *peer = NULL;
struct sk_buff *skb;
rcu_read_lock();
@@ -172,41 +412,20 @@ void rxrpc_error_report(struct sock *sk)
}
rxrpc_new_skb(skb, rxrpc_skb_received);
serr = SKB_EXT_ERR(skb);
- if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
- _leave("UDP empty message");
- rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- return;
- }
- peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
- if (peer && !rxrpc_get_peer_maybe(peer))
- peer = NULL;
- if (!peer) {
- rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- _leave(" [no peer]");
- return;
- }
-
- trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
-
- if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
- serr->ee.ee_type == ICMP_DEST_UNREACH &&
- serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
- rxrpc_adjust_mtu(peer, serr);
- rcu_read_unlock();
- rxrpc_free_skb(skb, rxrpc_skb_freed);
- rxrpc_put_peer(peer);
- _leave(" [MTU update]");
- return;
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) {
+ peer = rxrpc_lookup_peer_local_rcu(local, skb, &srx);
+ if (peer && !rxrpc_get_peer_maybe(peer))
+ peer = NULL;
+ if (peer) {
+ trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
+ rxrpc_store_error(peer, serr);
+ }
}
- rxrpc_store_error(peer, serr);
rcu_read_unlock();
rxrpc_free_skb(skb, rxrpc_skb_freed);
rxrpc_put_peer(peer);
-
_leave("");
}
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 250f23bc1c07..7e39c262fd79 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -771,46 +771,3 @@ call_complete:
goto out;
}
EXPORT_SYMBOL(rxrpc_kernel_recv_data);
-
-/**
- * rxrpc_kernel_get_reply_time - Get timestamp on first reply packet
- * @sock: The socket that the call exists on
- * @call: The call to query
- * @_ts: Where to put the timestamp
- *
- * Retrieve the timestamp from the first DATA packet of the reply if it is
- * in the ring. Returns true if successful, false if not.
- */
-bool rxrpc_kernel_get_reply_time(struct socket *sock, struct rxrpc_call *call,
- ktime_t *_ts)
-{
- struct sk_buff *skb;
- rxrpc_seq_t hard_ack, top, seq;
- bool success = false;
-
- mutex_lock(&call->user_mutex);
-
- if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_RECV_REPLY)
- goto out;
-
- hard_ack = call->rx_hard_ack;
- if (hard_ack != 0)
- goto out;
-
- seq = hard_ack + 1;
- top = smp_load_acquire(&call->rx_top);
- if (after(seq, top))
- goto out;
-
- skb = call->rxtx_buffer[seq & RXRPC_RXTX_BUFF_MASK];
- if (!skb)
- goto out;
-
- *_ts = skb_get_ktime(skb);
- success = true;
-
-out:
- mutex_unlock(&call->user_mutex);
- return success;
-}
-EXPORT_SYMBOL(rxrpc_kernel_get_reply_time);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 258917a714c8..78fa0524156f 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -540,7 +540,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
* directly into the target buffer.
*/
sg = _sg;
- nsg = skb_shinfo(skb)->nr_frags;
+ nsg = skb_shinfo(skb)->nr_frags + 1;
if (nsg <= 4) {
nsg = 4;
} else {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 99b697ad2b98..7a8ea03f673d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1122,6 +1122,21 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
}
EXPORT_SYMBOL(dev_graft_qdisc);
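+/* Reset a queue's qdisc to the given default and release the old one. */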
+static void shutdown_scheduler_queue(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_qdisc_default)
+{
+ struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+ struct Qdisc *qdisc_default = _qdisc_default;
+
+ if (qdisc) {
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+ dev_queue->qdisc_sleeping = qdisc_default;
+
+ qdisc_put(qdisc);
+ }
+}
+
static void attach_one_default_qdisc(struct net_device *dev,
struct netdev_queue *dev_queue,
void *_unused)
@@ -1169,6 +1184,7 @@ static void attach_default_qdiscs(struct net_device *dev)
if (qdisc == &noop_qdisc) {
netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
default_qdisc_ops->id, noqueue_qdisc_ops.id);
+ netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
dev->priv_flags |= IFF_NO_QUEUE;
netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
qdisc = txq->qdisc_sleeping;
@@ -1447,21 +1463,6 @@ void dev_init_scheduler(struct net_device *dev)
timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
}
-static void shutdown_scheduler_queue(struct net_device *dev,
- struct netdev_queue *dev_queue,
- void *_qdisc_default)
-{
- struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
- struct Qdisc *qdisc_default = _qdisc_default;
-
- if (qdisc) {
- rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
- dev_queue->qdisc_sleeping = qdisc_default;
-
- qdisc_put(qdisc);
- }
-}
-
void dev_shutdown(struct net_device *dev)
{
netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 3d061a13d7ed..2829455211f8 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -135,15 +135,15 @@ static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
}
}
-static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
+static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{
u32 sfbhash;
- sfbhash = sfb_hash(skb, 0);
+ sfbhash = cb->hashes[0];
if (sfbhash)
increment_one_qlen(sfbhash, 0, q);
- sfbhash = sfb_hash(skb, 1);
+ sfbhash = cb->hashes[1];
if (sfbhash)
increment_one_qlen(sfbhash, 1, q);
}
@@ -281,8 +281,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
struct sfb_sched_data *q = qdisc_priv(sch);
+ unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *child = q->qdisc;
struct tcf_proto *fl;
+ struct sfb_skb_cb cb;
int i;
u32 p_min = ~0;
u32 minqlen = ~0;
@@ -399,11 +401,12 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
enqueue:
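+ /* Snapshot the cb: the child enqueue may free or overwrite the skb. */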
+ memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
ret = qdisc_enqueue(skb, child, to_free);
if (likely(ret == NET_XMIT_SUCCESS)) {
- qdisc_qstats_backlog_inc(sch, skb);
+ sch->qstats.backlog += len;
sch->q.qlen++;
- increment_qlen(skb, q);
+ increment_qlen(&cb, q);
} else if (net_xmit_drop_count(ret)) {
q->stats.childdrop++;
qdisc_qstats_drop(sch);
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 72102277449e..36079fdde2cb 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -356,6 +356,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
struct nlattr *tb[TCA_TBF_MAX + 1];
struct tc_tbf_qopt *qopt;
struct Qdisc *child = NULL;
+ struct Qdisc *old = NULL;
struct psched_ratecfg rate;
struct psched_ratecfg peak;
u64 max_size;
@@ -447,7 +448,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
if (child) {
qdisc_tree_flush_backlog(q->qdisc);
- qdisc_put(q->qdisc);
+ old = q->qdisc;
q->qdisc = child;
}
q->limit = qopt->limit;
@@ -467,6 +468,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
sch_tree_unlock(sch);
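+ /* Put the old child qdisc outside of the qdisc tree lock. */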
+ qdisc_put(old);
err = 0;
tbf_offload_change(sch);
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 79c1318af1fe..0939cc3b915a 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1855,7 +1855,6 @@ static void smc_listen_out_connected(struct smc_sock *new_smc)
{
struct sock *newsmcsk = &new_smc->sk;
- sk_refcnt_debug_inc(newsmcsk);
if (newsmcsk->sk_state == SMC_INIT)
newsmcsk->sk_state = SMC_ACTIVE;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index ff49a11f57b8..ebf56cdf17db 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -757,6 +757,7 @@ int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
lnk->lgr = lgr;
smc_lgr_hold(lgr); /* lgr_put in smcr_link_clear() */
lnk->link_idx = link_idx;
+ lnk->wr_rx_id_compl = 0;
smc_ibdev_cnt_inc(lnk);
smcr_copy_dev_info_to_link(lnk);
atomic_set(&lnk->conn_cnt, 0);
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index fe8b524ad846..285f9bd8e232 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -115,8 +115,10 @@ struct smc_link {
dma_addr_t wr_rx_dma_addr; /* DMA address of wr_rx_bufs */
dma_addr_t wr_rx_v2_dma_addr; /* DMA address of v2 rx buf*/
u64 wr_rx_id; /* seq # of last recv WR */
+ u64 wr_rx_id_compl; /* seq # of last completed WR */
u32 wr_rx_cnt; /* number of WR recv buffers */
unsigned long wr_rx_tstamp; /* jiffies when last buf rx */
+ wait_queue_head_t wr_rx_empty_wait; /* wait for RQ empty */
struct ib_reg_wr wr_reg; /* WR register memory region */
wait_queue_head_t wr_reg_wait; /* wait for wr_reg result */
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
index 26f8f240d9e8..b0678a417e09 100644
--- a/net/smc/smc_wr.c
+++ b/net/smc/smc_wr.c
@@ -454,6 +454,7 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
for (i = 0; i < num; i++) {
link = wc[i].qp->qp_context;
+ link->wr_rx_id_compl = wc[i].wr_id;
if (wc[i].status == IB_WC_SUCCESS) {
link->wr_rx_tstamp = jiffies;
smc_wr_rx_demultiplex(&wc[i]);
@@ -465,6 +466,8 @@ static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
case IB_WC_RNR_RETRY_EXC_ERR:
case IB_WC_WR_FLUSH_ERR:
smcr_link_down_cond_sched(link);
+ if (link->wr_rx_id_compl == link->wr_rx_id)
+ wake_up(&link->wr_rx_empty_wait);
break;
default:
smc_wr_rx_post(link); /* refill WR RX */
@@ -639,6 +642,7 @@ void smc_wr_free_link(struct smc_link *lnk)
return;
ibdev = lnk->smcibdev->ibdev;
+ smc_wr_drain_cq(lnk);
smc_wr_wakeup_reg_wait(lnk);
smc_wr_wakeup_tx_wait(lnk);
@@ -889,6 +893,7 @@ int smc_wr_create_link(struct smc_link *lnk)
atomic_set(&lnk->wr_tx_refcnt, 0);
init_waitqueue_head(&lnk->wr_reg_wait);
atomic_set(&lnk->wr_reg_refcnt, 0);
+ init_waitqueue_head(&lnk->wr_rx_empty_wait);
return rc;
dma_unmap:
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h
index a54e90a1110f..45e9b894d3f8 100644
--- a/net/smc/smc_wr.h
+++ b/net/smc/smc_wr.h
@@ -73,6 +73,11 @@ static inline void smc_wr_tx_link_put(struct smc_link *link)
wake_up_all(&link->wr_tx_wait);
}
+static inline void smc_wr_drain_cq(struct smc_link *lnk)
+{
+ wait_event(lnk->wr_rx_empty_wait, lnk->wr_rx_id_compl == lnk->wr_rx_id);
+}
+
static inline void smc_wr_wakeup_tx_wait(struct smc_link *lnk)
{
wake_up_all(&lnk->wr_tx_wait);
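smc_wr_drain_cq() above is a plain wait_event() on the condition "last completed receive WR equals last posted receive WR", with the CQ handler doing the wake-up. The same shape in portable C with a condition variable; the names and the fixed count of 16 in-flight receives are assumptions of the sketch:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rq_empty = PTHREAD_COND_INITIALIZER;
static uint64_t posted_id, completed_id; /* both protected by lock */

/* Completion poller: record progress and wake any drainer once the
 * last posted receive has completed — the role smc_wr_rx_process_cqes()
 * plays with wr_rx_id_compl and wr_rx_empty_wait. */
static void *poll_cq(void *arg)
{
	int done = 0;

	(void)arg;
	while (!done) {
		usleep(1000); /* pretend a completion arrived */
		pthread_mutex_lock(&lock);
		if (completed_id < posted_id)
			completed_id++;
		done = (completed_id == posted_id);
		if (done)
			pthread_cond_broadcast(&rq_empty);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

/* Analogue of smc_wr_drain_cq(): block until the queue is empty. */
static void drain_cq(void)
{
	pthread_mutex_lock(&lock);
	while (completed_id != posted_id)
		pthread_cond_wait(&rq_empty, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	posted_id = 16; /* sixteen receives assumed in flight */
	if (pthread_create(&t, NULL, poll_cq, NULL))
		return 1;
	drain_cq();
	pthread_join(t, NULL);
	puts("receive queue drained");
	return 0;
}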
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7d268a291486..c284efa3d1ef 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2873,6 +2873,9 @@ int rpc_clnt_test_and_add_xprt(struct rpc_clnt *clnt,
task = rpc_call_null_helper(clnt, xprt, NULL, RPC_TASK_ASYNC,
&rpc_cb_add_xprt_call_ops, data);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+
data->xps->xps_nunique_destaddr_xprts++;
rpc_put_task(task);
success:
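The sunrpc fix matters because rpc_call_null_helper() returns an error-encoded pointer on failure, and the old code used it unconditionally. A self-contained illustration of the ERR_PTR/IS_ERR convention; these are simplified reimplementations for the example, not the kernel macros:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Encode small negative errnos at the top of the address space. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *start_task(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM);
	return malloc(64);
}

int main(void)
{
	void *task = start_task(1);

	/* The fixed pattern: bail out before touching the "object". */
	if (IS_ERR(task)) {
		printf("start_task failed: %ld\n", PTR_ERR(task));
		return 1;
	}
	free(task);
	return 0;
}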
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d71eec494826..f8fae7815649 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1179,11 +1179,8 @@ xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
+ if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
xprt_request_rb_remove(req->rq_xprt, req);
- xdr_free_bvec(&req->rq_rcv_buf);
- req->rq_private_buf.bvec = NULL;
- }
}
/**
@@ -1221,6 +1218,8 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
xprt->stat.recvs++;
+ xdr_free_bvec(&req->rq_rcv_buf);
+ req->rq_private_buf.bvec = NULL;
req->rq_private_buf.len = copied;
/* Ensure all writes are done before we update */
/* req->rq_reply_bytes_recvd */
@@ -1453,6 +1452,7 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
xprt_request_dequeue_transmit_locked(task);
xprt_request_dequeue_receive_locked(task);
spin_unlock(&xprt->queue_lock);
+ xdr_free_bvec(&req->rq_rcv_buf);
}
}
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 2f4d23238a7e..9618e4429f0f 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -160,7 +160,7 @@ static void map_set(u64 *up_map, int i, unsigned int v)
static int map_get(u64 up_map, int i)
{
- return (up_map & (1 << i)) >> i;
+ return (up_map & (1ULL << i)) >> i;
}
static struct tipc_peer *peer_prev(struct tipc_peer *peer)
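The tipc one-liner is the classic 32-bit shift trap: up_map is u64, but `1 << i` is computed in int, so peers at bit positions 31 and above were tested incorrectly (and the shift itself is undefined behavior). A compilable demonstration:

#include <stdint.h>
#include <stdio.h>

/* Buggy variant: the shift happens in 32-bit int arithmetic even
 * though the map is 64-bit — undefined once i reaches 31/32. */
static int map_get_bad(uint64_t up_map, int i)
{
	return (up_map & (1 << i)) >> i;
}

/* Fixed variant, matching the patch: force a 64-bit shift. */
static int map_get_ok(uint64_t up_map, int i)
{
	return (up_map & (1ULL << i)) >> i;
}

int main(void)
{
	uint64_t map = 1ULL << 40; /* peer at bit 40 is up */

	/* Build with -fsanitize=undefined to see the first call flagged;
	 * without sanitizers it typically just answers wrong. */
	printf("bad=%d ok=%d\n", map_get_bad(map, 40), map_get_ok(map, 40));
	return 0;
}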
diff --git a/net/wireless/debugfs.c b/net/wireless/debugfs.c
index aab43469a2f0..0878b162890a 100644
--- a/net/wireless/debugfs.c
+++ b/net/wireless/debugfs.c
@@ -65,9 +65,10 @@ static ssize_t ht40allow_map_read(struct file *file,
{
struct wiphy *wiphy = file->private_data;
char *buf;
- unsigned int offset = 0, buf_size = PAGE_SIZE, i, r;
+ unsigned int offset = 0, buf_size = PAGE_SIZE, i;
enum nl80211_band band;
struct ieee80211_supported_band *sband;
+ ssize_t r;
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
diff --git a/net/wireless/lib80211_crypt_ccmp.c b/net/wireless/lib80211_crypt_ccmp.c
index 6a5f08f7491e..cca5e1cf089e 100644
--- a/net/wireless/lib80211_crypt_ccmp.c
+++ b/net/wireless/lib80211_crypt_ccmp.c
@@ -136,7 +136,7 @@ static int ccmp_init_iv_and_aad(const struct ieee80211_hdr *hdr,
pos = (u8 *) hdr;
aad[0] = pos[0] & 0x8f;
aad[1] = pos[1] & 0xc7;
- memcpy(aad + 2, hdr->addr1, 3 * ETH_ALEN);
+ memcpy(aad + 2, &hdr->addrs, 3 * ETH_ALEN);
pos = (u8 *) & hdr->seq_ctrl;
aad[20] = pos[0] & 0x0f;
aad[21] = 0; /* all bits masked */
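The lib80211 change copies from a member that is actually 3*ETH_ALEN bytes long instead of reading past the 6-byte addr1 field, which keeps FORTIFY_SOURCE-style bounds checks happy. A sketch with a hypothetical header layout showing how a union gives the bulk copy a correctly sized source object:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Hypothetical 802.11-style header: the union lets the three MACs be
 * addressed individually or as one 18-byte region, so a bulk memcpy
 * has a source object of the right declared size. */
struct hdr {
	unsigned short frame_control;
	union {
		struct {
			unsigned char addr1[ETH_ALEN];
			unsigned char addr2[ETH_ALEN];
			unsigned char addr3[ETH_ALEN];
		};
		unsigned char addrs[3 * ETH_ALEN];
	};
};

int main(void)
{
	struct hdr h = { .addr1 = {1}, .addr2 = {2}, .addr3 = {3} };
	unsigned char aad[2 + 3 * ETH_ALEN] = { 0xaa, 0xbb };

	/* memcpy(aad + 2, h.addr1, 3 * ETH_ALEN) would read 12 bytes past
	 * addr1's declared bounds; copying from addrs stays in bounds. */
	memcpy(aad + 2, h.addrs, sizeof(h.addrs));
	printf("aad[2]=%u aad[8]=%u aad[14]=%u\n", aad[2], aad[8], aad[14]);
	return 0;
}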
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index f70112176b7c..a71a8c6edf55 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -379,6 +379,16 @@ static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
+ if (!pool->unaligned) {
+ u32 i;
+
+ for (i = 0; i < pool->heads_cnt; i++) {
+ struct xdp_buff_xsk *xskb = &pool->heads[i];
+
+ xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
+ }
+ }
+
pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
if (!pool->dma_pages)
return -ENOMEM;
@@ -428,12 +438,6 @@ int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
if (pool->unaligned)
xp_check_dma_contiguity(dma_map);
- else
- for (i = 0; i < pool->heads_cnt; i++) {
- struct xdp_buff_xsk *xskb = &pool->heads[i];
-
- xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
- }
err = xp_init_dma_info(pool, dma_map);
if (err) {
diff --git a/scripts/Makefile.extrawarn b/scripts/Makefile.extrawarn
index 0621c39a3955..6ae482158bc4 100644
--- a/scripts/Makefile.extrawarn
+++ b/scripts/Makefile.extrawarn
@@ -47,7 +47,19 @@ else
ifdef CONFIG_CC_IS_CLANG
KBUILD_CFLAGS += -Wno-initializer-overrides
+# Clang before clang-16 would warn on default argument promotions.
+ifeq ($(shell [ $(CONFIG_CLANG_VERSION) -lt 160000 ] && echo y),y)
+# Disable -Wformat
KBUILD_CFLAGS += -Wno-format
+# Then re-enable flags that were part of the -Wformat group that aren't
+# problematic.
+KBUILD_CFLAGS += -Wformat-extra-args -Wformat-invalid-specifier
+KBUILD_CFLAGS += -Wformat-zero-length -Wnonnull
+# Requires clang-12+.
+ifeq ($(shell [ $(CONFIG_CLANG_VERSION) -ge 120000 ] && echo y),y)
+KBUILD_CFLAGS += -Wformat-insufficient-args
+endif
+endif
KBUILD_CFLAGS += -Wno-sign-compare
KBUILD_CFLAGS += $(call cc-disable-warning, pointer-to-enum-cast)
KBUILD_CFLAGS += -Wno-tautological-constant-out-of-range-compare
diff --git a/scripts/extract-ikconfig b/scripts/extract-ikconfig
index 3b42f255e2ba..8df33e7d6daa 100755
--- a/scripts/extract-ikconfig
+++ b/scripts/extract-ikconfig
@@ -62,6 +62,7 @@ try_decompress 'BZh' xy bunzip2
try_decompress '\135\0\0\0' xxx unlzma
try_decompress '\211\114\132' xy 'lzop -d'
try_decompress '\002\041\114\030' xyy 'lz4 -d -l'
+try_decompress '\050\265\057\375' xxx unzstd
# Bail out:
echo "$me: Cannot find kernel config." >&2
diff --git a/scripts/gcc-ld b/scripts/gcc-ld
deleted file mode 100755
index 997b818c3962..000000000000
--- a/scripts/gcc-ld
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-# SPDX-License-Identifier: GPL-2.0
-# run gcc with ld options
-# used as a wrapper to execute link time optimizations
-# yes virginia, this is not pretty
-
-ARGS="-nostdlib"
-
-while [ "$1" != "" ] ; do
- case "$1" in
- -save-temps|-m32|-m64) N="$1" ;;
- -r) N="$1" ;;
- -[Wg]*) N="$1" ;;
- -[olv]|-[Ofd]*|-nostdlib) N="$1" ;;
- --end-group|--start-group)
- N="-Wl,$1" ;;
- -[RTFGhIezcbyYu]*|\
---script|--defsym|-init|-Map|--oformat|-rpath|\
--rpath-link|--sort-section|--section-start|-Tbss|-Tdata|-Ttext|\
---version-script|--dynamic-list|--version-exports-symbol|--wrap|-m)
- A="$1" ; shift ; N="-Wl,$A,$1" ;;
- -[m]*) N="$1" ;;
- -*) N="-Wl,$1" ;;
- *) N="$1" ;;
- esac
- ARGS="$ARGS $N"
- shift
-done
-
-exec $CC $ARGS
diff --git a/scripts/mksysmap b/scripts/mksysmap
index 9aa23d15862a..ad8bbc52267d 100755
--- a/scripts/mksysmap
+++ b/scripts/mksysmap
@@ -41,4 +41,4 @@
# so we just ignore them to let readprofile continue to work.
# (At least sparc64 has __crc_ in the middle).
-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)' > $2
+$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\( \.L\)\|\( L0\)' > $2
diff --git a/security/landlock/fs.c b/security/landlock/fs.c
index ec5a6247cd3e..a9dbd99d9ee7 100644
--- a/security/landlock/fs.c
+++ b/security/landlock/fs.c
@@ -150,6 +150,16 @@ retry:
/* clang-format on */
/*
 * All access rights that are denied by default, whether or not they are
 * handled by a ruleset/layer. This must be ORed with all ruleset->fs_access_masks[]
+ * entries when we need to get the absolute handled access masks.
+ */
+/* clang-format off */
+#define ACCESS_INITIALLY_DENIED ( \
+ LANDLOCK_ACCESS_FS_REFER)
+/* clang-format on */
+
+/*
* @path: Should have been checked by get_path_from_fd().
*/
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
@@ -167,7 +177,9 @@ int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
return -EINVAL;
/* Transforms relative access rights to absolute ones. */
- access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0];
+ access_rights |=
+ LANDLOCK_MASK_ACCESS_FS &
+ ~(ruleset->fs_access_masks[0] | ACCESS_INITIALLY_DENIED);
object = get_inode_object(d_backing_inode(path->dentry));
if (IS_ERR(object))
return PTR_ERR(object);
@@ -277,23 +289,12 @@ static inline bool is_nouser_or_private(const struct dentry *dentry)
static inline access_mask_t
get_handled_accesses(const struct landlock_ruleset *const domain)
{
- access_mask_t access_dom = 0;
- unsigned long access_bit;
-
- for (access_bit = 0; access_bit < LANDLOCK_NUM_ACCESS_FS;
- access_bit++) {
- size_t layer_level;
+ access_mask_t access_dom = ACCESS_INITIALLY_DENIED;
+ size_t layer_level;
- for (layer_level = 0; layer_level < domain->num_layers;
- layer_level++) {
- if (domain->fs_access_masks[layer_level] &
- BIT_ULL(access_bit)) {
- access_dom |= BIT_ULL(access_bit);
- break;
- }
- }
- }
- return access_dom;
+ for (layer_level = 0; layer_level < domain->num_layers; layer_level++)
+ access_dom |= domain->fs_access_masks[layer_level];
+ return access_dom & LANDLOCK_MASK_ACCESS_FS;
}
static inline access_mask_t
@@ -316,8 +317,13 @@ init_layer_masks(const struct landlock_ruleset *const domain,
for_each_set_bit(access_bit, &access_req,
ARRAY_SIZE(*layer_masks)) {
- if (domain->fs_access_masks[layer_level] &
- BIT_ULL(access_bit)) {
+ /*
+ * Artificially handle all access rights that are initially
+ * denied by default.
+ */
+ if (BIT_ULL(access_bit) &
+ (domain->fs_access_masks[layer_level] |
+ ACCESS_INITIALLY_DENIED)) {
(*layer_masks)[access_bit] |=
BIT_ULL(layer_level);
handled_accesses |= BIT_ULL(access_bit);
@@ -857,10 +863,6 @@ static int current_check_refer_path(struct dentry *const old_dentry,
NULL, NULL);
}
- /* Backward compatibility: no reparenting support. */
- if (!(get_handled_accesses(dom) & LANDLOCK_ACCESS_FS_REFER))
- return -EXDEV;
-
access_request_parent1 |= LANDLOCK_ACCESS_FS_REFER;
access_request_parent2 |= LANDLOCK_ACCESS_FS_REFER;
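After this landlock patch, "handled accesses" reduces to a fold: start from the rights denied by default, OR in every layer's mask, and clamp to the filesystem access mask. A sketch of that computation with made-up mask values; the two defines are placeholders, not the real Landlock constants:

#include <stdint.h>
#include <stdio.h>

/* Placeholder masks, not the real Landlock constants. */
#define MASK_ACCESS_FS		0x1fffu
#define ACCESS_INITIALLY_DENIED	0x1000u /* stand-in for the REFER bit */

/* Shape of get_handled_accesses() after the patch: fold every layer
 * on top of the default-denied rights, then clamp to the fs mask. */
static uint32_t handled_accesses(const uint32_t *layer_masks, int num_layers)
{
	uint32_t access_dom = ACCESS_INITIALLY_DENIED;
	int i;

	for (i = 0; i < num_layers; i++)
		access_dom |= layer_masks[i];
	return access_dom & MASK_ACCESS_FS;
}

int main(void)
{
	uint32_t layers[] = { 0x0003u, 0x0040u }; /* two stacked rulesets */

	printf("handled = %#x\n", handled_accesses(layers, 2));
	return 0;
}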
diff --git a/security/security.c b/security/security.c
index 14d30fec8a00..4b95de24bc8d 100644
--- a/security/security.c
+++ b/security/security.c
@@ -2660,4 +2660,8 @@ int security_uring_sqpoll(void)
{
return call_int_hook(uring_sqpoll, 0);
}
+int security_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ return call_int_hook(uring_cmd, 0, ioucmd);
+}
#endif /* CONFIG_IO_URING */
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 79573504783b..03bca97c8b29 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -91,6 +91,7 @@
#include <uapi/linux/mount.h>
#include <linux/fsnotify.h>
#include <linux/fanotify.h>
+#include <linux/io_uring.h>
#include "avc.h"
#include "objsec.h"
@@ -6987,6 +6988,28 @@ static int selinux_uring_sqpoll(void)
return avc_has_perm(&selinux_state, sid, sid,
SECCLASS_IO_URING, IO_URING__SQPOLL, NULL);
}
+
+/**
+ * selinux_uring_cmd - check if IORING_OP_URING_CMD is allowed
+ * @ioucmd: the io_uring command structure
+ *
+ * Check to see if the current domain is allowed to execute an
+ * IORING_OP_URING_CMD against the device/file specified in @ioucmd.
+ *
+ */
+static int selinux_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct inode *inode = file_inode(file);
+ struct inode_security_struct *isec = selinux_inode(inode);
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_FILE;
+ ad.u.file = file;
+
+ return avc_has_perm(&selinux_state, current_sid(), isec->sid,
+ SECCLASS_IO_URING, IO_URING__CMD, &ad);
+}
#endif /* CONFIG_IO_URING */
/*
@@ -7231,6 +7254,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = {
#ifdef CONFIG_IO_URING
LSM_HOOK_INIT(uring_override_creds, selinux_uring_override_creds),
LSM_HOOK_INIT(uring_sqpoll, selinux_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, selinux_uring_cmd),
#endif
/*
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index ff757ae5f253..1c2f41ff4e55 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -253,7 +253,7 @@ const struct security_class_mapping secclass_map[] = {
{ "anon_inode",
{ COMMON_FILE_PERMS, NULL } },
{ "io_uring",
- { "override_creds", "sqpoll", NULL } },
+ { "override_creds", "sqpoll", "cmd", NULL } },
{ NULL }
};
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 001831458fa2..bffccdc494cb 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -42,6 +42,7 @@
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/watch_queue.h>
+#include <linux/io_uring.h>
#include "smack.h"
#define TRANS_TRUE "TRUE"
@@ -4732,6 +4733,36 @@ static int smack_uring_sqpoll(void)
return -EPERM;
}
+/**
+ * smack_uring_cmd - check on file operations for io_uring
+ * @ioucmd: the command in question
+ *
+ * Make a best guess about whether an io_uring "command" should
+ * be allowed. Use the same logic used for determining if the
+ * file could be opened for read in the absence of better criteria.
+ */
+static int smack_uring_cmd(struct io_uring_cmd *ioucmd)
+{
+ struct file *file = ioucmd->file;
+ struct smk_audit_info ad;
+ struct task_smack *tsp;
+ struct inode *inode;
+ int rc;
+
+ if (!file)
+ return -EINVAL;
+
+ tsp = smack_cred(file->f_cred);
+ inode = file_inode(file);
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_tskacc(tsp, smk_of_inode(inode), MAY_READ, &ad);
+ rc = smk_bu_credfile(file->f_cred, file, MAY_READ, rc);
+
+ return rc;
+}
+
#endif /* CONFIG_IO_URING */
struct lsm_blob_sizes smack_blob_sizes __lsm_ro_after_init = {
@@ -4889,6 +4920,7 @@ static struct security_hook_list smack_hooks[] __lsm_ro_after_init = {
#ifdef CONFIG_IO_URING
LSM_HOOK_INIT(uring_override_creds, smack_uring_override_creds),
LSM_HOOK_INIT(uring_sqpoll, smack_uring_sqpoll),
+ LSM_HOOK_INIT(uring_cmd, smack_uring_cmd),
#endif
};
diff --git a/sound/core/control.c b/sound/core/control.c
index f3e893715369..a7271927d875 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -385,14 +385,14 @@ static bool elem_id_matches(const struct snd_kcontrol *kctl,
#define MULTIPLIER 37
static unsigned long get_ctl_id_hash(const struct snd_ctl_elem_id *id)
{
+ int i;
unsigned long h;
- const unsigned char *p;
h = id->iface;
h = MULTIPLIER * h + id->device;
h = MULTIPLIER * h + id->subdevice;
- for (p = id->name; *p; p++)
- h = MULTIPLIER * h + *p;
+ for (i = 0; i < SNDRV_CTL_ELEM_ID_NAME_MAXLEN && id->name[i]; i++)
+ h = MULTIPLIER * h + id->name[i];
h = MULTIPLIER * h + id->index;
h &= LONG_MAX;
return h;
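The control.c hash previously trusted a NUL terminator in a fixed-size name buffer; the fix bounds the walk at SNDRV_CTL_ELEM_ID_NAME_MAXLEN so a full, unterminated buffer cannot be over-read. A standalone version of the bounded multiplier hash; the 44-byte bound here is an assumed stand-in for the real constant:

#include <limits.h>
#include <stdio.h>

#define MULTIPLIER 37
#define NAME_MAXLEN 44 /* assumed, standing in for SNDRV_CTL_ELEM_ID_NAME_MAXLEN */

/* Hash a fixed-size name field that may lack a NUL terminator: the
 * bound is checked before each dereference, so nothing past the
 * buffer is ever read. */
static unsigned long hash_name(const char *name)
{
	unsigned long h = 0;
	int i;

	for (i = 0; i < NAME_MAXLEN && name[i]; i++)
		h = MULTIPLIER * h + (unsigned char)name[i];
	return h & LONG_MAX;
}

int main(void)
{
	char name[NAME_MAXLEN];
	int i;

	for (i = 0; i < NAME_MAXLEN; i++) /* worst case: no terminator */
		name[i] = 'A';
	printf("hash = %lu\n", hash_name(name));
	return 0;
}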
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index d3885cb02270..cfcd8eff4139 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -20,6 +20,13 @@
static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab);
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *do_alloc_fallback_pages(struct device *dev, size_t size,
+ dma_addr_t *addr, bool wc);
+static void do_free_fallback_pages(void *p, size_t size, bool wc);
+static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
+#endif
+
/* a cast to gfp flag from the dev pointer; for CONTINUOUS and VMALLOC types */
static inline gfp_t snd_mem_get_gfp_flags(const struct snd_dma_buffer *dmab,
gfp_t default_gfp)
@@ -277,16 +284,21 @@ EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
/*
* Continuous pages allocator
*/
-static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+static void *do_alloc_pages(size_t size, dma_addr_t *addr, gfp_t gfp)
{
- gfp_t gfp = snd_mem_get_gfp_flags(dmab, GFP_KERNEL);
void *p = alloc_pages_exact(size, gfp);
if (p)
- dmab->addr = page_to_phys(virt_to_page(p));
+ *addr = page_to_phys(virt_to_page(p));
return p;
}
+static void *snd_dma_continuous_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ return do_alloc_pages(size, &dmab->addr,
+ snd_mem_get_gfp_flags(dmab, GFP_KERNEL));
+}
+
static void snd_dma_continuous_free(struct snd_dma_buffer *dmab)
{
free_pages_exact(dmab->area, dmab->bytes);
@@ -463,6 +475,25 @@ static const struct snd_malloc_ops snd_dma_dev_ops = {
/*
* Write-combined pages
*/
+/* x86-specific allocations */
+#ifdef CONFIG_SND_DMA_SGBUF
+static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+ return do_alloc_fallback_pages(dmab->dev.dev, size, &dmab->addr, true);
+}
+
+static void snd_dma_wc_free(struct snd_dma_buffer *dmab)
+{
+ do_free_fallback_pages(dmab->area, dmab->bytes, true);
+}
+
+static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
+ struct vm_area_struct *area)
+{
+ area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+ return snd_dma_continuous_mmap(dmab, area);
+}
+#else
static void *snd_dma_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
{
return dma_alloc_wc(dmab->dev.dev, size, &dmab->addr, DEFAULT_GFP);
@@ -479,6 +510,7 @@ static int snd_dma_wc_mmap(struct snd_dma_buffer *dmab,
return dma_mmap_wc(dmab->dev.dev, area,
dmab->area, dmab->addr, dmab->bytes);
}
+#endif /* CONFIG_SND_DMA_SGBUF */
static const struct snd_malloc_ops snd_dma_wc_ops = {
.alloc = snd_dma_wc_alloc,
@@ -486,10 +518,6 @@ static const struct snd_malloc_ops snd_dma_wc_ops = {
.mmap = snd_dma_wc_mmap,
};
-#ifdef CONFIG_SND_DMA_SGBUF
-static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size);
-#endif
-
/*
* Non-contiguous pages allocator
*/
@@ -515,10 +543,13 @@ static void *snd_dma_noncontig_alloc(struct snd_dma_buffer *dmab, size_t size)
dmab->dev.need_sync = dma_need_sync(dmab->dev.dev,
sg_dma_address(sgt->sgl));
p = dma_vmap_noncontiguous(dmab->dev.dev, size, sgt);
- if (p)
+ if (p) {
dmab->private_data = sgt;
- else
+ /* store the first page address for convenience */
+ dmab->addr = snd_sgbuf_get_addr(dmab, 0);
+ } else {
dma_free_noncontiguous(dmab->dev.dev, size, sgt, dmab->dev.dir);
+ }
return p;
}
@@ -669,6 +700,37 @@ static const struct snd_malloc_ops snd_dma_sg_wc_ops = {
.get_chunk_size = snd_dma_noncontig_get_chunk_size,
};
+/* manual page allocations with wc setup */
+static void *do_alloc_fallback_pages(struct device *dev, size_t size,
+ dma_addr_t *addr, bool wc)
+{
+ gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+ void *p;
+
+ again:
+ p = do_alloc_pages(size, addr, gfp);
+ if (!p || (*addr + size - 1) & ~dev->coherent_dma_mask) {
+ if (IS_ENABLED(CONFIG_ZONE_DMA32) && !(gfp & GFP_DMA32)) {
+ gfp |= GFP_DMA32;
+ goto again;
+ }
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
+ }
+ }
+ if (p && wc)
+ set_memory_wc((unsigned long)(p), size >> PAGE_SHIFT);
+ return p;
+}
+
+static void do_free_fallback_pages(void *p, size_t size, bool wc)
+{
+ if (wc)
+ set_memory_wb((unsigned long)(p), size >> PAGE_SHIFT);
+ free_pages_exact(p, size);
+}
+
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
size_t count;
@@ -679,14 +741,11 @@ struct snd_dma_sg_fallback {
static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
struct snd_dma_sg_fallback *sgbuf)
{
+ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
size_t i;
- if (sgbuf->count && dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
- set_pages_array_wb(sgbuf->pages, sgbuf->count);
for (i = 0; i < sgbuf->count && sgbuf->pages[i]; i++)
- dma_free_coherent(dmab->dev.dev, PAGE_SIZE,
- page_address(sgbuf->pages[i]),
- sgbuf->addrs[i]);
+ do_free_fallback_pages(page_address(sgbuf->pages[i]), PAGE_SIZE, wc);
kvfree(sgbuf->pages);
kvfree(sgbuf->addrs);
kfree(sgbuf);
@@ -698,6 +757,7 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
struct page **pages;
size_t i, count;
void *p;
+ bool wc = dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK;
sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
if (!sgbuf)
@@ -712,19 +772,19 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
goto error;
for (i = 0; i < count; sgbuf->count++, i++) {
- p = dma_alloc_coherent(dmab->dev.dev, PAGE_SIZE,
- &sgbuf->addrs[i], DEFAULT_GFP);
+ p = do_alloc_fallback_pages(dmab->dev.dev, PAGE_SIZE,
+ &sgbuf->addrs[i], wc);
if (!p)
goto error;
sgbuf->pages[i] = virt_to_page(p);
}
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG_FALLBACK)
- set_pages_array_wc(pages, count);
p = vmap(pages, count, VM_MAP, PAGE_KERNEL);
if (!p)
goto error;
dmab->private_data = sgbuf;
+ /* store the first page address for convenience */
+ dmab->addr = snd_sgbuf_get_addr(dmab, 0);
return p;
error:
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 90c3a367d7de..02df915eb3c6 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1672,14 +1672,14 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
runtime = substream->runtime;
if (atomic_read(&substream->mmap_count))
goto __direct;
- err = snd_pcm_oss_make_ready(substream);
- if (err < 0)
- return err;
atomic_inc(&runtime->oss.rw_ref);
if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
atomic_dec(&runtime->oss.rw_ref);
return -ERESTARTSYS;
}
+ err = snd_pcm_oss_make_ready_locked(substream);
+ if (err < 0)
+ goto unlock;
format = snd_pcm_oss_format_from(runtime->oss.format);
width = snd_pcm_format_physical_width(format);
if (runtime->oss.buffer_used > 0) {
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index 1e3bf086f867..07efb38f58ac 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -270,7 +270,9 @@ snd_seq_oss_midi_clear_all(void)
void
snd_seq_oss_midi_setup(struct seq_oss_devinfo *dp)
{
+ spin_lock_irq(&register_lock);
dp->max_mididev = max_midi_devs;
+ spin_unlock_irq(&register_lock);
}
/*
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 2e9d695d336c..2d707afa1ef1 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -121,13 +121,13 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
spin_unlock_irqrestore(&clients_lock, flags);
#ifdef CONFIG_MODULES
if (!in_interrupt()) {
- static char client_requested[SNDRV_SEQ_GLOBAL_CLIENTS];
- static char card_requested[SNDRV_CARDS];
+ static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
+ static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
+
if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
int idx;
- if (!client_requested[clientid]) {
- client_requested[clientid] = 1;
+ if (!test_and_set_bit(clientid, client_requested)) {
for (idx = 0; idx < 15; idx++) {
if (seq_client_load[idx] < 0)
break;
@@ -142,10 +142,8 @@ struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
SNDRV_SEQ_CLIENTS_PER_CARD;
if (card < snd_ecards_limit) {
- if (! card_requested[card]) {
- card_requested[card] = 1;
+ if (!test_and_set_bit(card, card_requested))
snd_request_card(card);
- }
snd_seq_device_load_drivers();
}
}
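Converting the request-once flags to DECLARE_BITMAP plus test_and_set_bit() makes "load this module at most once" race-free: the first caller atomically claims the bit and everyone else sees it already set. The same semantics in C11 atomics; the bitmap helpers here are illustrative, not the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define NUM_CLIENTS 64

/* One bit per client; atomic_fetch_or gives the same "first caller
 * wins" guarantee as the kernel's test_and_set_bit(). */
static atomic_uint requested[(NUM_CLIENTS + 31) / 32];

static int test_and_set(int nr)
{
	unsigned int mask = 1u << (nr % 32);

	return (atomic_fetch_or(&requested[nr / 32], mask) & mask) != 0;
}

static void request_client(int id)
{
	if (!test_and_set(id))
		printf("loading module for client %d\n", id); /* once only */
}

int main(void)
{
	request_client(14);
	request_client(14); /* already claimed: skipped */
	return 0;
}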
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 9b4a7cdb103a..12f12a294df5 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -605,17 +605,18 @@ static unsigned int loopback_jiffies_timer_pos_update
cable->streams[SNDRV_PCM_STREAM_PLAYBACK];
struct loopback_pcm *dpcm_capt =
cable->streams[SNDRV_PCM_STREAM_CAPTURE];
- unsigned long delta_play = 0, delta_capt = 0;
+ unsigned long delta_play = 0, delta_capt = 0, cur_jiffies;
unsigned int running, count1, count2;
+ cur_jiffies = jiffies;
running = cable->running ^ cable->pause;
if (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) {
- delta_play = jiffies - dpcm_play->last_jiffies;
+ delta_play = cur_jiffies - dpcm_play->last_jiffies;
dpcm_play->last_jiffies += delta_play;
}
if (running & (1 << SNDRV_PCM_STREAM_CAPTURE)) {
- delta_capt = jiffies - dpcm_capt->last_jiffies;
+ delta_capt = cur_jiffies - dpcm_capt->last_jiffies;
dpcm_capt->last_jiffies += delta_capt;
}
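aloop now samples jiffies once into cur_jiffies and uses that single value for both streams; two separate reads could straddle a tick and leave playback and capture deltas computed against different instants. A userspace analogue using one clock_gettime() snapshot per update; struct stream and the field names are hypothetical:

#include <stdio.h>
#include <time.h>

struct stream { long long last_ns; }; /* hypothetical per-stream state */

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* One clock read per update, applied to both streams, just as the
 * aloop patch uses a single cur_jiffies value. */
static void pos_update(struct stream *play, struct stream *capt)
{
	long long cur = now_ns();
	long long dplay = cur - play->last_ns;
	long long dcapt = cur - capt->last_ns;

	play->last_ns += dplay;
	capt->last_ns += dcapt;
	printf("deltas measured from one instant: %lld / %lld ns\n",
	       dplay, dcapt);
}

int main(void)
{
	struct stream p = { now_ns() }, c = { now_ns() };

	pos_update(&p, &c);
	return 0;
}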
diff --git a/sound/hda/intel-nhlt.c b/sound/hda/intel-nhlt.c
index 9db5ccd9aa2d..13bb0ccfb36c 100644
--- a/sound/hda/intel-nhlt.c
+++ b/sound/hda/intel-nhlt.c
@@ -55,16 +55,22 @@ int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt)
/* find max number of channels based on format_configuration */
if (fmt_configs->fmt_count) {
+ struct nhlt_fmt_cfg *fmt_cfg = fmt_configs->fmt_config;
+
dev_dbg(dev, "found %d format definitions\n",
fmt_configs->fmt_count);
for (i = 0; i < fmt_configs->fmt_count; i++) {
struct wav_fmt_ext *fmt_ext;
- fmt_ext = &fmt_configs->fmt_config[i].fmt_ext;
+ fmt_ext = &fmt_cfg->fmt_ext;
if (fmt_ext->fmt.channels > max_ch)
max_ch = fmt_ext->fmt.channels;
+
+ /* Move to the next nhlt_fmt_cfg */
+ fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps +
+ fmt_cfg->config.size);
}
dev_dbg(dev, "max channels found %d\n", max_ch);
} else {
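The intel-nhlt loop previously indexed fmt_config[] as if entries were fixed-size, but each nhlt_fmt_cfg ends in a variable-length capabilities blob, so the cursor must advance by the header plus config.size. A sketch of walking such variable-length records; struct fmt_cfg and its fields are invented for the example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented variable-length record: 'size' counts the caps[] bytes
 * that follow the fixed header. */
struct fmt_cfg {
	uint16_t channels;
	uint32_t size;
	uint8_t caps[];
};

int main(void)
{
	static _Alignas(8) uint8_t buf[64];
	const struct fmt_cfg *cfg;
	uint8_t *p = buf;
	int i, max_ch = 0;

	/* Lay out two records with different caps sizes. */
	for (i = 0; i < 2; i++) {
		struct fmt_cfg hdr = { .channels = (uint16_t)(2 + 2 * i),
				       .size = (uint32_t)(4 * (i + 1)) };
		memcpy(p, &hdr, sizeof(hdr));
		p += sizeof(hdr) + hdr.size; /* caps bytes stay zeroed */
	}

	/* The fixed walk: step over the header AND the payload. */
	cfg = (const struct fmt_cfg *)buf;
	for (i = 0; i < 2; i++) {
		if (cfg->channels > max_ch)
			max_ch = cfg->channels;
		cfg = (const struct fmt_cfg *)((const uint8_t *)cfg +
					       sizeof(*cfg) + cfg->size);
	}
	printf("max channels = %d\n", max_ch);
	return 0;
}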
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index b2701a4452d8..48af77ae8020 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -124,7 +124,7 @@ static int snd_emu10k1_pcm_channel_alloc(struct snd_emu10k1_pcm * epcm, int voic
epcm->voices[0]->epcm = epcm;
if (voices > 1) {
for (i = 1; i < voices; i++) {
- epcm->voices[i] = &epcm->emu->voices[epcm->voices[0]->number + i];
+ epcm->voices[i] = &epcm->emu->voices[(epcm->voices[0]->number + i) % NUM_G];
epcm->voices[i]->epcm = epcm;
}
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index a77165bd92a9..b20694fd69de 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1817,7 +1817,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
/* use the non-cached pages in non-snoop mode */
if (!azx_snoop(chip))
- azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC;
+ azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_WC_SG;
if (chip->driver_type == AZX_DRIVER_NVIDIA) {
dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 7debb2c76aa6..976a112c7d00 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -474,7 +474,8 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match);
static int hda_tegra_probe(struct platform_device *pdev)
{
const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR |
- AZX_DCAPS_PM_RUNTIME;
+ AZX_DCAPS_PM_RUNTIME |
+ AZX_DCAPS_4K_BDLE_BOUNDARY;
struct snd_card *card;
struct azx *chip;
struct hda_tegra *hda;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 47e72cf76608..38930cf5aace 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4700,6 +4700,48 @@ static void alc236_fixup_hp_mute_led_micmute_vref(struct hda_codec *codec,
alc236_fixup_hp_micmute_led_vref(codec, fix, action);
}
+static inline void alc298_samsung_write_coef_pack(struct hda_codec *codec,
+ const unsigned short coefs[2])
+{
+ alc_write_coef_idx(codec, 0x23, coefs[0]);
+ alc_write_coef_idx(codec, 0x25, coefs[1]);
+ alc_write_coef_idx(codec, 0x26, 0xb011);
+}
+
+struct alc298_samsung_amp_desc {
+ unsigned char nid;
+ unsigned short init_seq[2][2];
+};
+
+static void alc298_fixup_samsung_amp(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ int i, j;
+ static const unsigned short init_seq[][2] = {
+ { 0x19, 0x00 }, { 0x20, 0xc0 }, { 0x22, 0x44 }, { 0x23, 0x08 },
+ { 0x24, 0x85 }, { 0x25, 0x41 }, { 0x35, 0x40 }, { 0x36, 0x01 },
+ { 0x38, 0x81 }, { 0x3a, 0x03 }, { 0x3b, 0x81 }, { 0x40, 0x3e },
+ { 0x41, 0x07 }, { 0x400, 0x1 }
+ };
+ static const struct alc298_samsung_amp_desc amps[] = {
+ { 0x3a, { { 0x18, 0x1 }, { 0x26, 0x0 } } },
+ { 0x39, { { 0x18, 0x2 }, { 0x26, 0x1 } } }
+ };
+
+ if (action != HDA_FIXUP_ACT_INIT)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(amps); i++) {
+ alc_write_coef_idx(codec, 0x22, amps[i].nid);
+
+ for (j = 0; j < ARRAY_SIZE(amps[i].init_seq); j++)
+ alc298_samsung_write_coef_pack(codec, amps[i].init_seq[j]);
+
+ for (j = 0; j < ARRAY_SIZE(init_seq); j++)
+ alc298_samsung_write_coef_pack(codec, init_seq[j]);
+ }
+}
+
#if IS_REACHABLE(CONFIG_INPUT)
static void gpio2_mic_hotkey_event(struct hda_codec *codec,
struct hda_jack_callback *event)
@@ -7030,6 +7072,7 @@ enum {
ALC236_FIXUP_HP_GPIO_LED,
ALC236_FIXUP_HP_MUTE_LED,
ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
+ ALC298_FIXUP_SAMSUNG_AMP,
ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET,
ALC295_FIXUP_ASUS_MIC_NO_PRESENCE,
@@ -8396,6 +8439,12 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc236_fixup_hp_mute_led_micmute_vref,
},
+ [ALC298_FIXUP_SAMSUNG_AMP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc298_fixup_samsung_amp,
+ .chained = true,
+ .chain_id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET
+ },
[ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -9342,13 +9391,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
- SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
- SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
- SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
- SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
- SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
- SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
+ SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_AMP),
+ SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP),
SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
@@ -9716,7 +9765,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
{.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
{.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
- {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
+ {.id = ALC298_FIXUP_SAMSUNG_AMP, .name = "alc298-samsung-amp"},
{.id = ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc256-samsung-headphone"},
{.id = ALC255_FIXUP_XIAOMI_HEADSET_MIC, .name = "alc255-xiaomi-headset"},
{.id = ALC274_FIXUP_HP_MIC, .name = "alc274-hp-mic-detect"},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 61df4d33c48f..7f340f18599c 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -209,6 +209,7 @@ struct sigmatel_spec {
/* beep widgets */
hda_nid_t anabeep_nid;
+ bool beep_power_on;
/* SPDIF-out mux */
const char * const *spdif_labels;
@@ -4443,6 +4444,28 @@ static int stac_suspend(struct hda_codec *codec)
return 0;
}
+
+static int stac_check_power_status(struct hda_codec *codec, hda_nid_t nid)
+{
+#ifdef CONFIG_SND_HDA_INPUT_BEEP
+ struct sigmatel_spec *spec = codec->spec;
+#endif
+ int ret = snd_hda_gen_check_power_status(codec, nid);
+
+#ifdef CONFIG_SND_HDA_INPUT_BEEP
+ if (nid == spec->gen.beep_nid && codec->beep) {
+ if (codec->beep->enabled != spec->beep_power_on) {
+ spec->beep_power_on = codec->beep->enabled;
+ if (spec->beep_power_on)
+ snd_hda_power_up_pm(codec);
+ else
+ snd_hda_power_down_pm(codec);
+ }
+ ret |= spec->beep_power_on;
+ }
+#endif
+ return ret;
+}
#else
#define stac_suspend NULL
#endif /* CONFIG_PM */
@@ -4455,6 +4478,7 @@ static const struct hda_codec_ops stac_patch_ops = {
.unsol_event = snd_hda_jack_unsol_event,
#ifdef CONFIG_PM
.suspend = stac_suspend,
+ .check_power_status = stac_check_power_status,
#endif
};
diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c
index 4850a177803d..ab2d7a791f39 100644
--- a/sound/soc/atmel/mchp-spdiftx.c
+++ b/sound/soc/atmel/mchp-spdiftx.c
@@ -196,7 +196,7 @@ struct mchp_spdiftx_dev {
struct clk *pclk;
struct clk *gclk;
unsigned int fmt;
- int gclk_enabled:1;
+ unsigned int gclk_enabled:1;
};
static inline int mchp_spdiftx_is_running(struct mchp_spdiftx_dev *dev)
diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
index d545a593a251..daafd4251ce6 100644
--- a/sound/soc/codecs/cs42l42.c
+++ b/sound/soc/codecs/cs42l42.c
@@ -1617,7 +1617,6 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
unsigned int current_plug_status;
unsigned int current_button_status;
unsigned int i;
- int report = 0;
mutex_lock(&cs42l42->irq_lock);
if (cs42l42->suspended) {
@@ -1711,13 +1710,15 @@ static irqreturn_t cs42l42_irq_thread(int irq, void *data)
if (current_button_status & CS42L42_M_DETECT_TF_MASK) {
dev_dbg(cs42l42->dev, "Button released\n");
- report = 0;
+ snd_soc_jack_report(cs42l42->jack, 0,
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
} else if (current_button_status & CS42L42_M_DETECT_FT_MASK) {
- report = cs42l42_handle_button_press(cs42l42);
-
+ snd_soc_jack_report(cs42l42->jack,
+ cs42l42_handle_button_press(cs42l42),
+ SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3);
}
- snd_soc_jack_report(cs42l42->jack, report, SND_JACK_BTN_0 | SND_JACK_BTN_1 |
- SND_JACK_BTN_2 | SND_JACK_BTN_3);
}
}
diff --git a/sound/soc/codecs/nau8540.c b/sound/soc/codecs/nau8540.c
index 58f70a02f18a..0626d5694c22 100644
--- a/sound/soc/codecs/nau8540.c
+++ b/sound/soc/codecs/nau8540.c
@@ -357,17 +357,32 @@ static const struct snd_soc_dapm_route nau8540_dapm_routes[] = {
{"AIFTX", NULL, "Digital CH4 Mux"},
};
-static int nau8540_clock_check(struct nau8540 *nau8540, int rate, int osr)
+static const struct nau8540_osr_attr *
+nau8540_get_osr(struct nau8540 *nau8540)
{
+ unsigned int osr;
+
+ regmap_read(nau8540->regmap, NAU8540_REG_ADC_SAMPLE_RATE, &osr);
+ osr &= NAU8540_ADC_OSR_MASK;
if (osr >= ARRAY_SIZE(osr_adc_sel))
- return -EINVAL;
+ return NULL;
+ return &osr_adc_sel[osr];
+}
+
+static int nau8540_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8540 *nau8540 = snd_soc_component_get_drvdata(component);
+ const struct nau8540_osr_attr *osr;
- if (rate * osr > CLK_ADC_MAX) {
- dev_err(nau8540->dev, "exceed the maximum frequency of CLK_ADC\n");
+ osr = nau8540_get_osr(nau8540);
+ if (!osr || !osr->osr)
return -EINVAL;
- }
- return 0;
+ return snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ 0, CLK_ADC_MAX / osr->osr);
}
static int nau8540_hw_params(struct snd_pcm_substream *substream,
@@ -375,7 +390,8 @@ static int nau8540_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_component *component = dai->component;
struct nau8540 *nau8540 = snd_soc_component_get_drvdata(component);
- unsigned int val_len = 0, osr;
+ unsigned int val_len = 0;
+ const struct nau8540_osr_attr *osr;
/* CLK_ADC = OSR * FS
* ADC clock frequency is defined as Over Sampling Rate (OSR)
@@ -383,13 +399,14 @@ static int nau8540_hw_params(struct snd_pcm_substream *substream,
* values must be selected such that the maximum frequency is less
* than 6.144 MHz.
*/
- regmap_read(nau8540->regmap, NAU8540_REG_ADC_SAMPLE_RATE, &osr);
- osr &= NAU8540_ADC_OSR_MASK;
- if (nau8540_clock_check(nau8540, params_rate(params), osr))
+ osr = nau8540_get_osr(nau8540);
+ if (!osr || !osr->osr)
+ return -EINVAL;
+ if (params_rate(params) * osr->osr > CLK_ADC_MAX)
return -EINVAL;
regmap_update_bits(nau8540->regmap, NAU8540_REG_CLOCK_SRC,
NAU8540_CLK_ADC_SRC_MASK,
- osr_adc_sel[osr].clk_src << NAU8540_CLK_ADC_SRC_SFT);
+ osr->clk_src << NAU8540_CLK_ADC_SRC_SFT);
switch (params_width(params)) {
case 16:
@@ -515,6 +532,7 @@ static int nau8540_set_tdm_slot(struct snd_soc_dai *dai,
static const struct snd_soc_dai_ops nau8540_dai_ops = {
+ .startup = nau8540_dai_startup,
.hw_params = nau8540_hw_params,
.set_fmt = nau8540_set_fmt,
.set_tdm_slot = nau8540_set_tdm_slot,
diff --git a/sound/soc/codecs/nau8821.c b/sound/soc/codecs/nau8821.c
index 2d21339932e6..4a72b94e8410 100644
--- a/sound/soc/codecs/nau8821.c
+++ b/sound/soc/codecs/nau8821.c
@@ -670,28 +670,40 @@ static const struct snd_soc_dapm_route nau8821_dapm_routes[] = {
{"HPOR", NULL, "Class G"},
};
-static int nau8821_clock_check(struct nau8821 *nau8821,
- int stream, int rate, int osr)
+static const struct nau8821_osr_attr *
+nau8821_get_osr(struct nau8821 *nau8821, int stream)
{
- int osrate = 0;
+ unsigned int osr;
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_read(nau8821->regmap, NAU8821_R2C_DAC_CTRL1, &osr);
+ osr &= NAU8821_DAC_OVERSAMPLE_MASK;
if (osr >= ARRAY_SIZE(osr_dac_sel))
- return -EINVAL;
- osrate = osr_dac_sel[osr].osr;
+ return NULL;
+ return &osr_dac_sel[osr];
} else {
+ regmap_read(nau8821->regmap, NAU8821_R2B_ADC_RATE, &osr);
+ osr &= NAU8821_ADC_SYNC_DOWN_MASK;
if (osr >= ARRAY_SIZE(osr_adc_sel))
- return -EINVAL;
- osrate = osr_adc_sel[osr].osr;
+ return NULL;
+ return &osr_adc_sel[osr];
}
+}
- if (!osrate || rate * osrate > CLK_DA_AD_MAX) {
- dev_err(nau8821->dev,
- "exceed the maximum frequency of CLK_ADC or CLK_DAC");
+static int nau8821_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8821 *nau8821 = snd_soc_component_get_drvdata(component);
+ const struct nau8821_osr_attr *osr;
+
+ osr = nau8821_get_osr(nau8821, substream->stream);
+ if (!osr || !osr->osr)
return -EINVAL;
- }
- return 0;
+ return snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ 0, CLK_DA_AD_MAX / osr->osr);
}
static int nau8821_hw_params(struct snd_pcm_substream *substream,
@@ -699,7 +711,8 @@ static int nau8821_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_component *component = dai->component;
struct nau8821 *nau8821 = snd_soc_component_get_drvdata(component);
- unsigned int val_len = 0, osr, ctrl_val, bclk_fs, clk_div;
+ unsigned int val_len = 0, ctrl_val, bclk_fs, clk_div;
+ const struct nau8821_osr_attr *osr;
nau8821->fs = params_rate(params);
/* CLK_DAC or CLK_ADC = OSR * FS
@@ -708,27 +721,19 @@ static int nau8821_hw_params(struct snd_pcm_substream *substream,
* values must be selected such that the maximum frequency is less
* than 6.144 MHz.
*/
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- regmap_read(nau8821->regmap, NAU8821_R2C_DAC_CTRL1, &osr);
- osr &= NAU8821_DAC_OVERSAMPLE_MASK;
- if (nau8821_clock_check(nau8821, substream->stream,
- nau8821->fs, osr)) {
- return -EINVAL;
- }
+ osr = nau8821_get_osr(nau8821, substream->stream);
+ if (!osr || !osr->osr)
+ return -EINVAL;
+ if (nau8821->fs * osr->osr > CLK_DA_AD_MAX)
+ return -EINVAL;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
regmap_update_bits(nau8821->regmap, NAU8821_R03_CLK_DIVIDER,
NAU8821_CLK_DAC_SRC_MASK,
- osr_dac_sel[osr].clk_src << NAU8821_CLK_DAC_SRC_SFT);
- } else {
- regmap_read(nau8821->regmap, NAU8821_R2B_ADC_RATE, &osr);
- osr &= NAU8821_ADC_SYNC_DOWN_MASK;
- if (nau8821_clock_check(nau8821, substream->stream,
- nau8821->fs, osr)) {
- return -EINVAL;
- }
+ osr->clk_src << NAU8821_CLK_DAC_SRC_SFT);
+ else
regmap_update_bits(nau8821->regmap, NAU8821_R03_CLK_DIVIDER,
NAU8821_CLK_ADC_SRC_MASK,
- osr_adc_sel[osr].clk_src << NAU8821_CLK_ADC_SRC_SFT);
- }
+ osr->clk_src << NAU8821_CLK_ADC_SRC_SFT);
/* make BCLK and LRC divider configuration if the codec is master. */
regmap_read(nau8821->regmap, NAU8821_R1D_I2S_PCM_CTRL2, &ctrl_val);
@@ -843,6 +848,7 @@ static int nau8821_digital_mute(struct snd_soc_dai *dai, int mute,
}
static const struct snd_soc_dai_ops nau8821_dai_ops = {
+ .startup = nau8821_dai_startup,
.hw_params = nau8821_hw_params,
.set_fmt = nau8821_set_dai_fmt,
.mute_stream = nau8821_digital_mute,
diff --git a/sound/soc/codecs/nau8824.c b/sound/soc/codecs/nau8824.c
index ad54d70f7d8e..15596452ca37 100644
--- a/sound/soc/codecs/nau8824.c
+++ b/sound/soc/codecs/nau8824.c
@@ -1014,27 +1014,42 @@ static irqreturn_t nau8824_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static int nau8824_clock_check(struct nau8824 *nau8824,
- int stream, int rate, int osr)
+static const struct nau8824_osr_attr *
+nau8824_get_osr(struct nau8824 *nau8824, int stream)
{
- int osrate;
+ unsigned int osr;
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_read(nau8824->regmap,
+ NAU8824_REG_DAC_FILTER_CTRL_1, &osr);
+ osr &= NAU8824_DAC_OVERSAMPLE_MASK;
if (osr >= ARRAY_SIZE(osr_dac_sel))
- return -EINVAL;
- osrate = osr_dac_sel[osr].osr;
+ return NULL;
+ return &osr_dac_sel[osr];
} else {
+ regmap_read(nau8824->regmap,
+ NAU8824_REG_ADC_FILTER_CTRL, &osr);
+ osr &= NAU8824_ADC_SYNC_DOWN_MASK;
if (osr >= ARRAY_SIZE(osr_adc_sel))
- return -EINVAL;
- osrate = osr_adc_sel[osr].osr;
+ return NULL;
+ return &osr_adc_sel[osr];
}
+}
+
+static int nau8824_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
+ const struct nau8824_osr_attr *osr;
- if (!osrate || rate * osr > CLK_DA_AD_MAX) {
- dev_err(nau8824->dev, "exceed the maximum frequency of CLK_ADC or CLK_DAC\n");
+ osr = nau8824_get_osr(nau8824, substream->stream);
+ if (!osr || !osr->osr)
return -EINVAL;
- }
- return 0;
+ return snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ 0, CLK_DA_AD_MAX / osr->osr);
}
static int nau8824_hw_params(struct snd_pcm_substream *substream,
@@ -1042,7 +1057,9 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_component *component = dai->component;
struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
- unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div;
+ unsigned int val_len = 0, ctrl_val, bclk_fs, bclk_div;
+ const struct nau8824_osr_attr *osr;
+ int err = -EINVAL;
nau8824_sema_acquire(nau8824, HZ);
@@ -1053,27 +1070,19 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
* than 6.144 MHz.
*/
nau8824->fs = params_rate(params);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- regmap_read(nau8824->regmap,
- NAU8824_REG_DAC_FILTER_CTRL_1, &osr);
- osr &= NAU8824_DAC_OVERSAMPLE_MASK;
- if (nau8824_clock_check(nau8824, substream->stream,
- nau8824->fs, osr))
- return -EINVAL;
+ osr = nau8824_get_osr(nau8824, substream->stream);
+ if (!osr || !osr->osr)
+ goto error;
+ if (nau8824->fs * osr->osr > CLK_DA_AD_MAX)
+ goto error;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
NAU8824_CLK_DAC_SRC_MASK,
- osr_dac_sel[osr].clk_src << NAU8824_CLK_DAC_SRC_SFT);
- } else {
- regmap_read(nau8824->regmap,
- NAU8824_REG_ADC_FILTER_CTRL, &osr);
- osr &= NAU8824_ADC_SYNC_DOWN_MASK;
- if (nau8824_clock_check(nau8824, substream->stream,
- nau8824->fs, osr))
- return -EINVAL;
+ osr->clk_src << NAU8824_CLK_DAC_SRC_SFT);
+ else
regmap_update_bits(nau8824->regmap, NAU8824_REG_CLK_DIVIDER,
NAU8824_CLK_ADC_SRC_MASK,
- osr_adc_sel[osr].clk_src << NAU8824_CLK_ADC_SRC_SFT);
- }
+ osr->clk_src << NAU8824_CLK_ADC_SRC_SFT);
/* make BCLK and LRC divider configuration if the codec is master. */
regmap_read(nau8824->regmap,
@@ -1090,7 +1099,7 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
else if (bclk_fs <= 256)
bclk_div = 0;
else
- return -EINVAL;
+ goto error;
regmap_update_bits(nau8824->regmap,
NAU8824_REG_PORT0_I2S_PCM_CTRL_2,
NAU8824_I2S_LRC_DIV_MASK | NAU8824_I2S_BLK_DIV_MASK,
@@ -1111,15 +1120,17 @@ static int nau8824_hw_params(struct snd_pcm_substream *substream,
val_len |= NAU8824_I2S_DL_32;
break;
default:
- return -EINVAL;
+ goto error;
}
regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
NAU8824_I2S_DL_MASK, val_len);
+ err = 0;
+ error:
nau8824_sema_release(nau8824);
- return 0;
+ return err;
}
static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
@@ -1128,8 +1139,6 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
struct nau8824 *nau8824 = snd_soc_component_get_drvdata(component);
unsigned int ctrl1_val = 0, ctrl2_val = 0;
- nau8824_sema_acquire(nau8824, HZ);
-
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBM_CFM:
ctrl2_val |= NAU8824_I2S_MS_MASTER;
@@ -1171,6 +1180,8 @@ static int nau8824_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
+ nau8824_sema_acquire(nau8824, HZ);
+
regmap_update_bits(nau8824->regmap, NAU8824_REG_PORT0_I2S_PCM_CTRL_1,
NAU8824_I2S_DF_MASK | NAU8824_I2S_BP_MASK |
NAU8824_I2S_PCMB_EN, ctrl1_val);
@@ -1547,6 +1558,7 @@ static const struct snd_soc_component_driver nau8824_component_driver = {
};
static const struct snd_soc_dai_ops nau8824_dai_ops = {
+ .startup = nau8824_dai_startup,
.hw_params = nau8824_hw_params,
.set_fmt = nau8824_set_fmt,
.set_tdm_slot = nau8824_set_tdm_slot,
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 54ef7b0fa878..8213273f501e 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -1247,27 +1247,42 @@ static const struct snd_soc_dapm_route nau8825_dapm_routes[] = {
{"HPOR", NULL, "Class G"},
};
-static int nau8825_clock_check(struct nau8825 *nau8825,
- int stream, int rate, int osr)
+static const struct nau8825_osr_attr *
+nau8825_get_osr(struct nau8825 *nau8825, int stream)
{
- int osrate;
+ unsigned int osr;
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ regmap_read(nau8825->regmap,
+ NAU8825_REG_DAC_CTRL1, &osr);
+ osr &= NAU8825_DAC_OVERSAMPLE_MASK;
if (osr >= ARRAY_SIZE(osr_dac_sel))
- return -EINVAL;
- osrate = osr_dac_sel[osr].osr;
+ return NULL;
+ return &osr_dac_sel[osr];
} else {
+ regmap_read(nau8825->regmap,
+ NAU8825_REG_ADC_RATE, &osr);
+ osr &= NAU8825_ADC_SYNC_DOWN_MASK;
if (osr >= ARRAY_SIZE(osr_adc_sel))
- return -EINVAL;
- osrate = osr_adc_sel[osr].osr;
+ return NULL;
+ return &osr_adc_sel[osr];
}
+}
- if (!osrate || rate * osr > CLK_DA_AD_MAX) {
- dev_err(nau8825->dev, "exceed the maximum frequency of CLK_ADC or CLK_DAC\n");
+static int nau8825_dai_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8825 *nau8825 = snd_soc_component_get_drvdata(component);
+ const struct nau8825_osr_attr *osr;
+
+ osr = nau8825_get_osr(nau8825, substream->stream);
+ if (!osr || !osr->osr)
return -EINVAL;
- }
- return 0;
+ return snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_RATE,
+ 0, CLK_DA_AD_MAX / osr->osr);
}
static int nau8825_hw_params(struct snd_pcm_substream *substream,
@@ -1276,7 +1291,9 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_component *component = dai->component;
struct nau8825 *nau8825 = snd_soc_component_get_drvdata(component);
- unsigned int val_len = 0, osr, ctrl_val, bclk_fs, bclk_div;
+ unsigned int val_len = 0, ctrl_val, bclk_fs, bclk_div;
+ const struct nau8825_osr_attr *osr;
+ int err = -EINVAL;
nau8825_sema_acquire(nau8825, 3 * HZ);
@@ -1286,29 +1303,19 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
* values must be selected such that the maximum frequency is less
* than 6.144 MHz.
*/
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- regmap_read(nau8825->regmap, NAU8825_REG_DAC_CTRL1, &osr);
- osr &= NAU8825_DAC_OVERSAMPLE_MASK;
- if (nau8825_clock_check(nau8825, substream->stream,
- params_rate(params), osr)) {
- nau8825_sema_release(nau8825);
- return -EINVAL;
- }
+ osr = nau8825_get_osr(nau8825, substream->stream);
+ if (!osr || !osr->osr)
+ goto error;
+ if (params_rate(params) * osr->osr > CLK_DA_AD_MAX)
+ goto error;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_DAC_SRC_MASK,
- osr_dac_sel[osr].clk_src << NAU8825_CLK_DAC_SRC_SFT);
- } else {
- regmap_read(nau8825->regmap, NAU8825_REG_ADC_RATE, &osr);
- osr &= NAU8825_ADC_SYNC_DOWN_MASK;
- if (nau8825_clock_check(nau8825, substream->stream,
- params_rate(params), osr)) {
- nau8825_sema_release(nau8825);
- return -EINVAL;
- }
+ osr->clk_src << NAU8825_CLK_DAC_SRC_SFT);
+ else
regmap_update_bits(nau8825->regmap, NAU8825_REG_CLK_DIVIDER,
NAU8825_CLK_ADC_SRC_MASK,
- osr_adc_sel[osr].clk_src << NAU8825_CLK_ADC_SRC_SFT);
- }
+ osr->clk_src << NAU8825_CLK_ADC_SRC_SFT);
/* make BCLK and LRC divider configuration if the codec is master. */
regmap_read(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2, &ctrl_val);
@@ -1321,10 +1328,8 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
bclk_div = 1;
else if (bclk_fs <= 128)
bclk_div = 0;
- else {
- nau8825_sema_release(nau8825);
- return -EINVAL;
- }
+ else
+ goto error;
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
NAU8825_I2S_LRC_DIV_MASK | NAU8825_I2S_BLK_DIV_MASK,
((bclk_div + 1) << NAU8825_I2S_LRC_DIV_SFT) | bclk_div);
@@ -1344,17 +1349,18 @@ static int nau8825_hw_params(struct snd_pcm_substream *substream,
val_len |= NAU8825_I2S_DL_32;
break;
default:
- nau8825_sema_release(nau8825);
- return -EINVAL;
+ goto error;
}
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL1,
NAU8825_I2S_DL_MASK, val_len);
+ err = 0;
+ error:
/* Release the semaphore. */
nau8825_sema_release(nau8825);
- return 0;
+ return err;
}
static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
@@ -1420,6 +1426,7 @@ static int nau8825_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
}
static const struct snd_soc_dai_ops nau8825_dai_ops = {
+ .startup = nau8825_dai_startup,
.hw_params = nau8825_hw_params,
.set_fmt = nau8825_set_dai_fmt,
};
diff --git a/sound/soc/fsl/fsl_aud2htx.c b/sound/soc/fsl/fsl_aud2htx.c
index 873295f59ad7..1e421d9a03fb 100644
--- a/sound/soc/fsl/fsl_aud2htx.c
+++ b/sound/soc/fsl/fsl_aud2htx.c
@@ -234,18 +234,26 @@ static int fsl_aud2htx_probe(struct platform_device *pdev)
regcache_cache_only(aud2htx->regmap, true);
+ /*
+ * Register the platform component before registering the CPU DAI, since
+ * there is no deferred probe for the platform component in
+ * snd_soc_add_pcm_runtime().
+ */
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to pcm register\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev,
&fsl_aud2htx_component,
&fsl_aud2htx_dai, 1);
if (ret) {
dev_err(&pdev->dev, "failed to register ASoC DAI\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
- ret = imx_pcm_dma_init(pdev);
- if (ret)
- dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
-
return ret;
}
diff --git a/sound/soc/fsl/fsl_mqs.c b/sound/soc/fsl/fsl_mqs.c
index c1e2f671191b..4922e6795b73 100644
--- a/sound/soc/fsl/fsl_mqs.c
+++ b/sound/soc/fsl/fsl_mqs.c
@@ -122,7 +122,7 @@ static int fsl_mqs_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
}
switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
- case SND_SOC_DAIFMT_BP_FP:
+ case SND_SOC_DAIFMT_CBC_CFC:
break;
default:
return -EINVAL;
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 7523bb944b21..d430eece1d6b 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1306,7 +1306,7 @@ static int fsl_sai_probe(struct platform_device *pdev)
sai->mclk_clk[i] = devm_clk_get(dev, tmp);
if (IS_ERR(sai->mclk_clk[i])) {
dev_err(dev, "failed to get mclk%d clock: %ld\n",
- i + 1, PTR_ERR(sai->mclk_clk[i]));
+ i, PTR_ERR(sai->mclk_clk[i]));
sai->mclk_clk[i] = NULL;
}
}
diff --git a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
index 266704556f37..094402470dc2 100644
--- a/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
+++ b/sound/soc/mediatek/mt8186/mt8186-dai-adda.c
@@ -271,9 +271,6 @@ static int mtk_adda_ul_event(struct snd_soc_dapm_widget *w,
/* should delay 1/fs (smallest is 8k) = 125us before afe off */
usleep_range(125, 135);
mt8186_afe_gpio_request(afe->dev, false, MT8186_DAI_ADDA, 1);
-
- /* reset dmic */
- afe_priv->mtkaif_dmic = 0;
break;
default:
break;
diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
index ce4a5713386a..98a2fde9e004 100644
--- a/sound/soc/qcom/sm8250.c
+++ b/sound/soc/qcom/sm8250.c
@@ -270,6 +270,7 @@ static int sm8250_platform_probe(struct platform_device *pdev)
if (!card)
return -ENOMEM;
+ card->owner = THIS_MODULE;
/* Allocate the private data */
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
diff --git a/sound/soc/sof/Kconfig b/sound/soc/sof/Kconfig
index e90f173d067c..37f7df5fde17 100644
--- a/sound/soc/sof/Kconfig
+++ b/sound/soc/sof/Kconfig
@@ -196,6 +196,7 @@ config SND_SOC_SOF_DEBUG_ENABLE_FIRMWARE_TRACE
config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST
tristate "SOF enable IPC flood test"
+ depends on SND_SOC_SOF
select SND_SOC_SOF_CLIENT
help
This option enables a separate client device for IPC flood test
@@ -214,6 +215,7 @@ config SND_SOC_SOF_DEBUG_IPC_FLOOD_TEST_NUM
config SND_SOC_SOF_DEBUG_IPC_MSG_INJECTOR
tristate "SOF enable IPC message injector"
+ depends on SND_SOC_SOF
select SND_SOC_SOF_CLIENT
help
This option enables the IPC message injector which can be used to send
diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c
index af072b484a60..64929dc9af39 100644
--- a/sound/soc/sof/ipc4-topology.c
+++ b/sound/soc/sof/ipc4-topology.c
@@ -771,7 +771,7 @@ static int sof_ipc4_widget_setup_comp_src(struct snd_sof_widget *swidget)
goto err;
ret = sof_update_ipc_object(scomp, src, SOF_SRC_TOKENS, swidget->tuples,
- swidget->num_tuples, sizeof(src), 1);
+ swidget->num_tuples, sizeof(*src), 1);
if (ret) {
dev_err(scomp->dev, "Parsing SRC tokens failed\n");
goto err;
@@ -1251,7 +1251,7 @@ sof_ipc4_prepare_copier_module(struct snd_sof_widget *swidget,
if (blob->alh_cfg.count > 1) {
int group_id;
- group_id = ida_alloc_max(&alh_group_ida, ALH_MULTI_GTW_COUNT,
+ group_id = ida_alloc_max(&alh_group_ida, ALH_MULTI_GTW_COUNT - 1,
GFP_KERNEL);
if (group_id < 0)
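Two small but classic fixes in ipc4-topology.c: sizeof(src) measured the pointer where sizeof(*src) (the pointed-to object) was intended, and ida_alloc_max() takes an inclusive maximum, so allocating out of ALH_MULTI_GTW_COUNT IDs needs max = count - 1. The sizeof half, demonstrated in a standalone program (struct src_config is invented for the example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct src_config { int sample_rate; int channels; char name[32]; };

int main(void)
{
	struct src_config *src = calloc(1, sizeof(*src));

	if (!src)
		return 1;
	/* sizeof(src) is the pointer (8 bytes on LP64); sizeof(*src) is
	 * the 40-byte object — the same confusion the hunk above undoes. */
	printf("sizeof(src)=%zu sizeof(*src)=%zu\n",
	       sizeof(src), sizeof(*src));

	memset(src, 0, sizeof(*src)); /* correct: covers the whole object */
	free(src);
	return 0;
}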
diff --git a/sound/usb/card.c b/sound/usb/card.c
index d356743de2ff..706d249a9ad6 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -699,7 +699,7 @@ static bool check_delayed_register_option(struct snd_usb_audio *chip, int iface)
if (delayed_register[i] &&
sscanf(delayed_register[i], "%x:%x", &id, &inum) == 2 &&
id == chip->usb_id)
- return inum != iface;
+ return iface < inum;
}
return false;
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 0d7b73bf7945..8c8f9a851f89 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -758,7 +758,8 @@ bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
* The endpoint needs to be closed via snd_usb_endpoint_close() later.
*
* Note that this function doesn't configure the endpoint. The substream
- * needs to set it up later via snd_usb_endpoint_configure().
+ * needs to set it up later via snd_usb_endpoint_set_params() and
+ * snd_usb_endpoint_prepare().
*/
struct snd_usb_endpoint *
snd_usb_endpoint_open(struct snd_usb_audio *chip,
@@ -924,6 +925,8 @@ void snd_usb_endpoint_close(struct snd_usb_audio *chip,
endpoint_set_interface(chip, ep, false);
if (!--ep->opened) {
+ if (ep->clock_ref && !atomic_read(&ep->clock_ref->locked))
+ ep->clock_ref->rate = 0;
ep->iface = 0;
ep->altsetting = 0;
ep->cur_audiofmt = NULL;
@@ -1290,12 +1293,13 @@ out_of_memory:
/*
* snd_usb_endpoint_set_params: configure an snd_usb_endpoint
*
+ * It's called from the hw_params callback.
* Determine the number of URBs to be used on this endpoint.
* An endpoint must be configured before it can be started.
* An endpoint that is already running can not be reconfigured.
*/
-static int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep)
{
const struct audioformat *fmt = ep->cur_audiofmt;
int err;
@@ -1378,18 +1382,18 @@ static int init_sample_rate(struct snd_usb_audio *chip,
}
/*
- * snd_usb_endpoint_configure: Configure the endpoint
+ * snd_usb_endpoint_prepare: Prepare the endpoint
*
* This function sets up the EP to be fully usable state.
- * It's called either from hw_params or prepare callback.
+ * It's called from the prepare callback.
* The function checks need_setup flag, and performs nothing unless needed,
* so it's safe to call this multiple times.
*
* This returns zero if unchanged, 1 if the configuration has changed,
* or a negative error code.
*/
-int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep)
{
bool iface_first;
int err = 0;
@@ -1410,9 +1414,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
if (err < 0)
goto unlock;
}
- err = snd_usb_endpoint_set_params(chip, ep);
- if (err < 0)
- goto unlock;
goto done;
}
@@ -1440,10 +1441,6 @@ int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
if (err < 0)
goto unlock;
- err = snd_usb_endpoint_set_params(chip, ep);
- if (err < 0)
- goto unlock;
-
err = snd_usb_select_mode_quirk(chip, ep->cur_audiofmt);
if (err < 0)
goto unlock;
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 6a9af04cf175..e67ea28faa54 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -17,8 +17,10 @@ snd_usb_endpoint_open(struct snd_usb_audio *chip,
bool is_sync_ep);
void snd_usb_endpoint_close(struct snd_usb_audio *chip,
struct snd_usb_endpoint *ep);
-int snd_usb_endpoint_configure(struct snd_usb_audio *chip,
- struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_prepare(struct snd_usb_audio *chip,
+ struct snd_usb_endpoint *ep);
int snd_usb_endpoint_get_clock_rate(struct snd_usb_audio *chip, int clock);
bool snd_usb_endpoint_compatible(struct snd_usb_audio *chip,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index d45d1d7e6664..b604f7e95e82 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -443,17 +443,17 @@ static int configure_endpoints(struct snd_usb_audio *chip,
if (stop_endpoints(subs, false))
sync_pending_stops(subs);
if (subs->sync_endpoint) {
- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
if (err < 0)
return err;
}
- err = snd_usb_endpoint_configure(chip, subs->data_endpoint);
+ err = snd_usb_endpoint_prepare(chip, subs->data_endpoint);
if (err < 0)
return err;
snd_usb_set_format_quirk(subs, subs->cur_audiofmt);
} else {
if (subs->sync_endpoint) {
- err = snd_usb_endpoint_configure(chip, subs->sync_endpoint);
+ err = snd_usb_endpoint_prepare(chip, subs->sync_endpoint);
if (err < 0)
return err;
}
@@ -551,7 +551,13 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
subs->cur_audiofmt = fmt;
mutex_unlock(&chip->mutex);
- ret = configure_endpoints(chip, subs);
+ if (subs->sync_endpoint) {
+ ret = snd_usb_endpoint_set_params(chip, subs->sync_endpoint);
+ if (ret < 0)
+ goto unlock;
+ }
+
+ ret = snd_usb_endpoint_set_params(chip, subs->data_endpoint);
unlock:
if (ret < 0)
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 168fd802d70b..5b4d8f5eade2 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1764,7 +1764,7 @@ bool snd_usb_registration_quirk(struct snd_usb_audio *chip, int iface)
for (q = registration_quirks; q->usb_id; q++)
if (chip->usb_id == q->usb_id)
- return iface != q->interface;
+ return iface < q->interface;
/* Register as normal */
return false;
@@ -1903,6 +1903,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_SHARE_MEDIA_DEVICE | QUIRK_FLAG_ALIGN_TRANSFER),
DEVICE_FLG(0x21b4, 0x0081, /* AudioQuest DragonFly */
QUIRK_FLAG_GET_SAMPLE_RATE),
+ DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ QUIRK_FLAG_SET_IFACE_FIRST),
DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index ceb93d798182..f10f4e6d3fb8 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -495,6 +495,10 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
return 0;
}
}
+
+ if (chip->card->registered)
+ chip->need_delayed_register = true;
+
/* look for an empty stream */
list_for_each_entry(as, &chip->pcm_list, list) {
if (as->fmt_type != fp->fmt_type)
@@ -502,9 +506,6 @@ static int __snd_usb_add_audio_stream(struct snd_usb_audio *chip,
subs = &as->substream[stream];
if (subs->ep_num)
continue;
- if (snd_device_get_state(chip->card, as->pcm) !=
- SNDRV_DEV_BUILD)
- chip->need_delayed_register = true;
err = snd_pcm_new_stream(as->pcm, stream, 1);
if (err < 0)
return err;
@@ -1105,7 +1106,7 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
* Dallas DS4201 workaround: It presents 5 altsettings, but the last
* one misses syncpipe, and does not produce any sound.
*/
- if (chip->usb_id == USB_ID(0x04fa, 0x4201))
+ if (chip->usb_id == USB_ID(0x04fa, 0x4201) && num >= 4)
num = 4;
for (i = 0; i < num; i++) {
diff --git a/tools/debugging/kernel-chktaint b/tools/debugging/kernel-chktaint
index f1af27ce9f20..279be06332be 100755
--- a/tools/debugging/kernel-chktaint
+++ b/tools/debugging/kernel-chktaint
@@ -187,6 +187,7 @@ else
echo " * auxiliary taint, defined for and used by distros (#16)"
fi
+
T=`expr $T / 2`
if [ `expr $T % 2` -eq 0 ]; then
addout " "
@@ -195,6 +196,14 @@ else
echo " * kernel was built with the struct randomization plugin (#17)"
fi
+T=`expr $T / 2`
+if [ `expr $T % 2` -eq 0 ]; then
+ addout " "
+else
+ addout "N"
+ echo " * an in-kernel test (such as a KUnit test) has been run (#18)"
+fi
+
echo "For a more detailed explanation of the various taint flags see"
echo " Documentation/admin-guide/tainted-kernels.rst in the Linux kernel sources"
echo " or https://kernel.org/doc/html/latest/admin-guide/tainted-kernels.html"
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 1e6fd6ca513b..27f5e7dfc2f7 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -44,7 +44,7 @@
/*
* KVP protocol: The user mode component first registers with the
- * the kernel component. Subsequently, the kernel component requests, data
+ * kernel component. Subsequently, the kernel component requests data
 * component fills in the value corresponding to the specified key (continued
* for the specified keys. In response to this message the user mode component
* fills in the value corresponding to the specified key. We overload the
* sequence field in the cn_msg header to define our KVP message types.
@@ -772,11 +772,11 @@ static int kvp_process_ip_address(void *addrp,
const char *str;
if (family == AF_INET) {
- addr = (struct sockaddr_in *)addrp;
+ addr = addrp;
str = inet_ntop(family, &addr->sin_addr, tmp, 50);
addr_length = INET_ADDRSTRLEN;
} else {
- addr6 = (struct sockaddr_in6 *)addrp;
+ addr6 = addrp;
str = inet_ntop(family, &addr6->sin6_addr.s6_addr, tmp, 50);
addr_length = INET6_ADDRSTRLEN;
}
diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h
index d30439b4b8ab..869379f91fe4 100644
--- a/tools/include/uapi/asm/errno.h
+++ b/tools/include/uapi/asm/errno.h
@@ -9,8 +9,8 @@
#include "../../../arch/alpha/include/uapi/asm/errno.h"
#elif defined(__mips__)
#include "../../../arch/mips/include/uapi/asm/errno.h"
-#elif defined(__xtensa__)
-#include "../../../arch/xtensa/include/uapi/asm/errno.h"
+#elif defined(__hppa__)
+#include "../../../arch/parisc/include/uapi/asm/errno.h"
#else
#include <asm-generic/errno.h>
#endif
diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c
index e6c98a6e3908..6b1bafe267a4 100644
--- a/tools/lib/perf/evlist.c
+++ b/tools/lib/perf/evlist.c
@@ -486,6 +486,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
if (ops->idx)
ops->idx(evlist, evsel, mp, idx);
+ pr_debug("idx %d: mmapping fd %d\n", idx, *output);
if (ops->mmap(map, mp, *output, evlist_cpu) < 0)
return -1;
@@ -494,6 +495,7 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
if (!idx)
perf_evlist__set_mmap_first(evlist, map, overwrite);
} else {
+ pr_debug("idx %d: set output fd %d -> %d\n", idx, fd, *output);
if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
return -1;
@@ -520,6 +522,48 @@ mmap_per_evsel(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
}
static int
+mmap_per_thread(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
+ struct perf_mmap_param *mp)
+{
+ int nr_threads = perf_thread_map__nr(evlist->threads);
+ int nr_cpus = perf_cpu_map__nr(evlist->all_cpus);
+ int cpu, thread, idx = 0;
+ int nr_mmaps = 0;
+
+ pr_debug("%s: nr cpu values (may include -1) %d nr threads %d\n",
+ __func__, nr_cpus, nr_threads);
+
+ /* per-thread mmaps */
+ for (thread = 0; thread < nr_threads; thread++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (mmap_per_evsel(evlist, ops, idx, mp, 0, thread, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+
+ /* system-wide mmaps i.e. per-cpu */
+ for (cpu = 1; cpu < nr_cpus; cpu++, idx++) {
+ int output = -1;
+ int output_overwrite = -1;
+
+ if (mmap_per_evsel(evlist, ops, idx, mp, cpu, 0, &output,
+ &output_overwrite, &nr_mmaps))
+ goto out_unmap;
+ }
+
+ if (nr_mmaps != evlist->nr_mmaps)
+ pr_err("Miscounted nr_mmaps %d vs %d\n", nr_mmaps, evlist->nr_mmaps);
+
+ return 0;
+
+out_unmap:
+ perf_evlist__munmap(evlist);
+ return -1;
+}
+
+static int
mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
@@ -528,6 +572,8 @@ mmap_per_cpu(struct perf_evlist *evlist, struct perf_evlist_mmap_ops *ops,
int nr_mmaps = 0;
int cpu, thread;
+ pr_debug("%s: nr cpu values %d nr threads %d\n", __func__, nr_cpus, nr_threads);
+
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
int output_overwrite = -1;
@@ -569,6 +615,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
struct perf_evlist_mmap_ops *ops,
struct perf_mmap_param *mp)
{
+ const struct perf_cpu_map *cpus = evlist->all_cpus;
struct perf_evsel *evsel;
if (!ops || !ops->get || !ops->mmap)
@@ -588,6 +635,9 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist,
if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
return -ENOMEM;
+ if (perf_cpu_map__empty(cpus))
+ return mmap_per_thread(evlist, ops, mp);
+
return mmap_per_cpu(evlist, ops, mp);
}
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index e5921b347153..bd947885a639 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -954,11 +954,11 @@ ifndef NO_LIBBPF
$(call QUIET_INSTALL, bpf-headers) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'; \
- $(INSTALL) include/bpf/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
- $(INSTALL) include/bpf/linux/*.h -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'
+ $(INSTALL) include/bpf/*.h -m 644 -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf'; \
+ $(INSTALL) include/bpf/linux/*.h -m 644 -t '$(DESTDIR_SQ)$(perf_include_instdir_SQ)/bpf/linux'
$(call QUIET_INSTALL, bpf-examples) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'; \
- $(INSTALL) examples/bpf/*.c -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
+ $(INSTALL) examples/bpf/*.c -m 644 -t '$(DESTDIR_SQ)$(perf_examples_instdir_SQ)/bpf'
endif
$(call QUIET_INSTALL, perf-archive) \
$(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
@@ -967,13 +967,13 @@ endif
ifndef NO_LIBAUDIT
$(call QUIET_INSTALL, strace/groups) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'; \
- $(INSTALL) trace/strace/groups/* -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
+ $(INSTALL) trace/strace/groups/* -m 644 -t '$(DESTDIR_SQ)$(STRACE_GROUPS_INSTDIR_SQ)'
endif
ifndef NO_LIBPERL
$(call QUIET_INSTALL, perl-scripts) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
- $(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \
+ $(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'; \
+ $(INSTALL) scripts/perl/*.pl -m 644 -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'; \
$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
endif
@@ -990,23 +990,23 @@ endif
$(INSTALL) $(DLFILTERS) '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/dlfilters';
$(call QUIET_INSTALL, perf_completion-script) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
- $(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
+ $(INSTALL) perf-completion.sh -m 644 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
$(call QUIET_INSTALL, perf-tip) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(tip_instdir_SQ)'; \
- $(INSTALL) Documentation/tips.txt -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
+ $(INSTALL) Documentation/tips.txt -m 644 -t '$(DESTDIR_SQ)$(tip_instdir_SQ)'
install-tests: all install-gtk
$(call QUIET_INSTALL, tests) \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
- $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
+ $(INSTALL) tests/attr.py -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) tests/pe-file.exe* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
- $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
+ $(INSTALL) tests/attr/* -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) tests/shell/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell'; \
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
- $(INSTALL) tests/shell/lib/*.sh '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
- $(INSTALL) tests/shell/lib/*.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
+ $(INSTALL) tests/shell/lib/*.sh -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'; \
+ $(INSTALL) tests/shell/lib/*.py -m 644 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/shell/lib'
install-bin: install-tools install-tests install-traceevent-plugins
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 653e13b5037e..438fc222e213 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -146,15 +146,15 @@ static void *c2c_he_zalloc(size_t size)
c2c_he->cpuset = bitmap_zalloc(c2c.cpus_cnt);
if (!c2c_he->cpuset)
- return NULL;
+ goto out_free;
c2c_he->nodeset = bitmap_zalloc(c2c.nodes_cnt);
if (!c2c_he->nodeset)
- return NULL;
+ goto out_free;
c2c_he->node_stats = zalloc(c2c.nodes_cnt * sizeof(*c2c_he->node_stats));
if (!c2c_he->node_stats)
- return NULL;
+ goto out_free;
init_stats(&c2c_he->cstats.lcl_hitm);
init_stats(&c2c_he->cstats.rmt_hitm);
@@ -163,6 +163,12 @@ static void *c2c_he_zalloc(size_t size)
init_stats(&c2c_he->cstats.load);
return &c2c_he->he;
+
+out_free:
+ free(c2c_he->nodeset);
+ free(c2c_he->cpuset);
+ free(c2c_he);
+ return NULL;
}
static void c2c_he_free(void *he)
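
The fix above is the usual single-unwind-label idiom: every failure branch jumps to one label that frees whatever was allocated so far (free(NULL) is defined to do nothing), instead of returning NULL and leaking the earlier allocations. A generic userspace sketch of the pattern:

	#include <stdlib.h>

	struct blobs { void *a, *b; };

	static struct blobs *blobs_alloc(void)
	{
		struct blobs *bl = calloc(1, sizeof(*bl));

		if (!bl)
			return NULL;

		bl->a = malloc(64);
		if (!bl->a)
			goto out_free;

		bl->b = malloc(64);
		if (!bl->b)
			goto out_free;

		return bl;

	out_free:
		/* free(NULL) is a no-op, so one label covers every failure point. */
		free(bl->b);
		free(bl->a);
		free(bl);
		return NULL;
	}

	int main(void)
	{
		struct blobs *bl = blobs_alloc();

		if (bl) {
			free(bl->b);
			free(bl->a);
			free(bl);
		}
		return 0;
	}
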
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index dd11d3471baf..ea40ae52cd2c 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -1874,8 +1874,7 @@ int cmd_lock(int argc, const char **argv)
NULL
};
const char *const lock_subcommands[] = { "record", "report", "script",
- "info", "contention",
- "contention", NULL };
+ "info", "contention", NULL };
const char *lock_usage[] = {
NULL,
NULL
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4713f0f3a6cf..f87ef43eb820 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1906,14 +1906,18 @@ static int record__synthesize(struct record *rec, bool tail)
err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
machine, opts);
- if (err < 0)
+ if (err < 0) {
pr_warning("Couldn't synthesize bpf events.\n");
+ err = 0;
+ }
if (rec->opts.synth & PERF_SYNTH_CGROUP) {
err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
machine);
- if (err < 0)
+ if (err < 0) {
pr_warning("Couldn't synthesize cgroup events.\n");
+ err = 0;
+ }
}
if (rec->opts.nr_threads_synthesize > 1) {
@@ -3358,16 +3362,22 @@ static struct option __record_options[] = {
struct option *record_options = __record_options;
-static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
+static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
{
struct perf_cpu cpu;
int idx;
if (cpu_map__is_dummy(cpus))
- return;
+ return 0;
- perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+ perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+		/* Return -ENODEV if the input cpu is greater than the max cpu */
+ if ((unsigned long)cpu.cpu > mask->nbits)
+ return -ENODEV;
set_bit(cpu.cpu, mask->bits);
+ }
+
+ return 0;
}
static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
@@ -3379,7 +3389,9 @@ static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const cha
return -ENOMEM;
bitmap_zero(mask->bits, mask->nbits);
- record__mmap_cpu_mask_init(mask, cpus);
+ if (record__mmap_cpu_mask_init(mask, cpus))
+ return -ENODEV;
+
perf_cpu_map__put(cpus);
return 0;
@@ -3461,7 +3473,12 @@ static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_ma
pr_err("Failed to allocate CPUs mask\n");
return ret;
}
- record__mmap_cpu_mask_init(&cpus_mask, cpus);
+
+ ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
+ if (ret) {
+ pr_err("Failed to init cpu mask\n");
+ goto out_free_cpu_mask;
+ }
ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
if (ret) {
@@ -3702,7 +3719,8 @@ static int record__init_thread_default_masks(struct record *rec, struct perf_cpu
if (ret)
return ret;
- record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
+ if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
+ return -ENODEV;
rec->nr_threads = 1;
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 13580a9c50b8..029b4330e59b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -445,6 +445,9 @@ static int evsel__check_attr(struct evsel *evsel, struct perf_session *session)
struct perf_event_attr *attr = &evsel->core.attr;
bool allow_user_set;
+ if (evsel__is_dummy_event(evsel))
+ return 0;
+
if (perf_header__has_feat(&session->header, HEADER_STAT))
return 0;
@@ -566,6 +569,8 @@ static struct evsel *find_first_output_type(struct evlist *evlist,
struct evsel *evsel;
evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_dummy_event(evsel))
+ continue;
if (output_type(evsel->core.attr.type) == (int)type)
return evsel;
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 54cd29d07ca8..0b4a62e4ff67 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1932,6 +1932,9 @@ setup_metrics:
free(str);
}
+ if (!stat_config.topdown_level)
+ stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
+
if (!evsel_list->core.nr_entries) {
if (target__has_cpu(&target))
default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
@@ -1948,8 +1951,6 @@ setup_metrics:
}
if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
return -1;
-
- stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
/* Platform specific attrs */
if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
return -1;
diff --git a/tools/perf/dlfilters/dlfilter-show-cycles.c b/tools/perf/dlfilters/dlfilter-show-cycles.c
index 9eccc97bff82..6d47298ebe9f 100644
--- a/tools/perf/dlfilters/dlfilter-show-cycles.c
+++ b/tools/perf/dlfilters/dlfilter-show-cycles.c
@@ -98,9 +98,9 @@ int filter_event_early(void *data, const struct perf_dlfilter_sample *sample, vo
static void print_vals(__u64 cycles, __u64 delta)
{
if (delta)
- printf("%10llu %10llu ", cycles, delta);
+ printf("%10llu %10llu ", (unsigned long long)cycles, (unsigned long long)delta);
else
- printf("%10llu %10s ", cycles, "");
+ printf("%10llu %10s ", (unsigned long long)cycles, "");
}
int filter_event(void *data, const struct perf_dlfilter_sample *sample, void *ctx)
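
The casts above matter because __u64 is 'unsigned long' on some 64-bit ABIs while %llu strictly expects 'unsigned long long'; the mismatch trips -Wformat even when the widths happen to agree. A small demonstration:

	#include <stdio.h>

	int main(void)
	{
		unsigned long v = 1234;	/* stand-in for a __u64 that is 'unsigned long' */

		/* Without the cast, gcc -Wformat warns: %llu expects unsigned long long. */
		printf("%10llu\n", (unsigned long long)v);
		return 0;
	}
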
diff --git a/tools/perf/util/affinity.c b/tools/perf/util/affinity.c
index 4d216c0dc425..4ee96b3c755b 100644
--- a/tools/perf/util/affinity.c
+++ b/tools/perf/util/affinity.c
@@ -49,8 +49,14 @@ void affinity__set(struct affinity *a, int cpu)
{
int cpu_set_size = get_cpu_set_size();
- if (cpu == -1)
+	/*
+	 * Return early:
+	 * - if cpu is -1
+	 * - to restrict out-of-bound access to sched_cpus
+	 */
+ if (cpu == -1 || ((cpu >= (cpu_set_size * 8))))
return;
+
a->changed = true;
set_bit(cpu, a->sched_cpus);
/*
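
The new bound works because get_cpu_set_size() reports bytes while the bitmap is indexed in bits, hence the multiply by 8. A hedged userspace sketch of the same bytes-to-bits conversion using glibc's CPU_ALLOC_SIZE():

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		size_t bytes = CPU_ALLOC_SIZE(2048);	/* room for 2048 CPUs */

		/* A CPU number is only addressable if it is below bytes * 8. */
		printf("%zu bytes -> %zu addressable CPU bits\n", bytes, bytes * 8);
		return 0;
	}
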
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
index 953338b9e887..ed28a0dbcb7f 100644
--- a/tools/perf/util/genelf.c
+++ b/tools/perf/util/genelf.c
@@ -30,10 +30,6 @@
#define BUILD_ID_URANDOM /* different uuid for each run */
-// FIXME, remove this and fix the deprecation warnings before its removed and
-// We'll break for good here...
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-
#ifdef HAVE_LIBCRYPTO_SUPPORT
#define BUILD_ID_MD5
@@ -45,6 +41,7 @@
#endif
#ifdef BUILD_ID_MD5
+#include <openssl/evp.h>
#include <openssl/md5.h>
#endif
#endif
@@ -142,15 +139,20 @@ gen_build_id(struct buildid_note *note,
static void
gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
{
- MD5_CTX context;
+ EVP_MD_CTX *mdctx;
if (sizeof(note->build_id) < 16)
errx(1, "build_id too small for MD5");
- MD5_Init(&context);
- MD5_Update(&context, &load_addr, sizeof(load_addr));
- MD5_Update(&context, code, csize);
- MD5_Final((unsigned char *)note->build_id, &context);
+ mdctx = EVP_MD_CTX_new();
+ if (!mdctx)
+ errx(2, "failed to create EVP_MD_CTX");
+
+ EVP_DigestInit_ex(mdctx, EVP_md5(), NULL);
+ EVP_DigestUpdate(mdctx, &load_addr, sizeof(load_addr));
+ EVP_DigestUpdate(mdctx, code, csize);
+ EVP_DigestFinal_ex(mdctx, (unsigned char *)note->build_id, NULL);
+ EVP_MD_CTX_free(mdctx);
}
#endif
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 464475fd6b9a..c93bcaf6d55d 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -1655,6 +1655,9 @@ int metricgroup__parse_groups(const struct option *opt,
struct evlist *perf_evlist = *(struct evlist **)opt->value;
const struct pmu_events_table *table = pmu_events_table__find();
+ if (!table)
+ return -EINVAL;
+
return parse_groups(perf_evlist, str, metric_no_group,
metric_no_merge, NULL, metric_events, table);
}
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index e33cab34d22f..db9810611788 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -65,3 +65,4 @@ send_signal # intermittently fails to receive signa
select_reuseport # intermittently fails on new s390x setup
xdp_synproxy # JIT does not support calling kernel function (kfunc)
unpriv_bpf_disabled # fentry
+lru_bug # prog 'printk': failed to auto-attach: -524
diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
index 9e754423fa8b..6c03a7d805f9 100644
--- a/tools/testing/selftests/bpf/verifier/precise.c
+++ b/tools/testing/selftests/bpf/verifier/precise.c
@@ -192,3 +192,28 @@
.result = VERBOSE_ACCEPT,
.retval = -1,
},
+{
+ "precise: mark_chain_precision for ARG_CONST_ALLOC_SIZE_OR_ZERO",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1, offsetof(struct xdp_md, ingress_ifindex)),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_4, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_2, 0x1000),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 42),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_ringbuf = { 1 },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = BPF_F_TEST_STATE_FREQ,
+ .errstr = "invalid access to memory, mem_size=1 off=42 size=8",
+ .result = REJECT,
+},
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 87d1a0b1bae0..2f0d705db9db 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -29,6 +29,7 @@
/x86_64/max_vcpuid_cap_test
/x86_64/mmio_warning_test
/x86_64/monitor_mwait_test
+/x86_64/nested_exceptions_test
/x86_64/nx_huge_pages_test
/x86_64/platform_info_test
/x86_64/pmu_event_filter_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 784abe7f0962..0172eb6cb6ee 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -48,6 +48,8 @@ LIBKVM += lib/rbtree.c
LIBKVM += lib/sparsebit.c
LIBKVM += lib/test_util.c
+LIBKVM_STRING += lib/string_override.c
+
LIBKVM_x86_64 += lib/x86_64/apic.c
LIBKVM_x86_64 += lib/x86_64/handlers.S
LIBKVM_x86_64 += lib/x86_64/perf_test_util.c
@@ -89,6 +91,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/kvm_clock_test
TEST_GEN_PROGS_x86_64 += x86_64/kvm_pv_test
TEST_GEN_PROGS_x86_64 += x86_64/mmio_warning_test
TEST_GEN_PROGS_x86_64 += x86_64/monitor_mwait_test
+TEST_GEN_PROGS_x86_64 += x86_64/nested_exceptions_test
TEST_GEN_PROGS_x86_64 += x86_64/platform_info_test
TEST_GEN_PROGS_x86_64 += x86_64/pmu_event_filter_test
TEST_GEN_PROGS_x86_64 += x86_64/set_boot_cpu_id
@@ -221,7 +224,8 @@ LIBKVM_C := $(filter %.c,$(LIBKVM))
LIBKVM_S := $(filter %.S,$(LIBKVM))
LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
LIBKVM_S_OBJ := $(patsubst %.S, $(OUTPUT)/%.o, $(LIBKVM_S))
-LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)
+LIBKVM_STRING_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_STRING))
+LIBKVM_OBJS = $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ) $(LIBKVM_STRING_OBJ)
EXTRA_CLEAN += $(LIBKVM_OBJS) cscope.*
@@ -232,6 +236,12 @@ $(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c
$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S
$(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@
+# Compile the string overrides as freestanding to prevent the compiler from
+# generating self-referential code, e.g. without "freestanding" the compiler may
+# "optimize" memcmp() by invoking memcmp(), thus causing infinite recursion.
+$(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c
+ $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@
+
x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS))))
$(TEST_GEN_PROGS): $(LIBKVM_OBJS)
$(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS)
diff --git a/tools/testing/selftests/kvm/access_tracking_perf_test.c b/tools/testing/selftests/kvm/access_tracking_perf_test.c
index 1c2749b1481a..76c583a07ea2 100644
--- a/tools/testing/selftests/kvm/access_tracking_perf_test.c
+++ b/tools/testing/selftests/kvm/access_tracking_perf_test.c
@@ -31,8 +31,9 @@
* These limitations are worked around in this test by using a large enough
* region of memory for each vCPU such that the number of translations cached in
* the TLB and the number of pages held in pagevecs are a small fraction of the
- * overall workload. And if either of those conditions are not true this test
- * will fail rather than silently passing.
+ * overall workload. And if either of those conditions is not true (for example
+ * under nested virtualization, where the TLB size is unlimited), this test will
+ * print a warning rather than silently passing.
*/
#include <inttypes.h>
#include <limits.h>
@@ -172,17 +173,23 @@ static void mark_vcpu_memory_idle(struct kvm_vm *vm,
vcpu_idx, no_pfn, pages);
/*
- * Test that at least 90% of memory has been marked idle (the rest might
- * not be marked idle because the pages have not yet made it to an LRU
- * list or the translations are still cached in the TLB). 90% is
+ * Check that at least 90% of memory has been marked idle (the rest
+ * might not be marked idle because the pages have not yet made it to an
+ * LRU list or the translations are still cached in the TLB). 90% is
* arbitrary; high enough that we ensure most memory access went through
* access tracking but low enough as to not make the test too brittle
* over time and across architectures.
+ *
+	 * Note that when run under nested virtualization, this check will trigger
+	 * much more frequently because the TLB size is unlimited and, since no
+	 * flush happens, many more pages are cached there and the guest won't see
+	 * the "idle" bit cleared.
*/
- TEST_ASSERT(still_idle < pages / 10,
- "vCPU%d: Too many pages still idle (%"PRIu64 " out of %"
- PRIu64 ").\n",
- vcpu_idx, still_idle, pages);
+	if (still_idle < pages / 10)
+		printf("WARNING: vCPU%d: Too many pages still idle (%" PRIu64
+		       " out of %" PRIu64 "), this will affect performance results.\n",
+		       vcpu_idx, still_idle, pages);
close(page_idle_fd);
close(pagemap_fd);
diff --git a/tools/testing/selftests/kvm/include/kvm_util_base.h b/tools/testing/selftests/kvm/include/kvm_util_base.h
index 24fde97f6121..e42a09cd24a0 100644
--- a/tools/testing/selftests/kvm/include/kvm_util_base.h
+++ b/tools/testing/selftests/kvm/include/kvm_util_base.h
@@ -175,6 +175,10 @@ extern const struct vm_guest_mode_params vm_guest_mode_params[];
int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);
+
+bool get_kvm_intel_param_bool(const char *param);
+bool get_kvm_amd_param_bool(const char *param);
+
unsigned int kvm_check_cap(long cap);
static inline bool kvm_has_cap(long cap)
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 5c5a88180b6c..befc754ce9b3 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -63,8 +63,10 @@ void test_assert(bool exp, const char *exp_str,
#a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
} while (0)
-#define TEST_FAIL(fmt, ...) \
- TEST_ASSERT(false, fmt, ##__VA_ARGS__)
+#define TEST_FAIL(fmt, ...) do { \
+ TEST_ASSERT(false, fmt, ##__VA_ARGS__); \
+ __builtin_unreachable(); \
+} while (0)
size_t parse_size(const char *size);
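
Because test_assert() is not annotated noreturn, the compiler previously had to assume TEST_FAIL() could fall through; the added __builtin_unreachable() is what lets value-returning callers end on TEST_FAIL() without a bogus "control reaches end of non-void function" warning. A sketch of the effect (compiles standalone as a translation unit; linking needs the selftest library that provides test_assert()):

	#include <stdbool.h>

	void test_assert(bool exp, const char *exp_str, const char *file,
			 unsigned int line, const char *fmt, ...);

	#define TEST_FAIL(fmt, ...) do {					\
		test_assert(false, "false", __FILE__, __LINE__, fmt, ##__VA_ARGS__); \
		__builtin_unreachable();					\
	} while (0)

	int width_for_mode(int mode)
	{
		if (mode == 0)
			return 32;
		if (mode == 1)
			return 64;
		TEST_FAIL("unhandled mode %d", mode);	/* no 'return' needed after this */
	}
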
diff --git a/tools/testing/selftests/kvm/include/x86_64/evmcs.h b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
index 3c9260f8e116..58db74f68af2 100644
--- a/tools/testing/selftests/kvm/include/x86_64/evmcs.h
+++ b/tools/testing/selftests/kvm/include/x86_64/evmcs.h
@@ -203,14 +203,25 @@ struct hv_enlightened_vmcs {
u32 reserved:30;
} hv_enlightenments_control;
u32 hv_vp_id;
-
+ u32 padding32_2;
u64 hv_vm_id;
u64 partition_assist_page;
u64 padding64_4[4];
u64 guest_bndcfgs;
- u64 padding64_5[7];
+ u64 guest_ia32_perf_global_ctrl;
+ u64 guest_ia32_s_cet;
+ u64 guest_ssp;
+ u64 guest_ia32_int_ssp_table_addr;
+ u64 guest_ia32_lbr_ctl;
+ u64 padding64_5[2];
u64 xss_exit_bitmap;
- u64 padding64_6[7];
+ u64 encls_exiting_bitmap;
+ u64 host_ia32_perf_global_ctrl;
+ u64 tsc_multiplier;
+ u64 host_ia32_s_cet;
+ u64 host_ssp;
+ u64 host_ia32_int_ssp_table_addr;
+ u64 padding64_6;
};
#define HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE 0
@@ -656,6 +667,18 @@ static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
case VIRTUAL_PROCESSOR_ID:
*value = current_evmcs->virtual_processor_id;
break;
+ case HOST_IA32_PERF_GLOBAL_CTRL:
+ *value = current_evmcs->host_ia32_perf_global_ctrl;
+ break;
+ case GUEST_IA32_PERF_GLOBAL_CTRL:
+ *value = current_evmcs->guest_ia32_perf_global_ctrl;
+ break;
+ case ENCLS_EXITING_BITMAP:
+ *value = current_evmcs->encls_exiting_bitmap;
+ break;
+ case TSC_MULTIPLIER:
+ *value = current_evmcs->tsc_multiplier;
+ break;
default: return 1;
}
@@ -1169,6 +1192,22 @@ static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
current_evmcs->virtual_processor_id = value;
current_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT;
break;
+ case HOST_IA32_PERF_GLOBAL_CTRL:
+ current_evmcs->host_ia32_perf_global_ctrl = value;
+ current_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1;
+ break;
+ case GUEST_IA32_PERF_GLOBAL_CTRL:
+ current_evmcs->guest_ia32_perf_global_ctrl = value;
+ current_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1;
+ break;
+ case ENCLS_EXITING_BITMAP:
+ current_evmcs->encls_exiting_bitmap = value;
+ current_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2;
+ break;
+ case TSC_MULTIPLIER:
+ current_evmcs->tsc_multiplier = value;
+ current_evmcs->hv_clean_fields &= ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2;
+ break;
default: return 1;
}
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 45edf45821d0..e8ca0d8a6a7e 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -754,7 +754,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
void (*handler)(struct ex_regs *));
/* If a toddler were to say "abracadabra". */
-#define KVM_EXCEPTION_MAGIC 0xabacadabaull
+#define KVM_EXCEPTION_MAGIC 0xabacadabaULL
/*
* KVM selftest exception fixup uses registers to coordinate with the exception
@@ -786,7 +786,7 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
"lea 1f(%%rip), %%r10\n\t" \
"lea 2f(%%rip), %%r11\n\t" \
"1: " insn "\n\t" \
- "mov $0, %[vector]\n\t" \
+ "movb $0, %[vector]\n\t" \
"jmp 3f\n\t" \
"2:\n\t" \
"mov %%r9b, %[vector]\n\t" \
@@ -825,6 +825,8 @@ static inline uint8_t wrmsr_safe(uint32_t msr, uint64_t val)
return kvm_asm_safe("wrmsr", "a"(val & -1u), "d"(val >> 32), "c"(msr));
}
+bool kvm_is_tdp_enabled(void);
+
uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
uint64_t vaddr);
void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
@@ -855,6 +857,8 @@ enum pg_level {
#define PG_SIZE_1G PG_LEVEL_SIZE(PG_LEVEL_1G)
void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level);
+void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ uint64_t nr_bytes, int level);
/*
* Basic CPU control in CR0
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
index a339b537a575..7aee6244ab6a 100644
--- a/tools/testing/selftests/kvm/include/x86_64/svm_util.h
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -9,15 +9,12 @@
#ifndef SELFTEST_KVM_SVM_UTILS_H
#define SELFTEST_KVM_SVM_UTILS_H
+#include <asm/svm.h>
+
#include <stdint.h>
#include "svm.h"
#include "processor.h"
-#define SVM_EXIT_EXCP_BASE 0x040
-#define SVM_EXIT_HLT 0x078
-#define SVM_EXIT_MSR 0x07c
-#define SVM_EXIT_VMMCALL 0x081
-
struct svm_test_data {
/* VMCB */
struct vmcb *vmcb; /* gva */
diff --git a/tools/testing/selftests/kvm/include/x86_64/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index 99fa1410964c..71b290b6469d 100644
--- a/tools/testing/selftests/kvm/include/x86_64/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -8,6 +8,8 @@
#ifndef SELFTEST_KVM_VMX_H
#define SELFTEST_KVM_VMX_H
+#include <asm/vmx.h>
+
#include <stdint.h>
#include "processor.h"
#include "apic.h"
@@ -100,55 +102,6 @@
#define VMX_EPT_VPID_CAP_AD_BITS 0x00200000
#define EXIT_REASON_FAILED_VMENTRY 0x80000000
-#define EXIT_REASON_EXCEPTION_NMI 0
-#define EXIT_REASON_EXTERNAL_INTERRUPT 1
-#define EXIT_REASON_TRIPLE_FAULT 2
-#define EXIT_REASON_INTERRUPT_WINDOW 7
-#define EXIT_REASON_NMI_WINDOW 8
-#define EXIT_REASON_TASK_SWITCH 9
-#define EXIT_REASON_CPUID 10
-#define EXIT_REASON_HLT 12
-#define EXIT_REASON_INVD 13
-#define EXIT_REASON_INVLPG 14
-#define EXIT_REASON_RDPMC 15
-#define EXIT_REASON_RDTSC 16
-#define EXIT_REASON_VMCALL 18
-#define EXIT_REASON_VMCLEAR 19
-#define EXIT_REASON_VMLAUNCH 20
-#define EXIT_REASON_VMPTRLD 21
-#define EXIT_REASON_VMPTRST 22
-#define EXIT_REASON_VMREAD 23
-#define EXIT_REASON_VMRESUME 24
-#define EXIT_REASON_VMWRITE 25
-#define EXIT_REASON_VMOFF 26
-#define EXIT_REASON_VMON 27
-#define EXIT_REASON_CR_ACCESS 28
-#define EXIT_REASON_DR_ACCESS 29
-#define EXIT_REASON_IO_INSTRUCTION 30
-#define EXIT_REASON_MSR_READ 31
-#define EXIT_REASON_MSR_WRITE 32
-#define EXIT_REASON_INVALID_STATE 33
-#define EXIT_REASON_MWAIT_INSTRUCTION 36
-#define EXIT_REASON_MONITOR_INSTRUCTION 39
-#define EXIT_REASON_PAUSE_INSTRUCTION 40
-#define EXIT_REASON_MCE_DURING_VMENTRY 41
-#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
-#define EXIT_REASON_APIC_ACCESS 44
-#define EXIT_REASON_EOI_INDUCED 45
-#define EXIT_REASON_EPT_VIOLATION 48
-#define EXIT_REASON_EPT_MISCONFIG 49
-#define EXIT_REASON_INVEPT 50
-#define EXIT_REASON_RDTSCP 51
-#define EXIT_REASON_PREEMPTION_TIMER 52
-#define EXIT_REASON_INVVPID 53
-#define EXIT_REASON_WBINVD 54
-#define EXIT_REASON_XSETBV 55
-#define EXIT_REASON_APIC_WRITE 56
-#define EXIT_REASON_INVPCID 58
-#define EXIT_REASON_PML_FULL 62
-#define EXIT_REASON_XSAVES 63
-#define EXIT_REASON_XRSTORS 64
-#define LAST_EXIT_REASON 64
enum vmcs_field {
VIRTUAL_PROCESSOR_ID = 0x00000000,
@@ -208,6 +161,8 @@ enum vmcs_field {
VMWRITE_BITMAP_HIGH = 0x00002029,
XSS_EXIT_BITMAP = 0x0000202C,
XSS_EXIT_BITMAP_HIGH = 0x0000202D,
+ ENCLS_EXITING_BITMAP = 0x0000202E,
+ ENCLS_EXITING_BITMAP_HIGH = 0x0000202F,
TSC_MULTIPLIER = 0x00002032,
TSC_MULTIPLIER_HIGH = 0x00002033,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
@@ -617,6 +572,7 @@ void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t memslot);
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
uint64_t addr, uint64_t size);
+bool kvm_vm_has_ept(struct kvm_vm *vm);
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot);
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index 71ade6100fd3..2bd25b191d15 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -22,7 +22,7 @@ static void test_dump_stack(void)
* Build and run this command:
*
* addr2line -s -e /proc/$PPID/exe -fpai {backtrace addresses} | \
- * grep -v test_dump_stack | cat -n 1>&2
+ * cat -n 1>&2
*
* Note that the spacing is different and there's no newline.
*/
@@ -36,18 +36,24 @@ static void test_dump_stack(void)
n * (((sizeof(void *)) * 2) + 1) +
/* Null terminator: */
1];
- char *c;
+ char *c = cmd;
n = backtrace(stack, n);
- c = &cmd[0];
- c += sprintf(c, "%s", addr2line);
/*
- * Skip the first 3 frames: backtrace, test_dump_stack, and
- * test_assert. We hope that backtrace isn't inlined and the other two
- * we've declared noinline.
+ * Skip the first 2 frames, which should be test_dump_stack() and
+ * test_assert(); both of which are declared noinline. Bail if the
+ * resulting stack trace would be empty. Otherwise, addr2line will block
+ * waiting for addresses to be passed in via stdin.
*/
+ if (n <= 2) {
+ fputs(" (stack trace empty)\n", stderr);
+ return;
+ }
+
+ c += sprintf(c, "%s", addr2line);
for (i = 2; i < n; i++)
c += sprintf(c, " %lx", ((unsigned long) stack[i]) - 1);
+
c += sprintf(c, "%s", pipeline);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-result"
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 411a4c0bc81c..f1cb1627161f 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -50,6 +50,45 @@ int open_kvm_dev_path_or_exit(void)
return _open_kvm_dev_path_or_exit(O_RDONLY);
}
+static bool get_module_param_bool(const char *module_name, const char *param)
+{
+ const int path_size = 128;
+ char path[path_size];
+ char value;
+ ssize_t r;
+ int fd;
+
+ r = snprintf(path, path_size, "/sys/module/%s/parameters/%s",
+ module_name, param);
+ TEST_ASSERT(r < path_size,
+ "Failed to construct sysfs path in %d bytes.", path_size);
+
+ fd = open_path_or_exit(path, O_RDONLY);
+
+ r = read(fd, &value, 1);
+ TEST_ASSERT(r == 1, "read(%s) failed", path);
+
+ r = close(fd);
+ TEST_ASSERT(!r, "close(%s) failed", path);
+
+ if (value == 'Y')
+ return true;
+ else if (value == 'N')
+ return false;
+
+ TEST_FAIL("Unrecognized value '%c' for boolean module param", value);
+}
+
+bool get_kvm_intel_param_bool(const char *param)
+{
+ return get_module_param_bool("kvm_intel", param);
+}
+
+bool get_kvm_amd_param_bool(const char *param)
+{
+ return get_module_param_bool("kvm_amd", param);
+}
+
/*
* Capability
*
diff --git a/tools/testing/selftests/kvm/lib/string_override.c b/tools/testing/selftests/kvm/lib/string_override.c
new file mode 100644
index 000000000000..632398adc229
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/string_override.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <stddef.h>
+
+/*
+ * Override the "basic" built-in string helpers so that they can be used in
+ * guest code. KVM selftests don't support dynamic loading in guest code and
+ * will jump into the weeds if the compiler decides to insert an out-of-line
+ * call via the PLT.
+ */
+int memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) {
+ if ((res = *su1 - *su2) != 0)
+ break;
+ }
+ return res;
+}
+
+void *memcpy(void *dest, const void *src, size_t count)
+{
+ char *tmp = dest;
+ const char *s = src;
+
+ while (count--)
+ *tmp++ = *s++;
+ return dest;
+}
+
+void *memset(void *s, int c, size_t count)
+{
+ char *xs = s;
+
+ while (count--)
+ *xs++ = c;
+ return s;
+}
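
These overrides pair with the Makefile's -ffreestanding rule earlier in this series: in a hosted build, GCC's idiom recognition can turn an open-coded byte loop back into a libc call, which is fatal when the loop *is* the implementation. A hedged illustration of the hazard:

	/* With gcc -O2 in a hosted environment, this loop may be compiled into
	 * a call to memcpy() itself; if this function were memcpy(), that call
	 * would recurse forever. -ffreestanding suppresses the idiom. */
	void *copy_bytes(void *dest, const void *src, unsigned long count)
	{
		char *d = dest;
		const char *s = src;

		while (count--)
			*d++ = *s++;
		return dest;
	}
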
diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index 2e6e61bbe81b..39c4409ef56a 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -111,6 +111,14 @@ static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
}
}
+bool kvm_is_tdp_enabled(void)
+{
+ if (is_intel_cpu())
+ return get_kvm_intel_param_bool("ept");
+ else
+ return get_kvm_amd_param_bool("npt");
+}
+
void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
@@ -214,6 +222,25 @@ void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
}
+void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ uint64_t nr_bytes, int level)
+{
+ uint64_t pg_size = PG_LEVEL_SIZE(level);
+ uint64_t nr_pages = nr_bytes / pg_size;
+ int i;
+
+ TEST_ASSERT(nr_bytes % pg_size == 0,
+ "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
+ nr_bytes, pg_size);
+
+ for (i = 0; i < nr_pages; i++) {
+ __virt_pg_map(vm, vaddr, paddr, level);
+
+ vaddr += pg_size;
+ paddr += pg_size;
+ }
+}
+
static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
struct kvm_vcpu *vcpu,
uint64_t vaddr)
@@ -1294,20 +1321,9 @@ done:
/* Returns true if kvm_intel was loaded with unrestricted_guest=1. */
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
- char val = 'N';
- size_t count;
- FILE *f;
-
/* Ensure that a KVM vendor-specific module is loaded. */
if (vm == NULL)
close(open_kvm_dev_path_or_exit());
- f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
- if (f) {
- count = fread(&val, sizeof(char), 1, f);
- TEST_ASSERT(count == 1, "Unable to read from param file.");
- fclose(f);
- }
-
- return val == 'Y';
+ return get_kvm_intel_param_bool("unrestricted_guest");
}
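
virt_map_level() gives tests direct control over the page size backing a guest mapping. A hypothetical caller (the helper name and region are invented) that identity-maps a slice with 2M entries, assuming the kvm selftest harness:

	#include "kvm_util.h"
	#include "processor.h"

	/* Sketch only: 'vm' already created; size must be PG_SIZE_2M-aligned
	 * or virt_map_level() asserts. */
	static void identity_map_2m(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
	{
		virt_map_level(vm, gpa, gpa, size, PG_LEVEL_2M);
	}
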
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
index 6d445886e16c..5495a92dfd5a 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/svm.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -60,18 +60,6 @@ static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
seg->base = base;
}
-/*
- * Avoid using memset to clear the vmcb, since libc may not be
- * available in L1 (and, even if it is, features that libc memset may
- * want to use, like AVX, may not be enabled).
- */
-static void clear_vmcb(struct vmcb *vmcb)
-{
- int n = sizeof(*vmcb) / sizeof(u32);
-
- asm volatile ("rep stosl" : "+c"(n), "+D"(vmcb) : "a"(0) : "memory");
-}
-
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
struct vmcb *vmcb = svm->vmcb;
@@ -88,7 +76,7 @@ void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_r
wrmsr(MSR_EFER, efer | EFER_SVME);
wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);
- clear_vmcb(vmcb);
+ memset(vmcb, 0, sizeof(*vmcb));
asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 80a568c439b8..d21049c38fc5 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -5,6 +5,8 @@
* Copyright (C) 2018, Google LLC.
*/
+#include <asm/msr-index.h>
+
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
@@ -542,9 +544,27 @@ void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
__nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
}
+bool kvm_vm_has_ept(struct kvm_vm *vm)
+{
+ struct kvm_vcpu *vcpu;
+ uint64_t ctrl;
+
+ vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
+ TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");
+
+ ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
+ if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+ return false;
+
+ ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
+ return ctrl & SECONDARY_EXEC_ENABLE_EPT;
+}
+
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
uint32_t eptp_memslot)
{
+ TEST_REQUIRE(kvm_vm_has_ept(vm));
+
vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index fac248a43666..6f88da7e60be 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -227,7 +227,7 @@ int main(int argc, char *argv[])
ucall_init(vm, NULL);
pthread_create(&migration_thread, NULL, migration_worker,
- (void *)(unsigned long)gettid());
+ (void *)(unsigned long)syscall(SYS_gettid));
for (i = 0; !done; i++) {
vcpu_run(vcpu);
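
The switch to syscall(SYS_gettid) avoids depending on the gettid() wrapper, which glibc only gained in 2.30. The portable spelling, standalone:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		/* Works on any glibc, including those without a gettid() wrapper. */
		long tid = syscall(SYS_gettid);

		printf("tid=%ld\n", tid);
		return 0;
	}
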
diff --git a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
index b1905d280ef5..32f7e09ef67c 100644
--- a/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/fix_hypercall_test.c
@@ -14,89 +14,73 @@
#include "kvm_util.h"
#include "processor.h"
-static bool ud_expected;
+/* VMCALL and VMMCALL are both 3-byte opcodes. */
+#define HYPERCALL_INSN_SIZE 3
+
+static bool quirk_disabled;
static void guest_ud_handler(struct ex_regs *regs)
{
- GUEST_ASSERT(ud_expected);
- GUEST_DONE();
+ regs->rax = -EFAULT;
+ regs->rip += HYPERCALL_INSN_SIZE;
}
-extern unsigned char svm_hypercall_insn;
-static uint64_t svm_do_sched_yield(uint8_t apic_id)
-{
- uint64_t ret;
-
- asm volatile("mov %1, %%rax\n\t"
- "mov %2, %%rbx\n\t"
- "svm_hypercall_insn:\n\t"
- "vmmcall\n\t"
- "mov %%rax, %0\n\t"
- : "=r"(ret)
- : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
- : "rax", "rbx", "memory");
+static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 };
+static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };
- return ret;
-}
-
-extern unsigned char vmx_hypercall_insn;
-static uint64_t vmx_do_sched_yield(uint8_t apic_id)
+extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
+static uint64_t do_sched_yield(uint8_t apic_id)
{
uint64_t ret;
- asm volatile("mov %1, %%rax\n\t"
- "mov %2, %%rbx\n\t"
- "vmx_hypercall_insn:\n\t"
- "vmcall\n\t"
- "mov %%rax, %0\n\t"
- : "=r"(ret)
- : "r"((uint64_t)KVM_HC_SCHED_YIELD), "r"((uint64_t)apic_id)
- : "rax", "rbx", "memory");
+ asm volatile("hypercall_insn:\n\t"
+ ".byte 0xcc,0xcc,0xcc\n\t"
+ : "=a"(ret)
+ : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
+ : "memory");
return ret;
}
-static void assert_hypercall_insn(unsigned char *exp_insn, unsigned char *obs_insn)
-{
- uint32_t exp = 0, obs = 0;
-
- memcpy(&exp, exp_insn, sizeof(exp));
- memcpy(&obs, obs_insn, sizeof(obs));
-
- GUEST_ASSERT_EQ(exp, obs);
-}
-
static void guest_main(void)
{
- unsigned char *native_hypercall_insn, *hypercall_insn;
- uint8_t apic_id;
-
- apic_id = GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID));
+ const uint8_t *native_hypercall_insn;
+ const uint8_t *other_hypercall_insn;
+ uint64_t ret;
if (is_intel_cpu()) {
- native_hypercall_insn = &vmx_hypercall_insn;
- hypercall_insn = &svm_hypercall_insn;
- svm_do_sched_yield(apic_id);
+ native_hypercall_insn = vmx_vmcall;
+ other_hypercall_insn = svm_vmmcall;
} else if (is_amd_cpu()) {
- native_hypercall_insn = &svm_hypercall_insn;
- hypercall_insn = &vmx_hypercall_insn;
- vmx_do_sched_yield(apic_id);
+ native_hypercall_insn = svm_vmmcall;
+ other_hypercall_insn = vmx_vmcall;
} else {
GUEST_ASSERT(0);
/* unreachable */
return;
}
- GUEST_ASSERT(!ud_expected);
- assert_hypercall_insn(native_hypercall_insn, hypercall_insn);
- GUEST_DONE();
-}
+ memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);
-static void setup_ud_vector(struct kvm_vcpu *vcpu)
-{
- vm_init_descriptor_tables(vcpu->vm);
- vcpu_init_descriptor_tables(vcpu);
- vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
+ ret = do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));
+
+ /*
+ * If the quirk is disabled, verify that guest_ud_handler() "returned"
+ * -EFAULT and that KVM did NOT patch the hypercall. If the quirk is
+ * enabled, verify that the hypercall succeeded and that KVM patched in
+ * the "right" hypercall.
+ */
+ if (quirk_disabled) {
+ GUEST_ASSERT(ret == (uint64_t)-EFAULT);
+ GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,
+ HYPERCALL_INSN_SIZE));
+ } else {
+ GUEST_ASSERT(!ret);
+ GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
+ HYPERCALL_INSN_SIZE));
+ }
+
+ GUEST_DONE();
}
static void enter_guest(struct kvm_vcpu *vcpu)
@@ -119,35 +103,23 @@ static void enter_guest(struct kvm_vcpu *vcpu)
}
}
-static void test_fix_hypercall(void)
+static void test_fix_hypercall(bool disable_quirk)
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
vm = vm_create_with_one_vcpu(&vcpu, guest_main);
- setup_ud_vector(vcpu);
-
- ud_expected = false;
- sync_global_to_guest(vm, ud_expected);
-
- virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
-
- enter_guest(vcpu);
-}
-
-static void test_fix_hypercall_disabled(void)
-{
- struct kvm_vcpu *vcpu;
- struct kvm_vm *vm;
- vm = vm_create_with_one_vcpu(&vcpu, guest_main);
- setup_ud_vector(vcpu);
+ vm_init_descriptor_tables(vcpu->vm);
+ vcpu_init_descriptor_tables(vcpu);
+ vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
- vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
- KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
+ if (disable_quirk)
+ vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
+ KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
- ud_expected = true;
- sync_global_to_guest(vm, ud_expected);
+ quirk_disabled = disable_quirk;
+ sync_global_to_guest(vm, quirk_disabled);
virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
@@ -158,6 +130,6 @@ int main(void)
{
TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
- test_fix_hypercall();
- test_fix_hypercall_disabled();
+ test_fix_hypercall(false);
+ test_fix_hypercall(true);
}
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
index 79ab0152d281..05b32e550a80 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c
@@ -26,7 +26,8 @@ static inline uint8_t hypercall(u64 control, vm_vaddr_t input_address,
: "=a" (*hv_status),
"+c" (control), "+d" (input_address),
KVM_ASM_SAFE_OUTPUTS(vector)
- : [output_address] "r"(output_address)
+ : [output_address] "r"(output_address),
+ "a" (-EFAULT)
: "cc", "memory", "r8", KVM_ASM_SAFE_CLOBBERS);
return vector;
}
@@ -81,13 +82,13 @@ static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
}
vector = hypercall(hcall->control, input, output, &res);
- if (hcall->ud_expected)
+ if (hcall->ud_expected) {
GUEST_ASSERT_2(vector == UD_VECTOR, hcall->control, vector);
- else
+ } else {
GUEST_ASSERT_2(!vector, hcall->control, vector);
+ GUEST_ASSERT_2(res == hcall->expect, hcall->expect, res);
+ }
- GUEST_ASSERT_2(!hcall->ud_expected || res == hcall->expect,
- hcall->expect, res);
GUEST_DONE();
}
@@ -507,7 +508,7 @@ static void guest_test_hcalls_access(void)
switch (stage) {
case 0:
feat->eax |= HV_MSR_HYPERCALL_AVAILABLE;
- hcall->control = 0xdeadbeef;
+ hcall->control = 0xbeef;
hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
diff --git a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c
new file mode 100644
index 000000000000..ac33835f78f4
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define _GNU_SOURCE /* for program_invocation_short_name */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+#include "svm_util.h"
+
+#define L2_GUEST_STACK_SIZE 256
+
+/*
+ * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
+ * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
+ */
+#define FAKE_TRIPLE_FAULT_VECTOR 0xaa
+
+/* Arbitrary 32-bit error code injected by this test. */
+#define SS_ERROR_CODE 0xdeadbeef
+
+/*
+ * Bit '0' is set on Intel if the exception occurs while delivering a previous
+ * event/exception. AMD's wording is ambiguous, but presumably the bit is set
+ * if the exception occurs while delivering an external event, e.g. NMI or INTR,
+ * but not for exceptions that occur when delivering other exceptions or
+ * software interrupts.
+ *
+ * Note, Intel's name for it, "External event", is misleading and much more
+ * aligned with AMD's behavior, but the SDM is quite clear on its behavior.
+ */
+#define ERROR_CODE_EXT_FLAG BIT(0)
+
+/*
+ * Bit '1' is set if the fault occurred when looking up a descriptor in the
+ * IDT, which is the case here as the IDT is empty/NULL.
+ */
+#define ERROR_CODE_IDT_FLAG BIT(1)
+
+/*
+ * The #GP that occurs when vectoring #SS should show the index into the IDT
+ * for #SS, plus have the "IDT flag" set.
+ */
+#define GP_ERROR_CODE_AMD ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
+#define GP_ERROR_CODE_INTEL ((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)
+
+/*
+ * Intel and AMD both shove '0' into the error code on #DF, regardless of what
+ * led to the double fault.
+ */
+#define DF_ERROR_CODE 0
+
+#define INTERCEPT_SS (BIT_ULL(SS_VECTOR))
+#define INTERCEPT_SS_DF (INTERCEPT_SS | BIT_ULL(DF_VECTOR))
+#define INTERCEPT_SS_GP_DF (INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))
+
+static void l2_ss_pending_test(void)
+{
+ GUEST_SYNC(SS_VECTOR);
+}
+
+static void l2_ss_injected_gp_test(void)
+{
+ GUEST_SYNC(GP_VECTOR);
+}
+
+static void l2_ss_injected_df_test(void)
+{
+ GUEST_SYNC(DF_VECTOR);
+}
+
+static void l2_ss_injected_tf_test(void)
+{
+ GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
+}
+
+static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
+ uint32_t error_code)
+{
+ struct vmcb *vmcb = svm->vmcb;
+ struct vmcb_control_area *ctrl = &vmcb->control;
+
+ vmcb->save.rip = (u64)l2_code;
+ run_guest(vmcb, svm->vmcb_gpa);
+
+ if (vector == FAKE_TRIPLE_FAULT_VECTOR)
+ return;
+
+ GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
+ GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
+}
+
+static void l1_svm_code(struct svm_test_data *svm)
+{
+ struct vmcb_control_area *ctrl = &svm->vmcb->control;
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ svm->vmcb->save.idtr.limit = 0;
+ ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);
+
+ ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
+ svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
+ svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);
+
+ ctrl->intercept_exceptions = INTERCEPT_SS_DF;
+ svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);
+
+ ctrl->intercept_exceptions = INTERCEPT_SS;
+ svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
+ GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);
+
+ GUEST_DONE();
+}
+
+static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
+{
+ GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));
+
+ GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);
+
+ if (vector == FAKE_TRIPLE_FAULT_VECTOR)
+ return;
+
+ GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
+ GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
+ GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
+}
+
+static void l1_vmx_code(struct vmx_pages *vmx)
+{
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);
+
+ GUEST_ASSERT_EQ(load_vmcs(vmx), true);
+
+ prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+ GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);
+
+ /*
+ * VMX disallows injecting an exception with error_code[31:16] != 0,
+ * and hardware will never generate a VM-Exit with bits 31:16 set.
+ * KVM should likewise truncate the "bad" userspace value.
+ */
+ GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
+ vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
+ vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);
+
+ GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
+ vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);
+
+ GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
+ vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
+ GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);
+
+ GUEST_DONE();
+}
+
+static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
+{
+ if (this_cpu_has(X86_FEATURE_SVM))
+ l1_svm_code(test_data);
+ else
+ l1_vmx_code(test_data);
+}
+
+static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
+{
+ struct kvm_run *run = vcpu->run;
+ struct ucall uc;
+
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason, exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vcpu, &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(vector == uc.args[1],
+ "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
+ break;
+ case UCALL_DONE:
+ TEST_ASSERT(vector == -1,
+ "Expected L2 to ask for %d, L2 says it's done", vector);
+ break;
+ case UCALL_ABORT:
+ TEST_FAIL("%s at %s:%ld (0x%lx != 0x%lx)",
+ (const char *)uc.args[0], __FILE__, uc.args[1],
+ uc.args[2], uc.args[3]);
+ break;
+ default:
+ TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
+ }
+}
+
+static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
+{
+ struct kvm_vcpu_events events;
+
+ vcpu_events_get(vcpu, &events);
+
+ TEST_ASSERT(!events.exception.pending,
+ "Vector %d unexpectedlt pending", events.exception.nr);
+ TEST_ASSERT(!events.exception.injected,
+ "Vector %d unexpectedly injected", events.exception.nr);
+
+ events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
+ events.exception.pending = !inject;
+ events.exception.injected = inject;
+ events.exception.nr = SS_VECTOR;
+ events.exception.has_error_code = true;
+ events.exception.error_code = SS_ERROR_CODE;
+ vcpu_events_set(vcpu, &events);
+}
+
+/*
+ * Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
+ * when an exception is being queued for L2. Specifically, verify that KVM
+ * honors L1 exception intercept controls when a #SS is pending/injected,
+ * triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
+ * by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
+ */
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t nested_test_data_gva;
+ struct kvm_vcpu_events events;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));
+
+ vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
+ vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);
+
+ if (kvm_cpu_has(X86_FEATURE_SVM))
+ vcpu_alloc_svm(vm, &nested_test_data_gva);
+ else
+ vcpu_alloc_vmx(vm, &nested_test_data_gva);
+
+ vcpu_args_set(vcpu, 1, nested_test_data_gva);
+
+ /* Run L1 => L2. L2 should sync and request #SS. */
+ vcpu_run(vcpu);
+ assert_ucall_vector(vcpu, SS_VECTOR);
+
+ /* Pend #SS and request immediate exit. #SS should still be pending. */
+ queue_ss_exception(vcpu, false);
+ vcpu->run->immediate_exit = true;
+ vcpu_run_complete_io(vcpu);
+
+ /* Verify the pending event comes back out the same as it went in. */
+ vcpu_events_get(vcpu, &events);
+ ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
+ KVM_VCPUEVENT_VALID_PAYLOAD);
+ ASSERT_EQ(events.exception.pending, true);
+ ASSERT_EQ(events.exception.nr, SS_VECTOR);
+ ASSERT_EQ(events.exception.has_error_code, true);
+ ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);
+
+ /*
+ * Run for real with the pending #SS, L1 should get a VM-Exit due to
+ * #SS interception and re-enter L2 to request #GP (via injected #SS).
+ */
+ vcpu->run->immediate_exit = false;
+ vcpu_run(vcpu);
+ assert_ucall_vector(vcpu, GP_VECTOR);
+
+ /*
+ * Inject #SS, the #SS should bypass interception and cause #GP, which
+ * L1 should intercept before KVM morphs it to #DF. L1 should then
+ * disable #GP interception and run L2 to request #DF (via #SS => #GP).
+ */
+ queue_ss_exception(vcpu, true);
+ vcpu_run(vcpu);
+ assert_ucall_vector(vcpu, DF_VECTOR);
+
+ /*
+ * Inject #SS, the #SS should bypass interception and cause #GP, which
+ * L1 is no longer intercepting, and so KVM should morph it to #DF and
+ * deliver a #DF VM-Exit to L1. L1 should then re-enter L2 to request
+ * the (fake) TRIPLE_FAULT.
+ */
+ queue_ss_exception(vcpu, true);
+ vcpu_run(vcpu);
+ assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);
+
+ /*
+ * Inject #SS yet again. L1 is not intercepting #GP or #DF, and so
+ * should see nested TRIPLE_FAULT / SHUTDOWN.
+ */
+ queue_ss_exception(vcpu, true);
+ vcpu_run(vcpu);
+ assert_ucall_vector(vcpu, -1);
+
+ kvm_vm_free(vm);
+}
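As a quick check on the error-code macros in the new test: SS_VECTOR is 12, so the IDT index portion is 12 * 8 = 0x60; OR-ing in the IDT flag yields 0x62 (the AMD value), and adding the EXT flag yields 0x63 (the Intel value). A self-contained sanity check, with the vector number assumed from the x86 exception numbering:

#include <assert.h>

#define SS_VECTOR		12	/* x86 #SS exception vector */
#define ERROR_CODE_EXT_FLAG	(1u << 0)
#define ERROR_CODE_IDT_FLAG	(1u << 1)

int main(void)
{
	unsigned int amd = (SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG;
	unsigned int intel = amd | ERROR_CODE_EXT_FLAG;

	assert(amd == 0x62);	/* 12 * 8 = 0x60, plus the IDT flag */
	assert(intel == 0x63);	/* ... plus the EXT flag */
	return 0;
}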
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index cc6421716400..59ffe7fd354f 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -112,19 +112,13 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
{
struct kvm_vcpu *vcpu;
struct kvm_vm *vm;
+ uint64_t nr_bytes;
void *hva;
int r;
vm = vm_create(1);
if (disable_nx_huge_pages) {
- /*
- * Cannot run the test without NX huge pages if the kernel
- * does not support it.
- */
- if (!kvm_check_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES))
- return;
-
r = __vm_disable_nx_huge_pages(vm);
if (reboot_permissions) {
TEST_ASSERT(!r, "Disabling NX huge pages should succeed if process has reboot permissions");
@@ -141,10 +135,24 @@ void run_test(int reclaim_period_ms, bool disable_nx_huge_pages,
HPAGE_GPA, HPAGE_SLOT,
HPAGE_SLOT_NPAGES, 0);
- virt_map(vm, HPAGE_GVA, HPAGE_GPA, HPAGE_SLOT_NPAGES);
+ nr_bytes = HPAGE_SLOT_NPAGES * vm->page_size;
+
+ /*
+ * Ensure that KVM can map HPAGE_SLOT with huge pages by mapping the
+ * region into the guest with 2MiB pages whenever TDP is disabled (i.e.
+ * whenever KVM is shadowing the guest page tables).
+ *
+ * When TDP is enabled, KVM should be able to map HPAGE_SLOT with huge
+ * pages irrespective of the guest page size, so map with 4KiB pages
+ * to test that that is the case.
+ */
+ if (kvm_is_tdp_enabled())
+ virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_4K);
+ else
+ virt_map_level(vm, HPAGE_GVA, HPAGE_GPA, nr_bytes, PG_LEVEL_2M);
hva = addr_gpa2hva(vm, HPAGE_GPA);
- memset(hva, RETURN_OPCODE, HPAGE_SLOT_NPAGES * PAGE_SIZE);
+ memset(hva, RETURN_OPCODE, nr_bytes);
check_2m_page_count(vm, 0);
check_split_count(vm, 0);
@@ -248,18 +256,13 @@ int main(int argc, char **argv)
}
}
- if (token != MAGIC_TOKEN) {
- print_skip("This test must be run with the magic token %d.\n"
- "This is done by nx_huge_pages_test.sh, which\n"
- "also handles environment setup for the test.",
- MAGIC_TOKEN);
- exit(KSFT_SKIP);
- }
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_DISABLE_NX_HUGE_PAGES));
+ TEST_REQUIRE(reclaim_period_ms > 0);
- if (!reclaim_period_ms) {
- print_skip("The NX reclaim period must be specified and non-zero");
- exit(KSFT_SKIP);
- }
+ __TEST_REQUIRE(token == MAGIC_TOKEN,
+ "This test must be run with the magic token %d.\n"
+ "This is done by nx_huge_pages_test.sh, which\n"
+ "also handles environment setup for the test.");
run_test(reclaim_period_ms, false, reboot_permissions);
run_test(reclaim_period_ms, true, reboot_permissions);
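The conversion above leans on the selftests' skip helpers; at the time of this series they look approximately like the following (tools/testing/selftests/kvm/include/test_util.h is the source of truth):

/* Approximate shape of the macros used above; consult the tree for the
 * authoritative definitions. */
#define __TEST_REQUIRE(f, fmt, ...)				\
do {								\
	if (!(f))						\
		ksft_exit_skip(fmt "\n", ##__VA_ARGS__);	\
} while (0)

#define TEST_REQUIRE(f) __TEST_REQUIRE(f, "Requirement not met: %s", #f)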
diff --git a/tools/testing/selftests/landlock/fs_test.c b/tools/testing/selftests/landlock/fs_test.c
index 21a2ce8fa739..45de42a027c5 100644
--- a/tools/testing/selftests/landlock/fs_test.c
+++ b/tools/testing/selftests/landlock/fs_test.c
@@ -4,7 +4,7 @@
*
* Copyright © 2017-2020 Mickaël Salaün <mic@digikod.net>
* Copyright © 2020 ANSSI
- * Copyright © 2020-2021 Microsoft Corporation
+ * Copyright © 2020-2022 Microsoft Corporation
*/
#define _GNU_SOURCE
@@ -371,6 +371,13 @@ TEST_F_FORK(layout1, inval)
ASSERT_EQ(EINVAL, errno);
path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_EXECUTE;
+ /* Tests with denied-by-default access right. */
+ path_beneath.allowed_access |= LANDLOCK_ACCESS_FS_REFER;
+ ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
+ &path_beneath, 0));
+ ASSERT_EQ(EINVAL, errno);
+ path_beneath.allowed_access &= ~LANDLOCK_ACCESS_FS_REFER;
+
/* Test with unknown (64-bits) value. */
path_beneath.allowed_access |= (1ULL << 60);
ASSERT_EQ(-1, landlock_add_rule(ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
@@ -1826,6 +1833,20 @@ TEST_F_FORK(layout1, link)
ASSERT_EQ(0, link(file1_s1d3, file2_s1d3));
}
+static int test_rename(const char *const oldpath, const char *const newpath)
+{
+ if (rename(oldpath, newpath))
+ return errno;
+ return 0;
+}
+
+static int test_exchange(const char *const oldpath, const char *const newpath)
+{
+ if (renameat2(AT_FDCWD, oldpath, AT_FDCWD, newpath, RENAME_EXCHANGE))
+ return errno;
+ return 0;
+}
+
TEST_F_FORK(layout1, rename_file)
{
const struct rule rules[] = {
@@ -1867,10 +1888,10 @@ TEST_F_FORK(layout1, rename_file)
* to a different directory (which allows file removal).
*/
ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
- ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(EACCES, errno);
ASSERT_EQ(-1, renameat2(AT_FDCWD, file1_s2d1, AT_FDCWD, file1_s1d3,
RENAME_EXCHANGE));
- ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(EACCES, errno);
ASSERT_EQ(-1, renameat2(AT_FDCWD, dir_s2d2, AT_FDCWD, file1_s1d3,
RENAME_EXCHANGE));
ASSERT_EQ(EXDEV, errno);
@@ -1894,7 +1915,7 @@ TEST_F_FORK(layout1, rename_file)
ASSERT_EQ(EXDEV, errno);
ASSERT_EQ(0, unlink(file1_s1d3));
ASSERT_EQ(-1, rename(file1_s2d1, file1_s1d3));
- ASSERT_EQ(EXDEV, errno);
+ ASSERT_EQ(EACCES, errno);
/* Exchanges and renames files with same parent. */
ASSERT_EQ(0, renameat2(AT_FDCWD, file2_s2d3, AT_FDCWD, file1_s2d3,
@@ -2014,6 +2035,115 @@ TEST_F_FORK(layout1, reparent_refer)
ASSERT_EQ(0, rename(dir_s1d3, dir_s2d3));
}
+/* Checks renames beneath dir_s1d1. */
+static void refer_denied_by_default(struct __test_metadata *const _metadata,
+ const struct rule layer1[],
+ const int layer1_err,
+ const struct rule layer2[])
+{
+ int ruleset_fd;
+
+ ASSERT_EQ(0, unlink(file1_s1d2));
+
+ ruleset_fd = create_ruleset(_metadata, layer1[0].access, layer1);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /*
+ * If the first layer handles LANDLOCK_ACCESS_FS_REFER (according to
+ * layer1_err), then it allows some different-parent renames and links.
+ */
+ ASSERT_EQ(layer1_err, test_rename(file1_s1d1, file1_s1d2));
+ if (layer1_err == 0)
+ ASSERT_EQ(layer1_err, test_rename(file1_s1d2, file1_s1d1));
+ ASSERT_EQ(layer1_err, test_exchange(file2_s1d1, file2_s1d2));
+ ASSERT_EQ(layer1_err, test_exchange(file2_s1d2, file2_s1d1));
+
+ ruleset_fd = create_ruleset(_metadata, layer2[0].access, layer2);
+ ASSERT_LE(0, ruleset_fd);
+ enforce_ruleset(_metadata, ruleset_fd);
+ ASSERT_EQ(0, close(ruleset_fd));
+
+ /*
+ * Now, either the first or the second layer does not handle
+ * LANDLOCK_ACCESS_FS_REFER, which means that any different-parent
+ * renames and links are denied, thus making the layer handling
+ * LANDLOCK_ACCESS_FS_REFER null and void.
+ */
+ ASSERT_EQ(EXDEV, test_rename(file1_s1d1, file1_s1d2));
+ ASSERT_EQ(EXDEV, test_exchange(file2_s1d1, file2_s1d2));
+ ASSERT_EQ(EXDEV, test_exchange(file2_s1d2, file2_s1d1));
+}
+
+const struct rule layer_dir_s1d1_refer[] = {
+ {
+ .path = dir_s1d1,
+ .access = LANDLOCK_ACCESS_FS_REFER,
+ },
+ {},
+};
+
+const struct rule layer_dir_s1d1_execute[] = {
+ {
+ /* Matches a parent directory. */
+ .path = dir_s1d1,
+ .access = LANDLOCK_ACCESS_FS_EXECUTE,
+ },
+ {},
+};
+
+const struct rule layer_dir_s2d1_execute[] = {
+ {
+ /* Does not match a parent directory. */
+ .path = dir_s2d1,
+ .access = LANDLOCK_ACCESS_FS_EXECUTE,
+ },
+ {},
+};
+
+/*
+ * Tests precedence over renames: denied by default for different parent
+ * directories, *with* a rule matching a parent directory, but not directly
+ * denying access (with neither MAKE_REG nor REMOVE).
+ */
+TEST_F_FORK(layout1, refer_denied_by_default1)
+{
+ refer_denied_by_default(_metadata, layer_dir_s1d1_refer, 0,
+ layer_dir_s1d1_execute);
+}
+
+/*
+ * Same test, but with the layers enforced in the reverse ABI-version order: the first
+ * layer does not handle LANDLOCK_ACCESS_FS_REFER.
+ */
+TEST_F_FORK(layout1, refer_denied_by_default2)
+{
+ refer_denied_by_default(_metadata, layer_dir_s1d1_execute, EXDEV,
+ layer_dir_s1d1_refer);
+}
+
+/*
+ * Tests precedence over renames: denied by default for different parent
+ * directories, *without* a rule matching a parent directory, but not directly
+ * denying access (with neither MAKE_REG nor REMOVE).
+ */
+TEST_F_FORK(layout1, refer_denied_by_default3)
+{
+ refer_denied_by_default(_metadata, layer_dir_s1d1_refer, 0,
+ layer_dir_s2d1_execute);
+}
+
+/*
+ * Same test, but with the layers enforced in the reverse ABI-version order: the first
+ * layer does not handle LANDLOCK_ACCESS_FS_REFER.
+ */
+TEST_F_FORK(layout1, refer_denied_by_default4)
+{
+ refer_denied_by_default(_metadata, layer_dir_s2d1_execute, EXDEV,
+ layer_dir_s1d1_refer);
+}
+
TEST_F_FORK(layout1, reparent_link)
{
const struct rule layer1[] = {
@@ -2336,11 +2466,12 @@ TEST_F_FORK(layout1, reparent_exdev_layers_rename1)
ASSERT_EQ(EXDEV, errno);
/*
- * However, moving the file2_s1d3 file below dir_s2d3 is allowed
- * because it cannot inherit MAKE_REG nor MAKE_DIR rights (which are
- * dedicated to directories).
+ * Moving the file2_s1d3 file below dir_s2d3 is denied because the
+ * second layer does not handle REFER, and an unhandled REFER is
+ * denied by default.
*/
- ASSERT_EQ(0, rename(file2_s1d3, file1_s2d3));
+ ASSERT_EQ(-1, rename(file2_s1d3, file1_s2d3));
+ ASSERT_EQ(EXDEV, errno);
}
TEST_F_FORK(layout1, reparent_exdev_layers_rename2)
@@ -2373,8 +2504,12 @@ TEST_F_FORK(layout1, reparent_exdev_layers_rename2)
ASSERT_EQ(EACCES, errno);
ASSERT_EQ(-1, rename(file1_s1d1, file1_s2d3));
ASSERT_EQ(EXDEV, errno);
- /* Modify layout! */
- ASSERT_EQ(0, rename(file2_s1d2, file1_s2d3));
+ /*
+ * Modifying the layout is now denied because the second layer does not
+ * handle REFER, and an unhandled REFER is denied by default.
+ */
+ ASSERT_EQ(-1, rename(file2_s1d2, file1_s2d3));
+ ASSERT_EQ(EXDEV, errno);
/* Without REFER source, EACCES wins over EXDEV. */
ASSERT_EQ(-1, rename(dir_s1d1, file1_s2d2));
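For readers unfamiliar with the semantics the new tests pin down: once any enforced layer does not handle LANDLOCK_ACCESS_FS_REFER, different-parent renames and links fail with EXDEV no matter what other layers allow. A minimal standalone sketch of enforcing a layer that does handle REFER (error and failure handling elided; "/tmp" is an arbitrary example path, uapi names are from linux/landlock.h):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/landlock.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct landlock_ruleset_attr ruleset_attr = {
		/* Handling REFER is what permits different-parent renames. */
		.handled_access_fs = LANDLOCK_ACCESS_FS_REFER,
	};
	struct landlock_path_beneath_attr path_beneath = {
		.allowed_access = LANDLOCK_ACCESS_FS_REFER,
		.parent_fd = open("/tmp", O_PATH | O_CLOEXEC),
	};
	int ruleset_fd;

	ruleset_fd = syscall(__NR_landlock_create_ruleset, &ruleset_attr,
			     sizeof(ruleset_attr), 0);
	syscall(__NR_landlock_add_rule, ruleset_fd, LANDLOCK_RULE_PATH_BENEATH,
		&path_beneath, 0);
	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
	syscall(__NR_landlock_restrict_self, ruleset_fd, 0);

	/*
	 * Renames beneath /tmp may now succeed; enforcing a second layer
	 * that does not handle REFER would turn them back into EXDEV.
	 */
	return 0;
}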
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 0e5751af6247..de7d5cc15f85 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -1,42 +1,42 @@
# SPDX-License-Identifier: GPL-2.0-only
+cmsg_sender
+fin_ack_lat
+gro
+hwtstamp_config
+ioam6_parser
+ip_defrag
ipsec
+ipv6_flowlabel
+ipv6_flowlabel_mgr
msg_zerocopy
-socket
+nettest
psock_fanout
psock_snd
psock_tpacket
-stress_reuseport_listen
+reuseaddr_conflict
+reuseaddr_ports_exhausted
reuseport_addr_any
reuseport_bpf
reuseport_bpf_cpu
reuseport_bpf_numa
reuseport_dualstack
-reuseaddr_conflict
-tcp_mmap
-udpgso
-udpgso_bench_rx
-udpgso_bench_tx
-tcp_inq
-tls
-txring_overwrite
-ip_defrag
-ipv6_flowlabel
-ipv6_flowlabel_mgr
-so_txtime
-tcp_fastopen_backup_key
-nettest
-fin_ack_lat
-reuseaddr_ports_exhausted
-hwtstamp_config
rxtimestamp
-timestamping
-txtimestamp
+socket
so_netns_cookie
+so_txtime
+stress_reuseport_listen
+tap
+tcp_fastopen_backup_key
+tcp_inq
+tcp_mmap
test_unix_oob
-gro
-ioam6_parser
+timestamping
+tls
toeplitz
tun
-cmsg_sender
+txring_overwrite
+txtimestamp
+udpgso
+udpgso_bench_rx
+udpgso_bench_tx
unix_connect
-tap
\ No newline at end of file
diff --git a/tools/testing/selftests/net/io_uring_zerocopy_tx.c b/tools/testing/selftests/net/io_uring_zerocopy_tx.c
index 9d64c560a2d6..8ce48aca8321 100644
--- a/tools/testing/selftests/net/io_uring_zerocopy_tx.c
+++ b/tools/testing/selftests/net/io_uring_zerocopy_tx.c
@@ -47,7 +47,6 @@ enum {
MODE_MIXED = 3,
};
-static bool cfg_flush = false;
static bool cfg_cork = false;
static int cfg_mode = MODE_ZC_FIXED;
static int cfg_nr_reqs = 8;
@@ -166,21 +165,6 @@ static int io_uring_register_buffers(struct io_uring *ring,
return (ret < 0) ? -errno : ret;
}
-static int io_uring_register_notifications(struct io_uring *ring,
- unsigned nr,
- struct io_uring_notification_slot *slots)
-{
- int ret;
- struct io_uring_notification_register r = {
- .nr_slots = nr,
- .data = (unsigned long)slots,
- };
-
- ret = syscall(__NR_io_uring_register, ring->ring_fd,
- IORING_REGISTER_NOTIFIERS, &r, sizeof(r));
- return (ret < 0) ? -errno : ret;
-}
-
static int io_uring_mmap(int fd, struct io_uring_params *p,
struct io_uring_sq *sq, struct io_uring_cq *cq)
{
@@ -297,11 +281,10 @@ static inline void io_uring_prep_send(struct io_uring_sqe *sqe, int sockfd,
static inline void io_uring_prep_sendzc(struct io_uring_sqe *sqe, int sockfd,
const void *buf, size_t len, int flags,
- unsigned slot_idx, unsigned zc_flags)
+ unsigned zc_flags)
{
io_uring_prep_send(sqe, sockfd, buf, len, flags);
- sqe->opcode = (__u8) IORING_OP_SENDZC_NOTIF;
- sqe->notification_idx = slot_idx;
+ sqe->opcode = (__u8) IORING_OP_SEND_ZC;
sqe->ioprio = zc_flags;
}
@@ -374,7 +357,6 @@ static int do_setup_tx(int domain, int type, int protocol)
static void do_tx(int domain, int type, int protocol)
{
- struct io_uring_notification_slot b[1] = {{.tag = NOTIF_TAG}};
struct io_uring_sqe *sqe;
struct io_uring_cqe *cqe;
unsigned long packets = 0, bytes = 0;
@@ -390,10 +372,6 @@ static void do_tx(int domain, int type, int protocol)
if (ret)
error(1, ret, "io_uring: queue init");
- ret = io_uring_register_notifications(&ring, 1, b);
- if (ret)
- error(1, ret, "io_uring: tx ctx registration");
-
iov.iov_base = payload;
iov.iov_len = cfg_payload_len;
@@ -409,9 +387,8 @@ static void do_tx(int domain, int type, int protocol)
for (i = 0; i < cfg_nr_reqs; i++) {
unsigned zc_flags = 0;
unsigned buf_idx = 0;
- unsigned slot_idx = 0;
unsigned mode = cfg_mode;
- unsigned msg_flags = 0;
+ unsigned msg_flags = MSG_WAITALL;
if (cfg_mode == MODE_MIXED)
mode = rand() % 3;
@@ -423,13 +400,10 @@ static void do_tx(int domain, int type, int protocol)
cfg_payload_len, msg_flags);
sqe->user_data = NONZC_TAG;
} else {
- if (cfg_flush) {
- zc_flags |= IORING_RECVSEND_NOTIF_FLUSH;
- compl_cqes++;
- }
+ compl_cqes++;
io_uring_prep_sendzc(sqe, fd, payload,
cfg_payload_len,
- msg_flags, slot_idx, zc_flags);
+ msg_flags, zc_flags);
if (mode == MODE_ZC_FIXED) {
sqe->ioprio |= IORING_RECVSEND_FIXED_BUF;
sqe->buf_index = buf_idx;
@@ -442,51 +416,57 @@ static void do_tx(int domain, int type, int protocol)
if (ret != cfg_nr_reqs)
error(1, ret, "submit");
+ if (cfg_cork)
+ do_setsockopt(fd, IPPROTO_UDP, UDP_CORK, 0);
for (i = 0; i < cfg_nr_reqs; i++) {
ret = io_uring_wait_cqe(&ring, &cqe);
if (ret)
error(1, ret, "wait cqe");
- if (cqe->user_data == NOTIF_TAG) {
+ if (cqe->user_data != NONZC_TAG &&
+ cqe->user_data != ZC_TAG)
+ error(1, -EINVAL, "invalid cqe->user_data");
+
+ if (cqe->flags & IORING_CQE_F_NOTIF) {
+ if (cqe->flags & IORING_CQE_F_MORE)
+ error(1, -EINVAL, "invalid notif flags");
compl_cqes--;
i--;
- } else if (cqe->user_data != NONZC_TAG &&
- cqe->user_data != ZC_TAG) {
- error(1, cqe->res, "invalid user_data");
- } else if (cqe->res <= 0 && cqe->res != -EAGAIN) {
+ } else if (cqe->res <= 0) {
+ if (cqe->flags & IORING_CQE_F_MORE)
+ error(1, cqe->res, "more with a failed send");
error(1, cqe->res, "send failed");
} else {
- if (cqe->res > 0) {
- packets++;
- bytes += cqe->res;
- }
- /* failed requests don't flush */
- if (cfg_flush &&
- cqe->res <= 0 &&
- cqe->user_data == ZC_TAG)
- compl_cqes--;
+ if (cqe->user_data == ZC_TAG &&
+ !(cqe->flags & IORING_CQE_F_MORE))
+ error(1, cqe->res, "missing more flag");
+ packets++;
+ bytes += cqe->res;
}
io_uring_cqe_seen(&ring);
}
- if (cfg_cork)
- do_setsockopt(fd, IPPROTO_UDP, UDP_CORK, 0);
} while (gettimeofday_ms() < tstop);
- if (close(fd))
- error(1, errno, "close");
-
- fprintf(stderr, "tx=%lu (MB=%lu), tx/s=%lu (MB/s=%lu)\n",
- packets, bytes >> 20,
- packets / (cfg_runtime_ms / 1000),
- (bytes >> 20) / (cfg_runtime_ms / 1000));
-
while (compl_cqes) {
ret = io_uring_wait_cqe(&ring, &cqe);
if (ret)
error(1, ret, "wait cqe");
+ if (cqe->flags & IORING_CQE_F_MORE)
+ error(1, -EINVAL, "invalid notif flags");
+ if (!(cqe->flags & IORING_CQE_F_NOTIF))
+ error(1, -EINVAL, "missing notif flag");
+
io_uring_cqe_seen(&ring);
compl_cqes--;
}
+
+ fprintf(stderr, "tx=%lu (MB=%lu), tx/s=%lu (MB/s=%lu)\n",
+ packets, bytes >> 20,
+ packets / (cfg_runtime_ms / 1000),
+ (bytes >> 20) / (cfg_runtime_ms / 1000));
+
+ if (close(fd))
+ error(1, errno, "close");
}
static void do_test(int domain, int type, int protocol)
@@ -500,8 +480,8 @@ static void do_test(int domain, int type, int protocol)
static void usage(const char *filepath)
{
- error(1, 0, "Usage: %s [-f] [-n<N>] [-z0] [-s<payload size>] "
- "(-4|-6) [-t<time s>] -D<dst_ip> udp", filepath);
+ error(1, 0, "Usage: %s (-4|-6) (udp|tcp) -D<dst_ip> [-s<payload size>] "
+ "[-t<time s>] [-n<batch>] [-p<port>] [-m<mode>]", filepath);
}
static void parse_opts(int argc, char **argv)
@@ -519,7 +499,7 @@ static void parse_opts(int argc, char **argv)
usage(argv[0]);
cfg_payload_len = max_payload_len;
- while ((c = getopt(argc, argv, "46D:p:s:t:n:fc:m:")) != -1) {
+ while ((c = getopt(argc, argv, "46D:p:s:t:n:c:m:")) != -1) {
switch (c) {
case '4':
if (cfg_family != PF_UNSPEC)
@@ -548,9 +528,6 @@ static void parse_opts(int argc, char **argv)
case 'n':
cfg_nr_reqs = strtoul(optarg, NULL, 0);
break;
- case 'f':
- cfg_flush = 1;
- break;
case 'c':
cfg_cork = strtol(optarg, NULL, 0);
break;
@@ -583,8 +560,6 @@ static void parse_opts(int argc, char **argv)
if (cfg_payload_len > max_payload_len)
error(1, 0, "-s: payload exceeds max (%d)", max_payload_len);
- if (cfg_mode == MODE_NONZC && cfg_flush)
- error(1, 0, "-f: only zerocopy modes support notifications");
if (optind != argc - 1)
usage(argv[0]);
}
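The io_uring_zerocopy_tx.c rework tracks the uapi change from notification slots to the final IORING_OP_SEND_ZC model: each zerocopy send produces two CQEs with the same user_data, first the send result (with IORING_CQE_F_MORE set) and later a notification (with IORING_CQE_F_NOTIF set) once the kernel has released the buffer. A hedged sketch of the drain loop using liburing helpers (the test itself carries its own minimal ring wrappers; handling of failed sends, which get no notification, is elided):

#include <liburing.h>

/* Drain completions for `pending_notifs` submitted IORING_OP_SEND_ZC
 * requests; returns the number of bytes the send results reported. */
static long drain_zc_cqes(struct io_uring *ring, int pending_notifs)
{
	struct io_uring_cqe *cqe;
	long bytes = 0;

	while (pending_notifs > 0) {
		if (io_uring_wait_cqe(ring, &cqe))
			break;
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			/* Kernel is done with the buffer; safe to reuse it. */
			pending_notifs--;
		} else if (cqe->res > 0) {
			/* Send result; F_MORE says a notif CQE will follow. */
			bytes += cqe->res;
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return bytes;
}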
diff --git a/tools/testing/selftests/net/io_uring_zerocopy_tx.sh b/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
index 6a65e4437640..32aa6e9dacc2 100755
--- a/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
+++ b/tools/testing/selftests/net/io_uring_zerocopy_tx.sh
@@ -25,15 +25,11 @@ readonly path_sysctl_mem="net.core.optmem_max"
# No arguments: automated test
if [[ "$#" -eq "0" ]]; then
IPs=( "4" "6" )
- protocols=( "tcp" "udp" )
for IP in "${IPs[@]}"; do
- for proto in "${protocols[@]}"; do
- for mode in $(seq 1 3); do
- $0 "$IP" "$proto" -m "$mode" -t 1 -n 32
- $0 "$IP" "$proto" -m "$mode" -t 1 -n 32 -f
- $0 "$IP" "$proto" -m "$mode" -t 1 -n 32 -c -f
- done
+ for mode in $(seq 1 3); do
+ $0 "$IP" udp -m "$mode" -t 1 -n 32
+ $0 "$IP" tcp -m "$mode" -t 1 -n 32
done
done
diff --git a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
index bf6b9626c7dd..faa7778d7bd1 100755
--- a/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
+++ b/tools/testing/selftests/netfilter/nft_conntrack_helper.sh
@@ -102,26 +102,42 @@ check_for_helper()
ip netns exec ${netns} conntrack -L -f $family -p tcp --dport $port 2> /dev/null |grep -q 'helper=ftp'
if [ $? -ne 0 ] ; then
- echo "FAIL: ${netns} did not show attached helper $message" 1>&2
- ret=1
+ if [ $autoassign -eq 0 ] ;then
+ echo "FAIL: ${netns} did not show attached helper $message" 1>&2
+ ret=1
+ else
+ echo "PASS: ${netns} did not show attached helper $message" 1>&2
+ fi
+ else
+ if [ $autoassign -eq 0 ] ;then
+ echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
+ else
+ echo "FAIL: ${netns} connection on port $port has ftp helper attached" 1>&2
+ ret=1
+ fi
fi
- echo "PASS: ${netns} connection on port $port has ftp helper attached" 1>&2
return 0
}
test_helper()
{
local port=$1
- local msg=$2
+ local autoassign=$2
+
+ if [ $autoassign -eq 0 ] ;then
+ msg="set via ruleset"
+ else
+ msg="auto-assign"
+ fi
sleep 3 | ip netns exec ${ns2} nc -w 2 -l -p $port > /dev/null &
sleep 1 | ip netns exec ${ns1} nc -w 2 10.0.1.2 $port > /dev/null &
sleep 1
- check_for_helper "$ns1" "ip $msg" $port
- check_for_helper "$ns2" "ip $msg" $port
+ check_for_helper "$ns1" "ip $msg" $port $autoassign
+ check_for_helper "$ns2" "ip $msg" $port $autoassign
wait
@@ -173,9 +189,9 @@ if [ $? -ne 0 ];then
fi
fi
-test_helper 2121 "set via ruleset"
-ip netns exec ${ns1} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
-ip netns exec ${ns2} sysctl -q 'net.netfilter.nf_conntrack_helper=1'
-test_helper 21 "auto-assign"
+test_helper 2121 0
+ip netns exec ${ns1} sysctl -qe 'net.netfilter.nf_conntrack_helper=1'
+ip netns exec ${ns2} sysctl -qe 'net.netfilter.nf_conntrack_helper=1'
+test_helper 21 1
exit $ret
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5b064dbadaf4..e30f1b4ecfa5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3409,10 +3409,8 @@ static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
int ret = -EINTR;
int idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (kvm_arch_vcpu_runnable(vcpu)) {
- kvm_make_request(KVM_REQ_UNHALT, vcpu);
+ if (kvm_arch_vcpu_runnable(vcpu))
goto out;
- }
if (kvm_cpu_has_pending_timer(vcpu))
goto out;
if (signal_pending(current))
@@ -5888,7 +5886,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
r = kvm_async_pf_init();
if (r)
- goto out_free_5;
+ goto out_free_4;
kvm_chardev_ops.owner = module;
@@ -5912,10 +5910,9 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
out_unreg:
kvm_async_pf_deinit();
-out_free_5:
+out_free_4:
for_each_possible_cpu(cpu)
free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
-out_free_4:
kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
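The kvm_main.c label change follows the standard kernel unwind idiom: each failure jumps to the label that releases everything initialized so far, in reverse order, so retargeting kvm_async_pf_init()'s failure path to out_free_4 keeps the per-CPU kick masks freed on that path too. A schematic of the idiom with hypothetical init/exit steps:

#include <stdlib.h>

/* Hypothetical subsystems, for illustration of the unwind pattern only. */
static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return 0; }
static void exit_b(void) { }
static void exit_a(void) { }

static int init_all(void)
{
	int r;

	r = init_a();
	if (r)
		return r;
	r = init_b();
	if (r)
		goto undo_a;	/* only A is live at this point */
	r = init_c();
	if (r)
		goto undo_b;	/* B and A must both be torn down */
	return 0;

undo_b:
	exit_b();
undo_a:
	exit_a();
	return r;
}

int main(void)
{
	return init_all() ? EXIT_FAILURE : EXIT_SUCCESS;
}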