-rw-r--r--Documentation/admin-guide/kernel-parameters.txt6
-rw-r--r--Documentation/core-api/irq/irq-domain.rst5
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml46
-rw-r--r--Documentation/hwmon/k10temp.rst17
-rw-r--r--Documentation/networking/device_drivers/ethernet/intel/ice.rst2
-rw-r--r--MAINTAINERS63
-rw-r--r--Makefile2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/boot/dts/qcom/ipq8074.dtsi2
-rw-r--r--arch/arm64/include/asm/acpi.h3
-rw-r--r--arch/arm64/include/asm/assembler.h5
-rw-r--r--arch/arm64/include/asm/kvm_arm.h1
-rw-r--r--arch/arm64/include/asm/kvm_asm.h48
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h5
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h5
-rw-r--r--arch/arm64/include/asm/mte.h6
-rw-r--r--arch/arm64/include/asm/string.h2
-rw-r--r--arch/arm64/include/asm/sysreg.h3
-rw-r--r--arch/arm64/kernel/acpi.c19
-rw-r--r--arch/arm64/kernel/cpufeature.c8
-rw-r--r--arch/arm64/kernel/mte.c10
-rw-r--r--arch/arm64/kernel/smp.c3
-rw-r--r--arch/arm64/kvm/Kconfig10
-rw-r--r--arch/arm64/kvm/arm.c94
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/fault.h75
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h235
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/fixed_config.h200
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/trap_handler.h2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/Makefile2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/host.S26
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c48
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c11
-rw-r--r--arch/arm64/kvm/hyp/nvhe/pkvm.c185
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c3
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c99
-rw-r--r--arch/arm64/kvm/hyp/nvhe/sys_regs.c487
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c22
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c16
-rw-r--r--arch/arm64/kvm/mmu.c2
-rw-r--r--arch/arm64/kvm/pmu-emul.c2
-rw-r--r--arch/arm64/kvm/reset.c2
-rw-r--r--arch/arm64/kvm/sys_regs.c41
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-irqfd.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-its.c18
-rw-r--r--arch/arm64/kvm/vgic/vgic-kvm-device.c25
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c8
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c27
-rw-r--r--arch/arm64/kvm/vgic/vgic-v4.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic.h5
-rw-r--r--arch/arm64/lib/strcmp.S2
-rw-r--r--arch/arm64/lib/strncmp.S2
-rw-r--r--arch/m68k/68000/entry.S4
-rw-r--r--arch/m68k/Kconfig1
-rw-r--r--arch/m68k/coldfire/entry.S4
-rw-r--r--arch/m68k/include/asm/processor.h31
-rw-r--r--arch/m68k/include/asm/segment.h59
-rw-r--r--arch/m68k/include/asm/thread_info.h3
-rw-r--r--arch/m68k/include/asm/tlbflush.h11
-rw-r--r--arch/m68k/include/asm/traps.h4
-rw-r--r--arch/m68k/include/asm/uaccess.h215
-rw-r--r--arch/m68k/kernel/asm-offsets.c2
-rw-r--r--arch/m68k/kernel/entry.S58
-rw-r--r--arch/m68k/kernel/process.c4
-rw-r--r--arch/m68k/kernel/signal.c199
-rw-r--r--arch/m68k/kernel/traps.c13
-rw-r--r--arch/m68k/mac/misc.c1
-rw-r--r--arch/m68k/mm/cache.c25
-rw-r--r--arch/m68k/mm/init.c6
-rw-r--r--arch/m68k/mm/kmap.c1
-rw-r--r--arch/m68k/mm/memory.c1
-rw-r--r--arch/m68k/mm/motorola.c2
-rw-r--r--arch/m68k/sun3/config.c3
-rw-r--r--arch/m68k/sun3/mmu_emu.c6
-rw-r--r--arch/m68k/sun3/sun3ints.c1
-rw-r--r--arch/m68k/sun3x/prom.c1
-rw-r--r--arch/mips/net/bpf_jit.c57
-rw-r--r--arch/nios2/Kconfig.debug3
-rw-r--r--arch/nios2/kernel/setup.c2
-rw-r--r--arch/s390/include/asm/ccwgroup.h2
-rw-r--r--arch/s390/net/bpf_jit_comp.c70
-rw-r--r--arch/sh/include/asm/pgtable-3level.h2
-rw-r--r--arch/sparc/lib/iomap.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/crypto/sm4-aesni-avx-asm_64.S5
-rw-r--r--arch/x86/events/core.c1
-rw-r--r--arch/x86/events/intel/core.c1
-rw-r--r--arch/x86/include/asm/pkeys.h2
-rw-r--r--arch/x86/include/asm/special_insns.h2
-rw-r--r--arch/x86/include/asm/xen/swiotlb-xen.h6
-rw-r--r--arch/x86/kernel/setup.c26
-rw-r--r--arch/x86/kvm/emulate.c1
-rw-r--r--arch/x86/lib/insn.c4
-rw-r--r--arch/x86/mm/fault.c26
-rw-r--r--arch/x86/net/bpf_jit_comp.c66
-rw-r--r--arch/x86/xen/enlighten_pv.c15
-rw-r--r--arch/x86/xen/pci-swiotlb-xen.c4
-rw-r--r--arch/x86/xen/smp_pv.c4
-rw-r--r--block/bfq-iosched.c16
-rw-r--r--block/bio.c2
-rw-r--r--block/bsg.c23
-rw-r--r--block/fops.c21
-rw-r--r--drivers/acpi/nfit/core.c12
-rw-r--r--drivers/acpi/osl.c23
-rw-r--r--drivers/android/binder.c58
-rw-r--r--drivers/android/binder_internal.h2
-rw-r--r--drivers/base/core.c90
-rw-r--r--drivers/base/swnode.c3
-rw-r--r--drivers/block/nbd.c29
-rw-r--r--drivers/comedi/comedi_fops.c1
-rw-r--r--drivers/crypto/ccp/ccp-ops.c14
-rw-r--r--drivers/edac/dmc520_edac.c2
-rw-r--r--drivers/edac/synopsys_edac.c2
-rw-r--r--drivers/fpga/dfl.c14
-rw-r--r--drivers/fpga/machxo2-spi.c6
-rw-r--r--drivers/gpio/gpio-aspeed-sgpio.c2
-rw-r--r--drivers/gpio/gpio-pca953x.c11
-rw-r--r--drivers/gpio/gpio-rockchip.c26
-rw-r--r--drivers/gpio/gpio-uniphier.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c8
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.h5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c24
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/si_dpm.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h10
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c4
-rw-r--r--drivers/gpu/drm/i915/i915_request.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c2
-rw-r--r--drivers/gpu/drm/tegra/dc.c3
-rw-r--r--drivers/gpu/drm/tegra/dc.h6
-rw-r--r--drivers/gpu/drm/tegra/uapi.c2
-rw-r--r--drivers/gpu/host1x/fence.c6
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c8
-rw-r--r--drivers/hid/hid-apple.c7
-rw-r--r--drivers/hid/hid-betopff.c13
-rw-r--r--drivers/hid/hid-u2fzero.c4
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hwmon/k10temp.c6
-rw-r--r--drivers/hwmon/ltc2947-core.c8
-rw-r--r--drivers/hwmon/mlxreg-fan.c12
-rw-r--r--drivers/hwmon/occ/common.c17
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c10
-rw-r--r--drivers/hwmon/pmbus/mp2975.c2
-rw-r--r--drivers/hwmon/tmp421.c71
-rw-r--r--drivers/hwmon/w83791d.c29
-rw-r--r--drivers/hwmon/w83792d.c28
-rw-r--r--drivers/hwmon/w83793.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.c1
-rw-r--r--drivers/infiniband/core/cma.c51
-rw-r--r--drivers/infiniband/core/cma_priv.h1
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c31
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c13
-rw-r--r--drivers/infiniband/hw/irdma/cm.c4
-rw-r--r--drivers/infiniband/hw/irdma/hw.c14
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c2
-rw-r--r--drivers/infiniband/hw/irdma/main.h1
-rw-r--r--drivers/infiniband/hw/irdma/user.h2
-rw-r--r--drivers/infiniband/hw/irdma/utils.c2
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c16
-rw-r--r--drivers/interconnect/qcom/sdm660.c25
-rw-r--r--drivers/iommu/apple-dart.c56
-rw-r--r--drivers/iommu/intel/dmar.c6
-rw-r--r--drivers/ipack/devices/ipoctal.c63
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic.c52
-rw-r--r--drivers/irqchip/irq-mbigen.c6
-rw-r--r--drivers/irqchip/irq-renesas-rza1.c12
-rw-r--r--drivers/mcb/mcb-core.c12
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c18
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h28
-rw-r--r--drivers/media/rc/ir_toy.c21
-rw-r--r--drivers/misc/bcm-vk/bcm_vk_tty.c6
-rw-r--r--drivers/misc/genwqe/card_base.c2
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c71
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c9
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c11
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_security.c115
-rw-r--r--drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h2
-rw-r--r--drivers/mmc/host/dw_mmc.c15
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c2
-rw-r--r--drivers/net/dsa/b53/b53_mdio.c21
-rw-r--r--drivers/net/dsa/b53/b53_mmap.c13
-rw-r--r--drivers/net/dsa/b53/b53_priv.h5
-rw-r--r--drivers/net/dsa/b53/b53_spi.c13
-rw-r--r--drivers/net/dsa/b53/b53_srab.c21
-rw-r--r--drivers/net/dsa/bcm_sf2.c14
-rw-r--r--drivers/net/dsa/dsa_loop.c22
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c16
-rw-r--r--drivers/net/dsa/lan9303-core.c6
-rw-r--r--drivers/net/dsa/lan9303.h1
-rw-r--r--drivers/net/dsa/lan9303_i2c.c24
-rw-r--r--drivers/net/dsa/lan9303_mdio.c15
-rw-r--r--drivers/net/dsa/lantiq_gswip.c18
-rw-r--r--drivers/net/dsa/microchip/ksz8795_spi.c11
-rw-r--r--drivers/net/dsa/microchip/ksz8863_smi.c13
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c14
-rw-r--r--drivers/net/dsa/microchip/ksz9477_spi.c8
-rw-r--r--drivers/net/dsa/mt7530.c18
-rw-r--r--drivers/net/dsa/mv88e6060.c18
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c55
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h1
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.c73
-rw-r--r--drivers/net/dsa/mv88e6xxx/devlink.h6
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c2
-rw-r--r--drivers/net/dsa/ocelot/felix.c2
-rw-r--r--drivers/net/dsa/ocelot/felix.h2
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c22
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c20
-rw-r--r--drivers/net/dsa/qca/ar9331.c18
-rw-r--r--drivers/net/dsa/qca8k.c18
-rw-r--r--drivers/net/dsa/realtek-smi-core.c22
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_devlink.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_flower.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c21
-rw-r--r--drivers/net/dsa/sja1105/sja1105_mdio.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_spi.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_static_config.h2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c2
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.h2
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-core.c6
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-platform.c22
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx-spi.c22
-rw-r--r--drivers/net/dsa/vitesse-vsc73xx.h1
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c6
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.h1
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_i2c.c18
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c18
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c7
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ierb.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ierb.h2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c16
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c29
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c28
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c107
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c35
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c52
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c8
-rw-r--r--drivers/net/ethernet/intel/Kconfig1
-rw-r--r--drivers/net/ethernet/intel/e100.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c8
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe_offload.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/micrel/Makefile6
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c21
-rw-r--r--drivers/net/ethernet/mscc/ocelot_devlink.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/ethernet/sun/Kconfig1
-rw-r--r--drivers/net/hamradio/Kconfig1
-rw-r--r--drivers/net/mdio/mdio-ipq4019.c6
-rw-r--r--drivers/net/mdio/mdio-mscc-miim.c15
-rw-r--r--drivers/net/mhi_net.c6
-rw-r--r--drivers/net/pcs/pcs-xpcs-nxp.c2
-rw-r--r--drivers/net/phy/bcm7xxx.c114
-rw-r--r--drivers/net/phy/mdio_bus.c5
-rw-r--r--drivers/net/phy/mdio_device.c11
-rw-r--r--drivers/net/phy/mxl-gpy.c23
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vxlan.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c4
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/nfc/st-nci/spi.c1
-rw-r--r--drivers/nvdimm/pmem.c5
-rw-r--r--drivers/nvme/host/core.c37
-rw-r--r--drivers/nvme/host/fc.c18
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/tcp.c13
-rw-r--r--drivers/nvmem/Kconfig1
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c19
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c67
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.h10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c37
-rw-r--r--drivers/platform/x86/amd-pmc.c2
-rw-r--r--drivers/platform/x86/dell/Kconfig3
-rw-r--r--drivers/platform/x86/gigabyte-wmi.c1
-rw-r--r--drivers/platform/x86/intel/hid.c27
-rw-r--r--drivers/platform/x86/intel/punit_ipc.c3
-rw-r--r--drivers/platform/x86/lg-laptop.c2
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c54
-rw-r--r--drivers/ptp/Kconfig1
-rw-r--r--drivers/regulator/max14577-regulator.c2
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/s390/cio/blacklist.c8
-rw-r--r--drivers/s390/cio/ccwgroup.c10
-rw-r--r--drivers/s390/cio/css.c40
-rw-r--r--drivers/s390/cio/css.h10
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c4
-rw-r--r--drivers/s390/net/qeth_core.h1
-rw-r--r--drivers/s390/net/qeth_core_main.c22
-rw-r--r--drivers/s390/net/qeth_l2_main.c1
-rw-r--r--drivers/s390/net/qeth_l3_main.c1
-rw-r--r--drivers/scsi/arm/Kconfig11
-rw-r--r--drivers/scsi/arm/acornscsi.c103
-rw-r--r--drivers/scsi/arm/fas216.c31
-rw-r--r--drivers/scsi/arm/queue.c2
-rw-r--r--drivers/scsi/csiostor/csio_init.c1
-rw-r--r--drivers/scsi/elx/efct/efct_lio.c4
-rw-r--r--drivers/scsi/elx/libefc/efc_device.c7
-rw-r--r--drivers/scsi/elx/libefc/efc_fabric.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c3
-rw-r--r--drivers/scsi/ncr53c8xx.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c8
-rw-r--r--drivers/scsi/sd.c14
-rw-r--r--drivers/scsi/sd_zbc.c8
-rw-r--r--drivers/scsi/ses.c24
-rw-r--r--drivers/scsi/sr_ioctl.c2
-rw-r--r--drivers/scsi/st.c1
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c78
-rw-r--r--drivers/scsi/ufs/ufshcd.c119
-rw-r--r--drivers/scsi/ufs/ufshcd.h5
-rw-r--r--drivers/scsi/ufs/ufshpb.c8
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/spi/spi-rockchip.c6
-rw-r--r--drivers/spi/spi-tegra20-slink.c9
-rw-r--r--drivers/spi/spi.c8
-rw-r--r--drivers/staging/greybus/uart.c62
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c2
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c8
-rw-r--r--drivers/target/target_core_configfs.c32
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c5
-rw-r--r--drivers/thermal/qcom/tsens.c4
-rw-r--r--drivers/thermal/thermal_core.c7
-rw-r--r--drivers/tty/serial/8250/8250_omap.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c2
-rw-r--r--drivers/tty/synclink_gt.c44
-rw-r--r--drivers/tty/tty_ldisc.c1
-rw-r--r--drivers/usb/cdns3/cdns3-gadget.c14
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/class/cdc-acm.h2
-rw-r--r--drivers/usb/core/hcd.c68
-rw-r--r--drivers/usb/dwc2/gadget.c193
-rw-r--r--drivers/usb/dwc2/hcd.c4
-rw-r--r--drivers/usb/dwc3/core.c30
-rw-r--r--drivers/usb/gadget/function/f_uac2.c19
-rw-r--r--drivers/usb/gadget/function/u_audio.c13
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c2
-rw-r--r--drivers/usb/host/bcma-hcd.c5
-rw-r--r--drivers/usb/host/ehci-hcd.c75
-rw-r--r--drivers/usb/host/xhci.c1
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/serial/cp210x.c38
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/unusual_uas.h2
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c5
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c10
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c2
-rw-r--r--drivers/vhost/vdpa.c2
-rw-r--r--drivers/virtio/virtio.c7
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/xen/Kconfig1
-rw-r--r--drivers/xen/balloon.c4
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/swiotlb-xen.c7
-rw-r--r--fs/afs/callback.c44
-rw-r--r--fs/afs/cell.c2
-rw-r--r--fs/afs/dir.c57
-rw-r--r--fs/afs/dir_edit.c4
-rw-r--r--fs/afs/file.c86
-rw-r--r--fs/afs/fs_probe.c8
-rw-r--r--fs/afs/fsclient.c31
-rw-r--r--fs/afs/inode.c98
-rw-r--r--fs/afs/internal.h21
-rw-r--r--fs/afs/protocol_afs.h15
-rw-r--r--fs/afs/protocol_yfs.h6
-rw-r--r--fs/afs/rotate.c1
-rw-r--r--fs/afs/server.c2
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/afs/write.c29
-rw-r--r--fs/binfmt_elf.c2
-rw-r--r--fs/btrfs/file-item.c13
-rw-r--r--fs/btrfs/space-info.c5
-rw-r--r--fs/btrfs/verity.c6
-rw-r--r--fs/btrfs/volumes.c13
-rw-r--r--fs/buffer.c8
-rw-r--r--fs/ceph/caps.c4
-rw-r--r--fs/cifs/cache.c2
-rw-r--r--fs/cifs/cifs_debug.c1
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifs_ioctl.h1
-rw-r--r--fs/cifs/cifs_spnego.c2
-rw-r--r--fs/cifs/cifs_spnego.h2
-rw-r--r--fs/cifs/cifs_unicode.c1
-rw-r--r--fs/cifs/cifsacl.c1
-rw-r--r--fs/cifs/cifsacl.h1
-rw-r--r--fs/cifs/cifsencrypt.c1
-rw-r--r--fs/cifs/cifsfs.c1
-rw-r--r--fs/cifs/cifsfs.h1
-rw-r--r--fs/cifs/cifsglob.h2
-rw-r--r--fs/cifs/cifspdu.h1
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/cifssmb.c1
-rw-r--r--fs/cifs/connect.c18
-rw-r--r--fs/cifs/dir.c1
-rw-r--r--fs/cifs/dns_resolve.c1
-rw-r--r--fs/cifs/dns_resolve.h4
-rw-r--r--fs/cifs/export.c1
-rw-r--r--fs/cifs/file.c7
-rw-r--r--fs/cifs/fscache.c2
-rw-r--r--fs/cifs/fscache.h2
-rw-r--r--fs/cifs/inode.c7
-rw-r--r--fs/cifs/ioctl.c3
-rw-r--r--fs/cifs/link.c1
-rw-r--r--fs/cifs/misc.c59
-rw-r--r--fs/cifs/netmisc.c1
-rw-r--r--fs/cifs/ntlmssp.h1
-rw-r--r--fs/cifs/readdir.c1
-rw-r--r--fs/cifs/rfc1002pdu.h1
-rw-r--r--fs/cifs/sess.c1
-rw-r--r--fs/cifs/smb2file.c1
-rw-r--r--fs/cifs/smb2glob.h1
-rw-r--r--fs/cifs/smb2inode.c1
-rw-r--r--fs/cifs/smb2misc.c1
-rw-r--r--fs/cifs/smb2pdu.c5
-rw-r--r--fs/cifs/smb2pdu.h1
-rw-r--r--fs/cifs/smb2proto.h1
-rw-r--r--fs/cifs/smb2status.h1
-rw-r--r--fs/cifs/smb2transport.c1
-rw-r--r--fs/cifs/smberr.h1
-rw-r--r--fs/cifs/transport.c1
-rw-r--r--fs/cifs/winucase.c1
-rw-r--r--fs/cifs/xattr.c1
-rw-r--r--fs/debugfs/inode.c2
-rw-r--r--fs/erofs/inode.c2
-rw-r--r--fs/erofs/zmap.c3
-rw-r--r--fs/ext2/balloc.c14
-rw-r--r--fs/ext4/dir.c6
-rw-r--r--fs/ext4/ext4.h3
-rw-r--r--fs/ext4/extents.c19
-rw-r--r--fs/ext4/fast_commit.c6
-rw-r--r--fs/ext4/inline.c150
-rw-r--r--fs/ext4/inode.c176
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/inode.c6
-rw-r--r--fs/io-wq.c4
-rw-r--r--fs/io_uring.c102
-rw-r--r--fs/kernfs/dir.c9
-rw-r--r--fs/ksmbd/auth.c205
-rw-r--r--fs/ksmbd/crypto_ctx.c16
-rw-r--r--fs/ksmbd/crypto_ctx.h8
-rw-r--r--fs/ksmbd/misc.c21
-rw-r--r--fs/ksmbd/misc.h4
-rw-r--r--fs/ksmbd/oplock.c41
-rw-r--r--fs/ksmbd/server.c3
-rw-r--r--fs/ksmbd/smb2pdu.c371
-rw-r--r--fs/ksmbd/smb2pdu.h9
-rw-r--r--fs/ksmbd/smb_common.c60
-rw-r--r--fs/ksmbd/smb_common.h9
-rw-r--r--fs/ksmbd/smbacl.c21
-rw-r--r--fs/ksmbd/transport_rdma.c1
-rw-r--r--fs/ksmbd/transport_tcp.c4
-rw-r--r--fs/ksmbd/vfs.c172
-rw-r--r--fs/ksmbd/vfs.h9
-rw-r--r--fs/lockd/svcxdr.h13
-rw-r--r--fs/nfsd/nfs4state.c16
-rw-r--r--fs/ocfs2/dlmglue.c3
-rw-r--r--fs/qnx4/dir.c36
-rw-r--r--fs/smbfs_common/smbfsctl.h2
-rw-r--r--fs/vboxsf/super.c12
-rw-r--r--fs/verity/enable.c2
-rw-r--r--fs/verity/open.c2
-rw-r--r--include/acpi/acpi_io.h8
-rw-r--r--include/linux/bpf.h3
-rw-r--r--include/linux/buffer_head.h4
-rw-r--r--include/linux/cpumask.h7
-rw-r--r--include/linux/dsa/ocelot.h2
-rw-r--r--include/linux/fwnode.h11
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/mdio.h3
-rw-r--r--include/linux/migrate.h6
-rw-r--r--include/linux/nvmem-consumer.h14
-rw-r--r--include/linux/packing.h2
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/pkeys.h2
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/net/dsa.h9
-rw-r--r--include/net/ip_fib.h2
-rw-r--r--include/net/mac80211.h8
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/pkt_sched.h1
-rw-r--r--include/net/sock.h34
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/sound/rawmidi.h1
-rw-r--r--include/trace/events/afs.h8
-rw-r--r--include/trace/events/cachefiles.h2
-rw-r--r--include/trace/events/erofs.h6
-rw-r--r--include/uapi/linux/android/binder.h7
-rw-r--r--include/uapi/linux/cifs/cifs_mount.h1
-rw-r--r--include/uapi/sound/asound.h1
-rw-r--r--include/xen/xen-ops.h12
-rw-r--r--init/do_mounts.c30
-rw-r--r--init/main.c6
-rw-r--r--kernel/bpf/bpf_struct_ops.c7
-rw-r--r--kernel/bpf/core.c2
-rw-r--r--kernel/cgroup/cgroup.c17
-rw-r--r--kernel/events/core.c34
-rw-r--r--kernel/irq/irqdomain.c2
-rw-r--r--kernel/sched/debug.c8
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/time/posix-cpu-timers.c3
-rw-r--r--kernel/trace/blktrace.c8
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Kconfig.kasan2
-rw-r--r--lib/packing.c2
-rw-r--r--lib/zlib_inflate/inffast.c13
-rw-r--r--mm/damon/dbgfs-test.h16
-rw-r--r--mm/debug.c4
-rw-r--r--mm/memcontrol.c10
-rw-r--r--mm/memory-failure.c12
-rw-r--r--mm/memory.c1
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/swap.c19
-rw-r--r--mm/util.c4
-rw-r--r--mm/workingset.c1
-rw-r--r--net/bpf/test_run.c14
-rw-r--r--net/bridge/br_multicast.c6
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/core/dev.c16
-rw-r--r--net/core/dev_addr_lists.c6
-rw-r--r--net/core/sock.c83
-rw-r--r--net/dsa/dsa2.c114
-rw-r--r--net/dsa/tag_ocelot.c2
-rw-r--r--net/dsa/tag_ocelot_8021q.c2
-rw-r--r--net/ipv4/fib_semantics.c16
-rw-r--r--net/ipv4/netfilter/iptable_raw.c2
-rw-r--r--net/ipv4/nexthop.c21
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/route.c5
-rw-r--r--net/ipv6/udp.c2
-rw-r--r--net/mac80211/mesh_pathtbl.c5
-rw-r--r--net/mac80211/mesh_ps.c3
-rw-r--r--net/mac80211/rate.c4
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/tx.c12
-rw-r--r--net/mac80211/wpa.c6
-rw-r--r--net/mptcp/mptcp_diag.c2
-rw-r--r--net/mptcp/pm_netlink.c4
-rw-r--r--net/mptcp/protocol.c6
-rw-r--r--net/mptcp/protocol.h2
-rw-r--r--net/mptcp/subflow.c2
-rw-r--r--net/mptcp/syncookies.c13
-rw-r--r--net/mptcp/token.c11
-rw-r--r--net/mptcp/token_test.c14
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h4
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c154
-rw-r--r--net/netfilter/nf_nat_core.c17
-rw-r--r--net/netfilter/nf_nat_masquerade.c168
-rw-r--r--net/netfilter/nf_tables_api.c30
-rw-r--r--net/netfilter/nft_compat.c17
-rw-r--r--net/netfilter/xt_LOG.c10
-rw-r--r--net/netfilter/xt_NFLOG.c10
-rw-r--r--net/sched/cls_flower.c6
-rw-r--r--net/sched/sch_api.c6
-rw-r--r--net/sctp/input.c2
-rw-r--r--net/smc/smc_clc.c3
-rw-r--r--net/smc/smc_core.c2
-rw-r--r--net/unix/af_unix.c83
-rw-r--r--scripts/Makefile.kasan3
-rw-r--r--scripts/sorttable.c4
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/smack/smack_lsm.c4
-rw-r--r--sound/core/rawmidi.c9
-rw-r--r--sound/drivers/pcsp/pcsp_lib.c2
-rw-r--r--sound/firewire/motu/amdtp-motu.c7
-rw-r--r--sound/firewire/oxfw/oxfw.c13
-rw-r--r--sound/pci/hda/hda_intel.c12
-rw-r--r--sound/pci/hda/patch_cs8409.c3
-rw-r--r--sound/pci/hda/patch_realtek.c129
-rw-r--r--sound/pci/pcxhr/pcxhr_core.c2
-rw-r--r--sound/soc/fsl/fsl_esai.c16
-rw-r--r--sound/soc/fsl/fsl_micfil.c15
-rw-r--r--sound/soc/fsl/fsl_sai.c14
-rw-r--r--sound/soc/fsl/fsl_spdif.c14
-rw-r--r--sound/soc/fsl/fsl_xcvr.c15
-rw-r--r--sound/soc/intel/boards/sof_sdw.c5
-rw-r--r--sound/soc/mediatek/Kconfig3
-rw-r--r--sound/soc/mediatek/common/mtk-afe-fe-dai.c19
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c7
-rw-r--r--sound/soc/sof/core.c4
-rw-r--r--sound/soc/sof/imx/imx8.c9
-rw-r--r--sound/soc/sof/imx/imx8m.c9
-rw-r--r--sound/soc/sof/loader.c8
-rw-r--r--sound/soc/sof/trace.c1
-rw-r--r--sound/soc/sof/xtensa/core.c4
-rw-r--r--sound/usb/card.c18
-rw-r--r--sound/usb/mixer.c26
-rw-r--r--sound/usb/mixer.h3
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--tools/arch/arm64/include/asm/sysreg.h1296
-rw-r--r--tools/arch/x86/lib/insn.c4
-rw-r--r--tools/lib/bpf/linker.c8
-rw-r--r--tools/objtool/special.c38
-rw-r--r--tools/perf/Documentation/jitdump-specification.txt2
-rw-r--r--tools/perf/Documentation/perf-c2c.txt2
-rw-r--r--tools/perf/Documentation/perf-intel-pt.txt2
-rw-r--r--tools/perf/Documentation/perf-lock.txt2
-rw-r--r--tools/perf/Documentation/perf-script-perl.txt2
-rw-r--r--tools/perf/Documentation/perf-script-python.txt2
-rw-r--r--tools/perf/Documentation/perf-stat.txt2
-rw-r--r--tools/perf/Documentation/topdown.txt2
-rw-r--r--tools/perf/arch/arm/util/auxtrace.c8
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c24
-rw-r--r--tools/perf/arch/arm/util/perf_regs.c2
-rw-r--r--tools/perf/arch/arm/util/pmu.c2
-rw-r--r--tools/perf/arch/arm/util/unwind-libdw.c6
-rw-r--r--tools/perf/arch/arm/util/unwind-libunwind.c4
-rw-r--r--tools/perf/arch/x86/util/iostat.c2
-rw-r--r--tools/perf/builtin-stat.c2
-rw-r--r--tools/perf/pmu-events/arch/powerpc/power8/other.json2
-rw-r--r--tools/perf/tests/code-reading.c4
-rw-r--r--tools/perf/tests/dwarf-unwind.c39
-rw-r--r--tools/perf/util/config.c2
-rw-r--r--tools/testing/selftests/arm64/signal/test_signals_utils.c7
-rw-r--r--tools/testing/selftests/bpf/Makefile3
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_ip_encap.sh13
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh2
-rw-r--r--tools/testing/selftests/kvm/.gitignore1
-rw-r--r--tools/testing/selftests/kvm/Makefile5
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c479
-rw-r--r--tools/testing/selftests/kvm/aarch64/debug-exceptions.c30
-rw-r--r--tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c2
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_init.c369
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/arch_timer.h142
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/delay.h25
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic.h21
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h90
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/spinlock.h13
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/vgic.h20
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h2
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h3
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic.c95
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_private.h21
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_v3.c240
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_v3.h70
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c24
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/spinlock.c27
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/vgic.c70
-rw-r--r--tools/testing/selftests/kvm/lib/test_util.c22
-rw-r--r--tools/testing/selftests/kvm/memslot_perf_test.c56
-rw-r--r--tools/testing/selftests/kvm/steal_time.c16
-rw-r--r--tools/testing/selftests/kvm/x86_64/mmio_warning_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c15
-rw-r--r--tools/testing/selftests/lib.mk1
-rw-r--r--tools/testing/selftests/net/af_unix/Makefile5
-rw-r--r--tools/testing/selftests/net/af_unix/test_unix_oob.c5
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat_zones.sh309
-rwxr-xr-xtools/testing/selftests/netfilter/nft_zones_many.sh156
-rw-r--r--tools/usb/testusb.c14
-rw-r--r--tools/vm/page-types.c2
731 files changed, 10961 insertions, 4234 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f780844c8390..bfeaec9b0de2 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2372,6 +2372,8 @@
kvm-arm.mode=
[KVM,ARM] Select one of KVM/arm64's modes of operation.
+ none: Forcefully disable KVM.
+
nvhe: Standard nVHE-based mode, without support for
protected guests.
@@ -2379,7 +2381,9 @@
state is kept private from the host.
Not valid if the kernel is running in EL2.
- Defaults to VHE/nVHE based on hardware support.
+ Defaults to VHE/nVHE based on hardware support. Setting
+ mode to "protected" will disable kexec and hibernation
+ for the host.
kvm-arm.vgic_v3_group0_trap=
[KVM,ARM] Trap guest accesses to GICv3 group-0
diff --git a/Documentation/core-api/irq/irq-domain.rst b/Documentation/core-api/irq/irq-domain.rst
index 6979b4af2c1f..9c0e8758037a 100644
--- a/Documentation/core-api/irq/irq-domain.rst
+++ b/Documentation/core-api/irq/irq-domain.rst
@@ -175,9 +175,10 @@ for IRQ numbers that are passed to struct device registrations. In that
case the Linux IRQ numbers cannot be dynamically assigned and the legacy
mapping should be used.
-As the name implies, the *_legacy() functions are deprecated and only
+As the name implies, the \*_legacy() functions are deprecated and only
exist to ease the support of ancient platforms. No new users should be
-added.
+added. Same goes for the \*_simple() functions when their use results
+in the legacy behaviour.
The legacy map assumes a contiguous range of IRQ numbers has already
been allocated for the controller and that the IRQ number can be
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
index 29de7807df54..bcd41e491f1d 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
@@ -31,11 +31,11 @@ properties:
clocks:
minItems: 1
- maxItems: 3
+ maxItems: 7
clock-names:
minItems: 1
- maxItems: 3
+ maxItems: 7
required:
- compatible
@@ -72,6 +72,32 @@ allOf:
contains:
enum:
- qcom,sdm660-a2noc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Bus Clock.
+ - description: Bus A Clock.
+ - description: IPA Clock.
+ - description: UFS AXI Clock.
+ - description: Aggregate2 UFS AXI Clock.
+ - description: Aggregate2 USB3 AXI Clock.
+ - description: Config NoC USB2 AXI Clock.
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+ - const: ipa
+ - const: ufs_axi
+ - const: aggre2_ufs_axi
+ - const: aggre2_usb3_axi
+ - const: cfg_noc_usb2_axi
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sdm660-bimc
- qcom,sdm660-cnoc
- qcom,sdm660-gnoc
@@ -91,6 +117,7 @@ examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/clock/qcom,mmcc-sdm660.h>
+ #include <dt-bindings/clock/qcom,gcc-sdm660.h>
bimc: interconnect@1008000 {
compatible = "qcom,sdm660-bimc";
@@ -123,9 +150,20 @@ examples:
compatible = "qcom,sdm660-a2noc";
reg = <0x01704000 0xc100>;
#interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
+ clock-names = "bus",
+ "bus_a",
+ "ipa",
+ "ufs_axi",
+ "aggre2_ufs_axi",
+ "aggre2_usb3_axi",
+ "cfg_noc_usb2_axi";
clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
- <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>;
+ <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
+ <&rpmcc RPM_SMD_IPA_CLK>,
+ <&gcc GCC_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
};
mnoc: interconnect@1745000 {
diff --git a/Documentation/hwmon/k10temp.rst b/Documentation/hwmon/k10temp.rst
index 8557e26281c3..91b99adc6c48 100644
--- a/Documentation/hwmon/k10temp.rst
+++ b/Documentation/hwmon/k10temp.rst
@@ -132,20 +132,3 @@ On Family 17h and Family 18h CPUs, additional temperature sensors may report
Core Complex Die (CCD) temperatures. Up to 8 such temperatures are reported
as temp{3..10}_input, labeled Tccd{1..8}. Actual support depends on the CPU
variant.
-
-Various Family 17h and 18h CPUs report voltage and current telemetry
-information. The following attributes may be reported.
-
-Attribute Label Description
-=============== ======= ================
-in0_input Vcore Core voltage
-in1_input Vsoc SoC voltage
-curr1_input Icore Core current
-curr2_input Isoc SoC current
-=============== ======= ================
-
-Current values are raw (unscaled) as reported by the CPU. Core current is
-reported as multiples of 1A / LSB. SoC is reported as multiples of 0.25A
-/ LSB. The real current is board specific. Reported currents should be seen
-as rough guidance, and should be scaled using sensors3.conf as appropriate
-for a given board.
diff --git a/Documentation/networking/device_drivers/ethernet/intel/ice.rst b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
index e7d9cbff771b..67b7a701ce9e 100644
--- a/Documentation/networking/device_drivers/ethernet/intel/ice.rst
+++ b/Documentation/networking/device_drivers/ethernet/intel/ice.rst
@@ -851,7 +851,7 @@ NOTES:
- 0x88A8 traffic will not be received unless VLAN stripping is disabled with
the following command::
- # ethool -K <ethX> rxvlan off
+ # ethtool -K <ethX> rxvlan off
- 0x88A8/0x8100 double VLANs cannot be used with 0x8100 or 0x8100/0x8100 VLANS
configured on the same port. 0x88a8/0x8100 traffic will not be received if
diff --git a/MAINTAINERS b/MAINTAINERS
index faaf9780af6d..df865a9e9f6a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -414,7 +414,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
F: drivers/acpi/pmic/
ACPI THERMAL DRIVER
-M: Zhang Rui <rui.zhang@intel.com>
+M: Rafael J. Wysocki <rafael@kernel.org>
+R: Zhang Rui <rui.zhang@intel.com>
L: linux-acpi@vger.kernel.org
S: Supported
W: https://01.org/linux-acpi
@@ -810,7 +811,7 @@ F: Documentation/devicetree/bindings/dma/altr,msgdma.yaml
F: drivers/dma/altera-msgdma.c
ALTERA PIO DRIVER
-M: Joyce Ooi <joyce.ooi@intel.com>
+M: Mun Yew Tham <mun.yew.tham@intel.com>
L: linux-gpio@vger.kernel.org
S: Maintained
F: drivers/gpio/gpio-altera.c
@@ -977,12 +978,12 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/amd-pmc.*
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
M: Evan Quan <evan.quan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
-F: drivers/gpu/drm/amd/pm/powerplay/
+F: drivers/gpu/drm/amd/pm/
AMD PTDMA DRIVER
M: Sanjay R Mehta <sanju.mehta@amd.com>
@@ -2804,9 +2805,8 @@ F: arch/arm/mach-pxa/include/mach/vpac270.h
F: arch/arm/mach-pxa/vpac270.c
ARM/VT8500 ARM ARCHITECTURE
-M: Tony Prisk <linux@prisktech.co.nz>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Orphan
F: Documentation/devicetree/bindings/i2c/i2c-wmt.txt
F: arch/arm/mach-vt8500/
F: drivers/clocksource/timer-vt8500.c
@@ -2962,7 +2962,7 @@ F: crypto/async_tx/
F: include/linux/async_tx.h
AT24 EEPROM DRIVER
-M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-i2c@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux.git
@@ -3385,9 +3385,11 @@ F: Documentation/networking/filter.rst
F: Documentation/userspace-api/ebpf/
F: arch/*/net/*
F: include/linux/bpf*
+F: include/linux/btf*
F: include/linux/filter.h
F: include/trace/events/xdp.h
F: include/uapi/linux/bpf*
+F: include/uapi/linux/btf*
F: include/uapi/linux/filter.h
F: kernel/bpf/
F: kernel/trace/bpf_trace.c
@@ -3821,7 +3823,6 @@ F: drivers/scsi/mpi3mr/
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <selvin.xavier@broadcom.com>
-M: Naresh Kumar PBS <nareshkumar.pbs@broadcom.com>
L: linux-rdma@vger.kernel.org
S: Supported
W: http://www.broadcom.com
@@ -4656,7 +4657,7 @@ W: http://linux-cifs.samba.org/
T: git git://git.samba.org/sfrench/cifs-2.6.git
F: Documentation/admin-guide/cifs/
F: fs/cifs/
-F: fs/cifs_common/
+F: fs/smbfs_common/
COMPACTPCI HOTPLUG CORE
M: Scott Murray <scott@spiteful.org>
@@ -7986,7 +7987,7 @@ F: include/linux/gpio/regmap.h
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
-M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-gpio@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
@@ -10194,8 +10195,8 @@ M: Hyunchul Lee <hyc.lee@gmail.com>
L: linux-cifs@vger.kernel.org
S: Maintained
T: git git://git.samba.org/ksmbd.git
-F: fs/cifs_common/
F: fs/ksmbd/
+F: fs/smbfs_common/
KERNEL UNIT TESTING FRAMEWORK (KUnit)
M: Brendan Higgins <brendanhiggins@google.com>
@@ -11379,7 +11380,7 @@ F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
F: drivers/iio/proximity/mb1232.c
MAXIM MAX77650 PMIC MFD DRIVER
-M: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+M: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-kernel@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/*/*max77650.yaml
@@ -13267,9 +13268,9 @@ F: Documentation/scsi/NinjaSCSI.rst
F: drivers/scsi/nsp32*
NIOS2 ARCHITECTURE
-M: Ley Foon Tan <ley.foon.tan@intel.com>
+M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
F: arch/nios2/
NITRO ENCLAVES (NE)
@@ -16663,13 +16664,6 @@ M: Lubomir Rintel <lkundrak@v3.sk>
S: Supported
F: drivers/char/pcmcia/scr24x_cs.c
-SCSI CDROM DRIVER
-M: Jens Axboe <axboe@kernel.dk>
-L: linux-scsi@vger.kernel.org
-S: Maintained
-W: http://www.kernel.dk
-F: drivers/scsi/sr*
-
SCSI RDMA PROTOCOL (SRP) INITIATOR
M: Bart Van Assche <bvanassche@acm.org>
L: linux-rdma@vger.kernel.org
@@ -16968,7 +16962,6 @@ F: drivers/misc/sgi-xp/
SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
M: Karsten Graul <kgraul@linux.ibm.com>
-M: Guvenc Gulce <guvenc@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -17904,7 +17897,8 @@ M: Olivier Moysan <olivier.moysan@foss.st.com>
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Maintained
-F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
+F: Documentation/devicetree/bindings/iio/adc/st,stm32-dfsdm-adc.yaml
+F: Documentation/devicetree/bindings/sound/st,stm32-*.yaml
F: sound/soc/stm/
STM32 TIMER/LPTIMER DRIVERS
@@ -17981,10 +17975,11 @@ F: Documentation/admin-guide/svga.rst
F: arch/x86/boot/video*
SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Christoph Hellwig <hch@infradead.org>
L: iommu@lists.linux-foundation.org
S: Supported
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
+W: http://git.infradead.org/users/hch/dma-mapping.git
+T: git git://git.infradead.org/users/hch/dma-mapping.git
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
F: kernel/dma/swiotlb.c
@@ -18567,13 +18562,14 @@ T: git git://linuxtv.org/media_tree.git
F: drivers/media/radio/radio-raremono.c
THERMAL
-M: Zhang Rui <rui.zhang@intel.com>
+M: Rafael J. Wysocki <rafael@kernel.org>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
R: Amit Kucheria <amitk@kernel.org>
+R: Zhang Rui <rui.zhang@intel.com>
L: linux-pm@vger.kernel.org
S: Supported
Q: https://patchwork.kernel.org/project/linux-pm/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/thermal/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git thermal
F: Documentation/devicetree/bindings/thermal/
F: drivers/thermal/
F: include/linux/cpu_cooling.h
@@ -18702,7 +18698,7 @@ F: include/linux/clk/ti.h
TI DAVINCI MACHINE SUPPORT
M: Sekhar Nori <nsekhar@ti.com>
-R: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+R: Bartosz Golaszewski <brgl@bgdev.pl>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
@@ -19301,13 +19297,12 @@ S: Maintained
F: drivers/usb/misc/chaoskey.c
USB CYPRESS C67X00 DRIVER
-M: Peter Korsgaard <jacmet@sunsite.dk>
L: linux-usb@vger.kernel.org
-S: Maintained
+S: Orphan
F: drivers/usb/c67x00/
USB DAVICOM DM9601 DRIVER
-M: Peter Korsgaard <jacmet@sunsite.dk>
+M: Peter Korsgaard <peter@korsgaard.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.linux-usb.org/usbnet
@@ -20487,7 +20482,6 @@ F: samples/bpf/xdpsock*
F: tools/lib/bpf/xsk*
XEN BLOCK SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
M: Roger Pau Monné <roger.pau@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
@@ -20535,7 +20529,7 @@ S: Supported
F: drivers/net/xen-netback/*
XEN PCI SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Juergen Gross <jgross@suse.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/x86/pci/*xen*
@@ -20558,7 +20552,8 @@ S: Supported
F: sound/xen/*
XEN SWIOTLB SUBSYSTEM
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+M: Juergen Gross <jgross@suse.com>
+M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: iommu@lists.linux-foundation.org
S: Supported
diff --git a/Makefile b/Makefile
index 5e7c1d854441..7b74223d1309 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
NAME = Opossums on Parade
# *DOCUMENTATION*
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5c7ae4c3954b..0d921d21fd35 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -186,6 +186,7 @@ config ARM64
select HAVE_GCC_PLUGINS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
+ select HAVE_KVM
select HAVE_NMI
select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
diff --git a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
index a620ac0d0b19..db333001df4d 100644
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
@@ -487,7 +487,6 @@
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_0>, <&usb0_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
- tx-fifo-resize;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
@@ -528,7 +527,6 @@
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_1>, <&usb1_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
- tx-fifo-resize;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 7535dc7cc5aa..bd68e1b7f29f 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -50,9 +50,6 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr);
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
#define acpi_os_ioremap acpi_os_ioremap
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size);
-#define acpi_os_memmap acpi_os_memmap
-
typedef u64 phys_cpuid_t;
#define PHYS_CPUID_INVALID INVALID_HWID
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 89faca0e740d..bfa58409a4d4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -525,6 +525,11 @@ alternative_endif
#define EXPORT_SYMBOL_NOKASAN(name) EXPORT_SYMBOL(name)
#endif
+#ifdef CONFIG_KASAN_HW_TAGS
+#define EXPORT_SYMBOL_NOHWKASAN(name)
+#else
+#define EXPORT_SYMBOL_NOHWKASAN(name) EXPORT_SYMBOL_NOKASAN(name)
+#endif
/*
* Emit a 64-bit absolute little endian symbol reference in a way that
* ensures that it will be resolved at build time, even when building a
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 327120c0089f..a39fcf318c77 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -295,6 +295,7 @@
#define MDCR_EL2_HPMFZO (UL(1) << 29)
#define MDCR_EL2_MTPME (UL(1) << 28)
#define MDCR_EL2_TDCC (UL(1) << 27)
+#define MDCR_EL2_HLP (UL(1) << 26)
#define MDCR_EL2_HCCD (UL(1) << 23)
#define MDCR_EL2_TTRF (UL(1) << 19)
#define MDCR_EL2_HPMD (UL(1) << 17)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index e86045ac43ba..0167f3702c61 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -44,31 +44,39 @@
#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init 0
-#define __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run 1
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context 2
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa 3
-#define __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid 4
-#define __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context 5
-#define __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff 6
-#define __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs 7
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config 8
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr 9
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr 10
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs 11
-#define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 12
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs 13
-#define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs 14
-#define __KVM_HOST_SMCCC_FUNC___pkvm_init 15
-#define __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp 16
-#define __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping 17
-#define __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector 18
-#define __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize 19
-#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc 20
#ifndef __ASSEMBLY__
#include <linux/mm.h>
+enum __kvm_host_smccc_func {
+ /* Hypercalls available only prior to pKVM finalisation */
+ /* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
+ __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2 = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
+ __KVM_HOST_SMCCC_FUNC___pkvm_init,
+ __KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
+ __KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
+ __KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,
+ __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,
+
+ /* Hypercalls available after pKVM finalisation */
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
+ __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
+ __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
+ __KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
+ __KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
+ __KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
+ __KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_read_vmcr,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_write_vmcr,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
+ __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs,
+ __KVM_HOST_SMCCC_FUNC___pkvm_vcpu_init_traps,
+};
+
#define DECLARE_KVM_VHE_SYM(sym) extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym) extern char kvm_nvhe_sym(sym)[]
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fd418955e31e..f4871e47b2d0 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -396,7 +396,10 @@ static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
if (vcpu_mode_is_32bit(vcpu))
return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
- return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
+ if (vcpu_mode_priv(vcpu))
+ return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
+ else
+ return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 369c30e28301..4be8486042a7 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -58,6 +58,7 @@
enum kvm_mode {
KVM_MODE_DEFAULT,
KVM_MODE_PROTECTED,
+ KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
@@ -779,6 +780,8 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm)
return false;
}
+void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
+
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 657d0c94cf82..5afd14ab15b9 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -115,7 +115,12 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
#endif
+extern u64 kvm_nvhe_sym(id_aa64pfr0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64pfr1_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64isar0_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64isar1_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val);
extern u64 kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val);
+extern u64 kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val);
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 3f93b9e0b339..02511650cffe 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -99,11 +99,17 @@ void mte_check_tfsr_el1(void);
static inline void mte_check_tfsr_entry(void)
{
+ if (!system_supports_mte())
+ return;
+
mte_check_tfsr_el1();
}
static inline void mte_check_tfsr_exit(void)
{
+ if (!system_supports_mte())
+ return;
+
/*
* The asynchronous faults are sync'ed automatically with
* TFSR_EL1 on kernel entry but for exit an explicit dsb()
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index 3a3264ff47b9..95f7686b728d 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -12,11 +12,13 @@ extern char *strrchr(const char *, int c);
#define __HAVE_ARCH_STRCHR
extern char *strchr(const char *, int c);
+#ifndef CONFIG_KASAN_HW_TAGS
#define __HAVE_ARCH_STRCMP
extern int strcmp(const char *, const char *);
#define __HAVE_ARCH_STRNCMP
extern int strncmp(const char *, const char *, __kernel_size_t);
+#endif
#define __HAVE_ARCH_STRLEN
extern __kernel_size_t strlen(const char *);
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b268082d67ed..9412a645a1c0 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1152,6 +1152,7 @@
#define ICH_HCR_TC (1 << 10)
#define ICH_HCR_TALL0 (1 << 11)
#define ICH_HCR_TALL1 (1 << 12)
+#define ICH_HCR_TDIR (1 << 14)
#define ICH_HCR_EOIcount_SHIFT 27
#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
@@ -1184,6 +1185,8 @@
#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
#define ICH_VTR_A3V_SHIFT 21
#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
+#define ICH_VTR_TDS_SHIFT 19
+#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT)
#define ARM64_FEATURE_FIELD_BITS 4
diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c
index 1c9c2f7a1c04..f3851724fe35 100644
--- a/arch/arm64/kernel/acpi.c
+++ b/arch/arm64/kernel/acpi.c
@@ -273,8 +273,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
return __pgprot(PROT_DEVICE_nGnRnE);
}
-static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
- acpi_size size, bool memory)
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
efi_memory_desc_t *md, *region = NULL;
pgprot_t prot;
@@ -300,11 +299,9 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
* It is fine for AML to remap regions that are not represented in the
* EFI memory map at all, as it only describes normal memory, and MMIO
* regions that require a virtual mapping to make them accessible to
- * the EFI runtime services. Determine the region default
- * attributes by checking the requested memory semantics.
+ * the EFI runtime services.
*/
- prot = memory ? __pgprot(PROT_NORMAL_NC) :
- __pgprot(PROT_DEVICE_nGnRnE);
+ prot = __pgprot(PROT_DEVICE_nGnRnE);
if (region) {
switch (region->type) {
case EFI_LOADER_CODE:
@@ -364,16 +361,6 @@ static void __iomem *__acpi_os_ioremap(acpi_physical_address phys,
return __ioremap(phys, size, prot);
}
-void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
-{
- return __acpi_os_ioremap(phys, size, false);
-}
-
-void __iomem *acpi_os_memmap(acpi_physical_address phys, acpi_size size)
-{
- return __acpi_os_ioremap(phys, size, true);
-}
-
/*
* Claim Synchronous External Aborts as a firmware first notification.
*
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f8a3067d10c6..6ec7036ef7e1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1526,9 +1526,13 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
/*
* For reasons that aren't entirely clear, enabling KPTI on Cavium
* ThunderX leads to apparent I-cache corruption of kernel text, which
- * ends as well as you might imagine. Don't even try.
+ * ends as well as you might imagine. Don't even try. We cannot rely
+ * on the cpus_have_*cap() helpers here to detect the CPU erratum
+ * because cpucap detection order may change. However, since we know
+ * affected CPUs are always in a homogeneous configuration, it is
+ * safe to rely on this_cpu_has_cap() here.
*/
- if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
+ if (this_cpu_has_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
str = "ARM64_WORKAROUND_CAVIUM_27456";
__kpti_forced = -1;
}
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 9d314a3bad3b..e5e801bc5312 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -142,12 +142,7 @@ void mte_enable_kernel_async(void)
#ifdef CONFIG_KASAN_HW_TAGS
void mte_check_tfsr_el1(void)
{
- u64 tfsr_el1;
-
- if (!system_supports_mte())
- return;
-
- tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
+ u64 tfsr_el1 = read_sysreg_s(SYS_TFSR_EL1);
if (unlikely(tfsr_el1 & SYS_TFSR_EL1_TF1)) {
/*
@@ -199,6 +194,9 @@ void mte_thread_init_user(void)
void mte_thread_switch(struct task_struct *next)
{
+ if (!system_supports_mte())
+ return;
+
mte_update_sctlr_user(next);
/*
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6f6ff072acbd..44369b99a57e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -1128,5 +1128,6 @@ bool cpus_are_stuck_in_kernel(void)
{
bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
- return !!cpus_stuck_in_kernel || smp_spin_tables;
+ return !!cpus_stuck_in_kernel || smp_spin_tables ||
+ is_protected_kvm_enabled();
}
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index d7eec0b43744..8ffcbe29395e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -4,6 +4,7 @@
#
source "virt/lib/Kconfig"
+source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
@@ -19,7 +20,7 @@ if VIRTUALIZATION
menuconfig KVM
bool "Kernel-based Virtual Machine (KVM) support"
- depends on OF
+ depends on HAVE_KVM
select MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select HAVE_KVM_CPU_RELAX_INTERCEPT
@@ -43,12 +44,9 @@ menuconfig KVM
If unsure, say N.
-if KVM
-
-source "virt/kvm/Kconfig"
-
config NVHE_EL2_DEBUG
bool "Debug mode for non-VHE EL2 object"
+ depends on KVM
help
Say Y here to enable the debug mode for the non-VHE KVM EL2 object.
Failure reports will BUG() in the hypervisor. This is intended for
@@ -56,6 +54,4 @@ config NVHE_EL2_DEBUG
If unsure, say N.
-endif # KVM
-
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 7838e9fb693e..f5490afe1ebf 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -291,10 +291,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
struct kvm *kvm_arch_alloc_vm(void)
{
+ size_t sz = sizeof(struct kvm);
+
if (!has_vhe())
- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+ return kzalloc(sz, GFP_KERNEL_ACCOUNT);
- return vzalloc(sizeof(struct kvm));
+ return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
@@ -612,6 +614,14 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
ret = kvm_arm_pmu_v3_enable(vcpu);
+ /*
+ * Initialize traps for protected VMs.
+ * NOTE: Move to run in EL2 directly, rather than via a hypercall, once
+ * the code is in place for first run initialization at EL2.
+ */
+ if (kvm_vm_is_protected(kvm))
+ kvm_call_hyp_nvhe(__pkvm_vcpu_init_traps, vcpu);
+
return ret;
}
@@ -1571,25 +1581,33 @@ static void cpu_set_hyp_vector(void)
kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot);
}
-static void cpu_hyp_reinit(void)
+static void cpu_hyp_init_context(void)
{
kvm_init_host_cpu_context(&this_cpu_ptr_hyp_sym(kvm_host_data)->host_ctxt);
- cpu_hyp_reset();
-
- if (is_kernel_in_hyp_mode())
- kvm_timer_init_vhe();
- else
+ if (!is_kernel_in_hyp_mode())
cpu_init_hyp_mode();
+}
+static void cpu_hyp_init_features(void)
+{
cpu_set_hyp_vector();
-
kvm_arm_init_debug();
+ if (is_kernel_in_hyp_mode())
+ kvm_timer_init_vhe();
+
if (vgic_present)
kvm_vgic_init_cpu_hardware();
}
+static void cpu_hyp_reinit(void)
+{
+ cpu_hyp_reset();
+ cpu_hyp_init_context();
+ cpu_hyp_init_features();
+}
+
static void _kvm_arch_hardware_enable(void *discard)
{
if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
@@ -1780,10 +1798,17 @@ static int do_pkvm_init(u32 hyp_va_bits)
int ret;
preempt_disable();
- hyp_install_host_vector();
+ cpu_hyp_init_context();
ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size,
num_possible_cpus(), kern_hyp_va(per_cpu_base),
hyp_va_bits);
+ cpu_hyp_init_features();
+
+ /*
+ * The stub hypercalls are now disabled, so set our local flag to
+ * prevent a later re-init attempt in kvm_arch_hardware_enable().
+ */
+ __this_cpu_write(kvm_arm_hardware_enabled, 1);
preempt_enable();
return ret;
@@ -1794,8 +1819,13 @@ static int kvm_hyp_init_protection(u32 hyp_va_bits)
void *addr = phys_to_virt(hyp_mem_base);
int ret;
+ kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+ kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+ kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1);
+ kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
+ kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP);
if (ret)
@@ -1963,9 +1993,25 @@ out_err:
return err;
}
-static void _kvm_host_prot_finalize(void *discard)
+static void _kvm_host_prot_finalize(void *arg)
{
- WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize));
+ int *err = arg;
+
+ if (WARN_ON(kvm_call_hyp_nvhe(__pkvm_prot_finalize)))
+ WRITE_ONCE(*err, -EINVAL);
+}
+
+static int pkvm_drop_host_privileges(void)
+{
+ int ret = 0;
+
+ /*
+ * Flip the static key upfront as that may no longer be possible
+ * once the host stage 2 is installed.
+ */
+ static_branch_enable(&kvm_protected_mode_initialized);
+ on_each_cpu(_kvm_host_prot_finalize, &ret, 1);
+ return ret;
}
static int finalize_hyp_mode(void)
@@ -1979,15 +2025,7 @@ static int finalize_hyp_mode(void)
* None of other sections should ever be introspected.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
-
- /*
- * Flip the static key upfront as that may no longer be possible
- * once the host stage 2 is installed.
- */
- static_branch_enable(&kvm_protected_mode_initialized);
- on_each_cpu(_kvm_host_prot_finalize, NULL, 1);
-
- return 0;
+ return pkvm_drop_host_privileges();
}
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
@@ -2056,6 +2094,11 @@ int kvm_arch_init(void *opaque)
return -ENODEV;
}
+ if (kvm_get_mode() == KVM_MODE_NONE) {
+ kvm_info("KVM disabled from command line\n");
+ return -ENODEV;
+ }
+
in_hyp_mode = is_kernel_in_hyp_mode();
if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) ||
@@ -2129,8 +2172,15 @@ static int __init early_kvm_mode_cfg(char *arg)
return 0;
}
- if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode()))
+ if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) {
+ kvm_mode = KVM_MODE_DEFAULT;
+ return 0;
+ }
+
+ if (strcmp(arg, "none") == 0) {
+ kvm_mode = KVM_MODE_NONE;
return 0;
+ }
return -EINVAL;
}
diff --git a/arch/arm64/kvm/hyp/include/hyp/fault.h b/arch/arm64/kvm/hyp/include/hyp/fault.h
new file mode 100644
index 000000000000..1b8a2dcd712f
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/hyp/fault.h
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ */
+
+#ifndef __ARM64_KVM_HYP_FAULT_H__
+#define __ARM64_KVM_HYP_FAULT_H__
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
+
+static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
+{
+ u64 par, tmp;
+
+ /*
+ * Resolve the IPA the hard way using the guest VA.
+ *
+ * Stage-1 translation already validated the memory access
+ * rights. As such, we can use the EL1 translation regime, and
+ * don't have to distinguish between EL0 and EL1 access.
+ *
+ * We do need to save/restore PAR_EL1 though, as we haven't
+ * saved the guest context yet, and we may return early...
+ */
+ par = read_sysreg_par();
+ if (!__kvm_at("s1e1r", far))
+ tmp = read_sysreg_par();
+ else
+ tmp = SYS_PAR_EL1_F; /* back to the guest */
+ write_sysreg(par, par_el1);
+
+ if (unlikely(tmp & SYS_PAR_EL1_F))
+ return false; /* Translation failed, back to guest */
+
+ /* Convert PAR to HPFAR format */
+ *hpfar = PAR_TO_HPFAR(tmp);
+ return true;
+}
+
+static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
+{
+ u64 hpfar, far;
+
+ far = read_sysreg_el2(SYS_FAR);
+
+ /*
+ * The HPFAR can be invalid if the stage 2 fault did not
+ * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
+ * bit is clear) and one of the two following cases are true:
+ * 1. The fault was due to a permission fault
+ * 2. The processor carries errata 834220
+ *
+ * Therefore, for all non S1PTW faults where we either have a
+ * permission fault or the errata workaround is enabled, we
+ * resolve the IPA using the AT instruction.
+ */
+ if (!(esr & ESR_ELx_S1PTW) &&
+ (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
+ (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+ if (!__translate_far_to_hpfar(far, &hpfar))
+ return false;
+ } else {
+ hpfar = read_sysreg(hpfar_el2);
+ }
+
+ fault->far_el2 = far;
+ fault->hpfar_el2 = hpfar;
+ return true;
+}
+
+#endif
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index a0e78a6027be..c6e98c7e918b 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -8,6 +8,7 @@
#define __ARM64_KVM_HYP_SWITCH_H__
#include <hyp/adjust_pc.h>
+#include <hyp/fault.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
@@ -133,78 +134,9 @@ static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
}
}
-static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
-{
- u64 par, tmp;
-
- /*
- * Resolve the IPA the hard way using the guest VA.
- *
- * Stage-1 translation already validated the memory access
- * rights. As such, we can use the EL1 translation regime, and
- * don't have to distinguish between EL0 and EL1 access.
- *
- * We do need to save/restore PAR_EL1 though, as we haven't
- * saved the guest context yet, and we may return early...
- */
- par = read_sysreg_par();
- if (!__kvm_at("s1e1r", far))
- tmp = read_sysreg_par();
- else
- tmp = SYS_PAR_EL1_F; /* back to the guest */
- write_sysreg(par, par_el1);
-
- if (unlikely(tmp & SYS_PAR_EL1_F))
- return false; /* Translation failed, back to guest */
-
- /* Convert PAR to HPFAR format */
- *hpfar = PAR_TO_HPFAR(tmp);
- return true;
-}
-
-static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
-{
- u64 hpfar, far;
-
- far = read_sysreg_el2(SYS_FAR);
-
- /*
- * The HPFAR can be invalid if the stage 2 fault did not
- * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
- * bit is clear) and one of the two following cases are true:
- * 1. The fault was due to a permission fault
- * 2. The processor carries errata 834220
- *
- * Therefore, for all non S1PTW faults where we either have a
- * permission fault or the errata workaround is enabled, we
- * resolve the IPA using the AT instruction.
- */
- if (!(esr & ESR_ELx_S1PTW) &&
- (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
- (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
- if (!__translate_far_to_hpfar(far, &hpfar))
- return false;
- } else {
- hpfar = read_sysreg(hpfar_el2);
- }
-
- fault->far_el2 = far;
- fault->hpfar_el2 = hpfar;
- return true;
-}
-
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
- u8 ec;
- u64 esr;
-
- esr = vcpu->arch.fault.esr_el2;
- ec = ESR_ELx_EC(esr);
-
- if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
- return true;
-
- return __get_fault_info(esr, &vcpu->arch.fault);
+ return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
@@ -225,8 +157,13 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}
-/* Check for an FPSIMD/SVE trap and handle as appropriate */
-static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
+/*
+ * We trap the first access to the FP/SIMD to save the host context and
+ * restore the guest context lazily.
+ * If FP/SIMD is not implemented, handle the trap and inject an undefined
+ * instruction exception to the guest. Similarly for trapped SVE accesses.
+ */
+static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
bool sve_guest, sve_host;
u8 esr_ec;
@@ -244,9 +181,6 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
}
esr_ec = kvm_vcpu_trap_get_class(vcpu);
- if (esr_ec != ESR_ELx_EC_FP_ASIMD &&
- esr_ec != ESR_ELx_EC_SVE)
- return false;
/* Don't handle SVE traps for non-SVE vcpus here: */
if (!sve_guest && esr_ec != ESR_ELx_EC_FP_ASIMD)
@@ -348,14 +282,6 @@ static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
static inline bool esr_is_ptrauth_trap(u32 esr)
{
- u32 ec = ESR_ELx_EC(esr);
-
- if (ec == ESR_ELx_EC_PAC)
- return true;
-
- if (ec != ESR_ELx_EC_SYS64)
- return false;
-
switch (esr_sys64_to_sysreg(esr)) {
case SYS_APIAKEYLO_EL1:
case SYS_APIAKEYHI_EL1:
@@ -384,13 +310,12 @@ static inline bool esr_is_ptrauth_trap(u32 esr)
DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
-static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
struct kvm_cpu_context *ctxt;
u64 val;
- if (!vcpu_has_ptrauth(vcpu) ||
- !esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
+ if (!vcpu_has_ptrauth(vcpu))
return false;
ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
@@ -409,6 +334,90 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
return true;
}
+static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+ handle_tx2_tvm(vcpu))
+ return true;
+
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+ __vgic_v3_perform_cpuif_access(vcpu) == 1)
+ return true;
+
+ if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
+ return kvm_hyp_handle_ptrauth(vcpu, exit_code);
+
+ return false;
+}
+
+static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
+ __vgic_v3_perform_cpuif_access(vcpu) == 1)
+ return true;
+
+ return false;
+}
+
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ if (!__populate_fault_info(vcpu))
+ return true;
+
+ return false;
+}
+
+static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ if (!__populate_fault_info(vcpu))
+ return true;
+
+ if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
+ bool valid;
+
+ valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+ kvm_vcpu_dabt_isvalid(vcpu) &&
+ !kvm_vcpu_abt_issea(vcpu) &&
+ !kvm_vcpu_abt_iss1tw(vcpu);
+
+ if (valid) {
+ int ret = __vgic_v2_perform_cpuif_access(vcpu);
+
+ if (ret == 1)
+ return true;
+
+ /* Promote an illegal access to an SError.*/
+ if (ret == -1)
+ *exit_code = ARM_EXCEPTION_EL1_SERROR;
+ }
+ }
+
+ return false;
+}
+
+typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
+
+static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
+
+/*
+ * Allow the hypervisor to handle the exit with an exit handler if it has one.
+ *
+ * Returns true if the hypervisor handled the exit, and control should go back
+ * to the guest, or false if it hasn't.
+ */
+static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
+ exit_handler_fn fn;
+
+ fn = handlers[kvm_vcpu_trap_get_class(vcpu)];
+
+ if (fn)
+ return fn(vcpu, exit_code);
+
+ return false;
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -443,59 +452,9 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (*exit_code != ARM_EXCEPTION_TRAP)
goto exit;
- if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
- handle_tx2_tvm(vcpu))
+ /* Check if there's an exit handler and allow it to handle the exit. */
+ if (kvm_hyp_handle_exit(vcpu, exit_code))
goto guest;
-
- /*
- * We trap the first access to the FP/SIMD to save the host context
- * and restore the guest context lazily.
- * If FP/SIMD is not implemented, handle the trap and inject an
- * undefined instruction exception to the guest.
- * Similarly for trapped SVE accesses.
- */
- if (__hyp_handle_fpsimd(vcpu))
- goto guest;
-
- if (__hyp_handle_ptrauth(vcpu))
- goto guest;
-
- if (!__populate_fault_info(vcpu))
- goto guest;
-
- if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
- bool valid;
-
- valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
- kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
- kvm_vcpu_dabt_isvalid(vcpu) &&
- !kvm_vcpu_abt_issea(vcpu) &&
- !kvm_vcpu_abt_iss1tw(vcpu);
-
- if (valid) {
- int ret = __vgic_v2_perform_cpuif_access(vcpu);
-
- if (ret == 1)
- goto guest;
-
- /* Promote an illegal access to an SError.*/
- if (ret == -1)
- *exit_code = ARM_EXCEPTION_EL1_SERROR;
-
- goto exit;
- }
- }
-
- if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
- (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
- kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
- int ret = __vgic_v3_perform_cpuif_access(vcpu);
-
- if (ret == 1)
- goto guest;
- }
-
exit:
/* Return to the host kernel and handle the exit */
return false;
diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
new file mode 100644
index 000000000000..eea1f6a53723
--- /dev/null
+++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h
@@ -0,0 +1,200 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Fuad Tabba <tabba@google.com>
+ */
+
+#ifndef __ARM64_KVM_FIXED_CONFIG_H__
+#define __ARM64_KVM_FIXED_CONFIG_H__
+
+#include <asm/sysreg.h>
+
+/*
+ * This file contains definitions for features to be allowed or restricted for
+ * guest virtual machines, depending on the mode KVM is running in and on the
+ * type of guest that is running.
+ *
+ * The ALLOW masks represent a bitmask of feature fields that are allowed
+ * without any restrictions as long as they are supported by the system.
+ *
+ * The RESTRICT_UNSIGNED masks, if present, represent unsigned fields for
+ * features that are restricted to support at most the specified feature.
+ *
+ * If a feature field is not present in either, then it is not supported.
+ *
+ * The approach taken for protected VMs is to allow features that are:
+ * - Needed by common Linux distributions (e.g., floating point)
+ * - Trivial to support, e.g., supporting the feature does not introduce or
+ * require tracking of additional state in KVM
+ * - Cannot be trapped or prevent the guest from using anyway
+ */
+
+/*
+ * Allow for protected VMs:
+ * - Floating-point and Advanced SIMD
+ * - Data Independent Timing
+ */
+#define PVM_ID_AA64PFR0_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64PFR0_FP) | \
+ ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD) | \
+ ARM64_FEATURE_MASK(ID_AA64PFR0_DIT) \
+ )
+
+/*
+ * Restrict to the following *unsigned* features for protected VMs:
+ * - AArch64 guests only (no support for AArch32 guests):
+ * AArch32 adds complexity in trap handling, emulation, condition codes,
+ * etc...
+ * - RAS (v1)
+ * Supported by KVM
+ */
+#define PVM_ID_AA64PFR0_RESTRICT_UNSIGNED (\
+ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL2), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL3), ID_AA64PFR0_ELx_64BIT_ONLY) | \
+ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), ID_AA64PFR0_RAS_V1) \
+ )
+
+/*
+ * Allow for protected VMs:
+ * - Branch Target Identification
+ * - Speculative Store Bypassing
+ */
+#define PVM_ID_AA64PFR1_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64PFR1_BT) | \
+ ARM64_FEATURE_MASK(ID_AA64PFR1_SSBS) \
+ )
+
+/*
+ * Allow for protected VMs:
+ * - Mixed-endian
+ * - Distinction between Secure and Non-secure Memory
+ * - Mixed-endian at EL0 only
+ * - Non-context synchronizing exception entry and exit
+ */
+#define PVM_ID_AA64MMFR0_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR0_SNSMEM) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR0_BIGENDEL0) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR0_EXS) \
+ )
+
+/*
+ * Restrict to the following *unsigned* features for protected VMs:
+ * - 40-bit IPA
+ * - 16-bit ASID
+ */
+#define PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED (\
+ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_PARANGE), ID_AA64MMFR0_PARANGE_40) | \
+ FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64MMFR0_ASID), ID_AA64MMFR0_ASID_16) \
+ )
+
+/*
+ * Allow for protected VMs:
+ * - Hardware translation table updates to Access flag and Dirty state
+ * - Number of VMID bits from CPU
+ * - Hierarchical Permission Disables
+ * - Privileged Access Never
+ * - SError interrupt exceptions from speculative reads
+ * - Enhanced Translation Synchronization
+ */
+#define PVM_ID_AA64MMFR1_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR1_VMIDBITS) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR1_HPD) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR1_PAN) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR1_SPECSEI) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR1_ETS) \
+ )
+
+/*
+ * Allow for protected VMs:
+ * - Common not Private translations
+ * - User Access Override
+ * - IESB bit in the SCTLR_ELx registers
+ * - Unaligned single-copy atomicity and atomic functions
+ * - ESR_ELx.EC value on an exception by read access to feature ID space
+ * - TTL field in address operations.
+ * - Break-before-make sequences when changing translation block size
+ * - E0PDx mechanism
+ */
+#define PVM_ID_AA64MMFR2_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_CNP) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_UAO) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_IESB) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_AT) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_IDS) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_TTL) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_BBM) | \
+ ARM64_FEATURE_MASK(ID_AA64MMFR2_E0PD) \
+ )
+
+/*
+ * No support for Scalable Vectors for protected VMs:
+ * Requires additional support from KVM, e.g., context-switching and
+ * trapping at EL2
+ */
+#define PVM_ID_AA64ZFR0_ALLOW (0ULL)
+
+/*
+ * No support for debug, including breakpoints and watchpoints, for protected
+ * VMs:
+ * The Arm architecture mandates support for at least the Armv8 debug
+ * architecture, which would include at least 2 hardware breakpoints and
+ * watchpoints. Providing that support to protected guests adds
+ * considerable state and complexity. Therefore, the reserved value of 0 is
+ * used for debug-related fields.
+ */
+#define PVM_ID_AA64DFR0_ALLOW (0ULL)
+#define PVM_ID_AA64DFR1_ALLOW (0ULL)
+
+/*
+ * No support for implementation defined features.
+ */
+#define PVM_ID_AA64AFR0_ALLOW (0ULL)
+#define PVM_ID_AA64AFR1_ALLOW (0ULL)
+
+/*
+ * No restrictions on instructions implemented in AArch64.
+ */
+#define PVM_ID_AA64ISAR0_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_AES) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA1) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA2) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_CRC32) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_RDM) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_SHA3) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_SM3) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_SM4) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_DP) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_FHM) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_TS) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_TLB) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR0_RNDR) \
+ )
+
+#define PVM_ID_AA64ISAR1_ALLOW (\
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_DPB) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_API) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_JSCVT) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_FCMA) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_LRCPC) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_FRINTTS) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_SB) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_SPECRES) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_BF16) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_DGH) | \
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_I8MM) \
+ )
+
+u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id);
+bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code);
+bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code);
+int kvm_check_pvm_sysreg_table(void);
+
+#endif /* __ARM64_KVM_FIXED_CONFIG_H__ */
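A rough, self-contained illustration of how the ALLOW masks above are meant to be consumed (a plain C sketch, not part of the patch; the helper name and mask are placeholders rather than the real ARM64_FEATURE_MASK() definitions): fields covered by an ALLOW mask pass through from the sanitised host value, everything else reads as zero, i.e. not implemented.

#include <stdint.h>

/* Sketch only: expose just the allowed fields of a sanitised ID register. */
static uint64_t pvm_expose_id_reg(uint64_t host_sanitised, uint64_t allow)
{
	return host_sanitised & allow;
}

The RESTRICT_UNSIGNED masks additionally cap unsigned fields at a fixed maximum; the per-field minimum that implements this is sketched after the sys_regs.c hunk further down.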
diff --git a/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h b/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h
index 1e6d995968a1..45a84f0ade04 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/trap_handler.h
@@ -15,4 +15,6 @@
#define DECLARE_REG(type, name, ctxt, reg) \
type name = (type)cpu_reg(ctxt, (reg))
+void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu);
+
#endif /* __ARM64_KVM_NVHE_TRAP_HANDLER_H__ */
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index 8d741f71377f..c3c11974fa3b 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -14,7 +14,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs))
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \
- cache.o setup.o mm.o mem_protect.o
+ cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
obj-y += $(lib-objs)
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index 4b652ffb591d..0c6116d34e18 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -110,17 +110,14 @@ SYM_FUNC_START(__hyp_do_panic)
b __host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)
-.macro host_el1_sync_vect
- .align 7
-.L__vect_start\@:
- stp x0, x1, [sp, #-16]!
- mrs x0, esr_el2
- lsr x0, x0, #ESR_ELx_EC_SHIFT
- cmp x0, #ESR_ELx_EC_HVC64
- b.ne __host_exit
-
+SYM_FUNC_START(__host_hvc)
ldp x0, x1, [sp] // Don't fixup the stack yet
+ /* No stub for you, sonny Jim */
+alternative_if ARM64_KVM_PROTECTED_MODE
+ b __host_exit
+alternative_else_nop_endif
+
/* Check for a stub HVC call */
cmp x0, #HVC_STUB_HCALL_NR
b.hs __host_exit
@@ -137,6 +134,17 @@ SYM_FUNC_END(__hyp_do_panic)
ldr x5, =__kvm_handle_stub_hvc
hyp_pa x5, x6
br x5
+SYM_FUNC_END(__host_hvc)
+
+.macro host_el1_sync_vect
+ .align 7
+.L__vect_start\@:
+ stp x0, x1, [sp, #-16]!
+ mrs x0, esr_el2
+ lsr x0, x0, #ESR_ELx_EC_SHIFT
+ cmp x0, #ESR_ELx_EC_HVC64
+ b.eq __host_hvc
+ b __host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
.error "host_el1_sync_vect larger than vector entry"
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 2da6aa8da868..b096bf009144 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -4,7 +4,7 @@
* Author: Andrew Scull <ascull@google.com>
*/
-#include <hyp/switch.h>
+#include <hyp/adjust_pc.h>
#include <asm/pgtable-types.h>
#include <asm/kvm_asm.h>
@@ -160,41 +160,65 @@ static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
{
cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
}
+
+static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+
+ __pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
static const hcall_t host_hcall[] = {
- HANDLE_FUNC(__kvm_vcpu_run),
+ /* ___kvm_hyp_init */
+ HANDLE_FUNC(__kvm_get_mdcr_el2),
+ HANDLE_FUNC(__pkvm_init),
+ HANDLE_FUNC(__pkvm_create_private_mapping),
+ HANDLE_FUNC(__pkvm_cpu_set_vector),
+ HANDLE_FUNC(__kvm_enable_ssbs),
+ HANDLE_FUNC(__vgic_v3_init_lrs),
+ HANDLE_FUNC(__vgic_v3_get_gic_config),
+ HANDLE_FUNC(__pkvm_prot_finalize),
+
+ HANDLE_FUNC(__pkvm_host_share_hyp),
HANDLE_FUNC(__kvm_adjust_pc),
+ HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
- HANDLE_FUNC(__kvm_enable_ssbs),
- HANDLE_FUNC(__vgic_v3_get_gic_config),
HANDLE_FUNC(__vgic_v3_read_vmcr),
HANDLE_FUNC(__vgic_v3_write_vmcr),
- HANDLE_FUNC(__vgic_v3_init_lrs),
- HANDLE_FUNC(__kvm_get_mdcr_el2),
HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_aprs),
- HANDLE_FUNC(__pkvm_init),
- HANDLE_FUNC(__pkvm_cpu_set_vector),
- HANDLE_FUNC(__pkvm_host_share_hyp),
- HANDLE_FUNC(__pkvm_create_private_mapping),
- HANDLE_FUNC(__pkvm_prot_finalize),
+ HANDLE_FUNC(__pkvm_vcpu_init_traps),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(unsigned long, id, host_ctxt, 0);
+ unsigned long hcall_min = 0;
hcall_t hfn;
+ /*
+ * If pKVM has been initialised then reject any calls to the
+ * early "privileged" hypercalls. Note that we cannot reject
+ * calls to __pkvm_prot_finalize for two reasons: (1) The static
+ * key used to determine initialisation must be toggled prior to
+ * finalisation and (2) finalisation is performed on a per-CPU
+ * basis. This is all fine, however, since __pkvm_prot_finalize
+ * returns -EPERM after the first call for a given CPU.
+ */
+ if (static_branch_unlikely(&kvm_protected_mode_initialized))
+ hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
+
id -= KVM_HOST_SMCCC_ID(0);
- if (unlikely(id >= ARRAY_SIZE(host_hcall)))
+ if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
goto inval;
hfn = host_hcall[id];
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 34eeb524b686..c1a90dd022b8 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -11,7 +11,7 @@
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>
-#include <hyp/switch.h>
+#include <hyp/fault.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
@@ -25,12 +25,6 @@ struct host_kvm host_kvm;
static struct hyp_pool host_s2_pool;
-/*
- * Copies of the host's CPU features registers holding sanitized values.
- */
-u64 id_aa64mmfr0_el1_sys_val;
-u64 id_aa64mmfr1_el1_sys_val;
-
const u8 pkvm_hyp_id = 1;
static void *host_s2_zalloc_pages_exact(size_t size)
@@ -134,6 +128,9 @@ int __pkvm_prot_finalize(void)
struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
+ if (params->hcr_el2 & HCR_VM)
+ return -EPERM;
+
params->vttbr = kvm_get_vttbr(mmu);
params->vtcr = host_kvm.arch.vtcr;
params->hcr_el2 |= HCR_VM;
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
new file mode 100644
index 000000000000..99c8d8b73e70
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Fuad Tabba <tabba@google.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <nvhe/fixed_config.h>
+#include <nvhe/trap_handler.h>
+
+/*
+ * Set trap register values based on features in ID_AA64PFR0.
+ */
+static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
+{
+ const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR0_EL1);
+ u64 hcr_set = HCR_RW;
+ u64 hcr_clear = 0;
+ u64 cptr_set = 0;
+
+ /* Protected KVM does not support AArch32 guests. */
+ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+
+ /*
+ * Linux guests assume support for floating-point and Advanced SIMD. Do
+ * not change the trapping behavior for these from the KVM default.
+ */
+ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+ PVM_ID_AA64PFR0_ALLOW));
+ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+ PVM_ID_AA64PFR0_ALLOW));
+
+ /* Trap RAS unless all current versions are supported */
+ if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
+ ID_AA64PFR0_RAS_V1P1) {
+ hcr_set |= HCR_TERR | HCR_TEA;
+ hcr_clear |= HCR_FIEN;
+ }
+
+ /* Trap AMU */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
+ hcr_clear |= HCR_AMVOFFEN;
+ cptr_set |= CPTR_EL2_TAM;
+ }
+
+ /* Trap SVE */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
+ cptr_set |= CPTR_EL2_TZ;
+
+ vcpu->arch.hcr_el2 |= hcr_set;
+ vcpu->arch.hcr_el2 &= ~hcr_clear;
+ vcpu->arch.cptr_el2 |= cptr_set;
+}
+
+/*
+ * Set trap register values based on features in ID_AA64PFR1.
+ */
+static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
+{
+ const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64PFR1_EL1);
+ u64 hcr_set = 0;
+ u64 hcr_clear = 0;
+
+ /* Memory Tagging: Trap and Treat as Untagged if not supported. */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
+ hcr_set |= HCR_TID5;
+ hcr_clear |= HCR_DCT | HCR_ATA;
+ }
+
+ vcpu->arch.hcr_el2 |= hcr_set;
+ vcpu->arch.hcr_el2 &= ~hcr_clear;
+}
+
+/*
+ * Set trap register values based on features in ID_AA64DFR0.
+ */
+static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
+{
+ const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64DFR0_EL1);
+ u64 mdcr_set = 0;
+ u64 mdcr_clear = 0;
+ u64 cptr_set = 0;
+
+ /* Trap/constrain PMU */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
+ mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
+ mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
+ MDCR_EL2_HPMN_MASK;
+ }
+
+ /* Trap Debug */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
+ mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
+
+ /* Trap OS Double Lock */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
+ mdcr_set |= MDCR_EL2_TDOSA;
+
+ /* Trap SPE */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
+ mdcr_set |= MDCR_EL2_TPMS;
+ mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
+ }
+
+ /* Trap Trace Filter */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
+ mdcr_set |= MDCR_EL2_TTRF;
+
+ /* Trap Trace */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
+ cptr_set |= CPTR_EL2_TTA;
+
+ vcpu->arch.mdcr_el2 |= mdcr_set;
+ vcpu->arch.mdcr_el2 &= ~mdcr_clear;
+ vcpu->arch.cptr_el2 |= cptr_set;
+}
+
+/*
+ * Set trap register values based on features in ID_AA64MMFR0.
+ */
+static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
+{
+ const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR0_EL1);
+ u64 mdcr_set = 0;
+
+ /* Trap Debug Communications Channel registers */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
+ mdcr_set |= MDCR_EL2_TDCC;
+
+ vcpu->arch.mdcr_el2 |= mdcr_set;
+}
+
+/*
+ * Set trap register values based on features in ID_AA64MMFR1.
+ */
+static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
+{
+ const u64 feature_ids = pvm_read_id_reg(vcpu, SYS_ID_AA64MMFR1_EL1);
+ u64 hcr_set = 0;
+
+ /* Trap LOR */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
+ hcr_set |= HCR_TLOR;
+
+ vcpu->arch.hcr_el2 |= hcr_set;
+}
+
+/*
+ * Set baseline trap register values.
+ */
+static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
+{
+ const u64 hcr_trap_feat_regs = HCR_TID3;
+ const u64 hcr_trap_impdef = HCR_TACR | HCR_TIDCP | HCR_TID1;
+
+ /*
+ * Always trap:
+ * - Feature id registers: to control features exposed to guests
+ * - Implementation-defined features
+ */
+ vcpu->arch.hcr_el2 |= hcr_trap_feat_regs | hcr_trap_impdef;
+
+ /* Clear res0 and set res1 bits to trap potential new features. */
+ vcpu->arch.hcr_el2 &= ~(HCR_RES0);
+ vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
+ vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
+ vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+}
+
+/*
+ * Initialize trap register values for protected VMs.
+ */
+void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
+{
+ pvm_init_trap_regs(vcpu);
+ pvm_init_traps_aa64pfr0(vcpu);
+ pvm_init_traps_aa64pfr1(vcpu);
+ pvm_init_traps_aa64dfr0(vcpu);
+ pvm_init_traps_aa64mmfr0(vcpu);
+ pvm_init_traps_aa64mmfr1(vcpu);
+}
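The pvm_init_traps_*() helpers above all follow the same shape: read the guest's view of an ID register, test a feature field, and accumulate bits to set or clear in a trap register. A minimal stand-alone sketch of that shape (plain C; field_get(), the mask and the trap bit are stand-ins, not the kernel's FIELD_GET(), HCR or MDCR definitions):

#include <stdint.h>

#define EXAMPLE_ID_FIELD	0x000000000000f000ULL	/* stand-in 4-bit ID field */
#define EXAMPLE_TRAP_BIT	(1ULL << 13)		/* stand-in trap control bit */

/* Extract the field selected by a contiguous mask (what FIELD_GET() does). */
static uint64_t field_get(uint64_t mask, uint64_t reg)
{
	return (reg & mask) / (mask & -mask);
}

/* If the feature is absent from the guest's view, trap accesses to it. */
static uint64_t traps_for(uint64_t feature_ids, uint64_t trap_reg)
{
	if (!field_get(EXAMPLE_ID_FIELD, feature_ids))
		trap_reg |= EXAMPLE_TRAP_BIT;

	return trap_reg;
}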
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 57c27846320f..862c7b514e20 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -10,6 +10,7 @@
#include <asm/kvm_pgtable.h>
#include <nvhe/early_alloc.h>
+#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
@@ -260,6 +261,8 @@ int __pkvm_init(phys_addr_t phys, unsigned long size, unsigned long nr_cpus,
void (*fn)(phys_addr_t params_pa, void *finalize_fn_va);
int ret;
+ BUG_ON(kvm_check_pvm_sysreg_table());
+
if (!PAGE_ALIGNED(phys) || !PAGE_ALIGNED(size))
return -EINVAL;
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index a34b01cc8ab9..c0e3fed26d93 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -27,6 +27,7 @@
#include <asm/processor.h>
#include <asm/thread_info.h>
+#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
/* Non-VHE specific context */
@@ -158,6 +159,101 @@ static void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
write_sysreg(pmu->events_host, pmcntenset_el0);
}
+/**
+ * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
+ *
+ * Returns true if the hypervisor has handled the exit, and control should go
+ * back to the guest, or false if it hasn't.
+ */
+static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ /*
+ * Make sure we handle the exit for workarounds and ptrauth
+ * before the pKVM handling, as the latter could decide to
+ * UNDEF.
+ */
+ return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
+ kvm_handle_pvm_sysreg(vcpu, exit_code));
+}
+
+/**
+ * Handler for protected floating-point and Advanced SIMD accesses.
+ *
+ * Returns true if the hypervisor has handled the exit, and control should go
+ * back to the guest, or false if it hasn't.
+ */
+static bool kvm_handle_pvm_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ /* Linux guests assume support for floating-point and Advanced SIMD. */
+ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+ PVM_ID_AA64PFR0_ALLOW));
+ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+ PVM_ID_AA64PFR0_ALLOW));
+
+ return kvm_hyp_handle_fpsimd(vcpu, exit_code);
+}
+
+static const exit_handler_fn hyp_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = NULL,
+ [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
+ [ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
+ [ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
+ [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
+ [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
+ [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
+ [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+};
+
+static const exit_handler_fn pvm_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = NULL,
+ [ESR_ELx_EC_SYS64] = kvm_handle_pvm_sys64,
+ [ESR_ELx_EC_SVE] = kvm_handle_pvm_restricted,
+ [ESR_ELx_EC_FP_ASIMD] = kvm_handle_pvm_fpsimd,
+ [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
+ [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
+ [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+};
+
+static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
+{
+ if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
+ return pvm_exit_handlers;
+
+ return hyp_exit_handlers;
+}
+
+/*
+ * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
+ * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
+ * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
+ * hypervisor spots a guest in such a state ensure it is handled, and don't
+ * trust the host to spot or fix it. The check below is based on the one in
+ * kvm_arch_vcpu_ioctl_run().
+ *
+ * Returns false if the guest ran in AArch32 when it shouldn't have, and
+ * thus should exit to the host, or true if the guest run loop can continue.
+ */
+static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+
+ if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
+ /*
+ * As we have caught the guest red-handed, decide that it isn't
+ * fit for purpose anymore by making the vcpu invalid. The VMM
+ * can try and fix it by re-initializing the vcpu with
+ * KVM_ARM_VCPU_INIT, however, this is likely not possible for
+ * protected VMs.
+ */
+ vcpu->arch.target = -1;
+ *exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
+ *exit_code |= ARM_EXCEPTION_IL;
+ return false;
+ }
+
+ return true;
+}
+
/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
@@ -220,6 +316,9 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
/* Jump in the fire! */
exit_code = __guest_enter(vcpu);
+ if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
+ break;
+
/* And we're baaack! */
} while (fixup_guest_exit(vcpu, &exit_code));
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
new file mode 100644
index 000000000000..3787ee6fb1a2
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Google LLC
+ * Author: Fuad Tabba <tabba@google.com>
+ */
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include <hyp/adjust_pc.h>
+
+#include <nvhe/fixed_config.h>
+
+#include "../../sys_regs.h"
+
+/*
+ * Copies of the host's CPU features registers holding sanitized values at hyp.
+ */
+u64 id_aa64pfr0_el1_sys_val;
+u64 id_aa64pfr1_el1_sys_val;
+u64 id_aa64isar0_el1_sys_val;
+u64 id_aa64isar1_el1_sys_val;
+u64 id_aa64mmfr0_el1_sys_val;
+u64 id_aa64mmfr1_el1_sys_val;
+u64 id_aa64mmfr2_el1_sys_val;
+
+/*
+ * Inject an unknown/undefined exception to an AArch64 guest while most of its
+ * sysregs are live.
+ */
+static void inject_undef64(struct kvm_vcpu *vcpu)
+{
+ u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
+
+ *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
+ *vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
+
+ vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
+ KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
+ KVM_ARM64_PENDING_EXCEPTION);
+
+ __kvm_adjust_pc(vcpu);
+
+ write_sysreg_el1(esr, SYS_ESR);
+ write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
+ write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
+ write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+}
+
+/*
+ * Returns the restricted features values of the feature register based on the
+ * limitations in restrict_fields.
+ * A feature id field value of 0b0000 does not impose any restrictions.
+ * Note: Use only for unsigned feature field values.
+ */
+static u64 get_restricted_features_unsigned(u64 sys_reg_val,
+ u64 restrict_fields)
+{
+ u64 value = 0UL;
+ u64 mask = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+
+ /*
+ * According to the Arm Architecture Reference Manual, feature fields
+ * use increasing values to indicate increases in functionality.
+ * Iterate over the restricted feature fields and calculate the minimum
+ * unsigned value between the one supported by the system, and what the
+ * value is being restricted to.
+ */
+ while (sys_reg_val && restrict_fields) {
+ value |= min(sys_reg_val & mask, restrict_fields & mask);
+ sys_reg_val &= ~mask;
+ restrict_fields &= ~mask;
+ mask <<= ARM64_FEATURE_FIELD_BITS;
+ }
+
+ return value;
+}
+
+/*
+ * Functions that return the value of feature id registers for protected VMs
+ * based on allowed features, system features, and KVM support.
+ */
+
+static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
+{
+ const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
+ u64 set_mask = 0;
+ u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
+
+ if (!vcpu_has_sve(vcpu))
+ allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
+
+ set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
+
+ /* Spectre and Meltdown mitigation in KVM */
+ set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
+ (u64)kvm->arch.pfr0_csv2);
+ set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
+ (u64)kvm->arch.pfr0_csv3);
+
+ return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
+}
+
+static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
+{
+ const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
+ u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
+
+ if (!kvm_has_mte(kvm))
+ allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+
+ return id_aa64pfr1_el1_sys_val & allow_mask;
+}
+
+static u64 get_pvm_id_aa64zfr0(const struct kvm_vcpu *vcpu)
+{
+ /*
+ * No support for Scalable Vectors, therefore, hyp has no sanitized
+ * copy of the feature id register.
+ */
+ BUILD_BUG_ON(PVM_ID_AA64ZFR0_ALLOW != 0ULL);
+ return 0;
+}
+
+static u64 get_pvm_id_aa64dfr0(const struct kvm_vcpu *vcpu)
+{
+ /*
+ * No support for debug, including breakpoints, and watchpoints,
+ * therefore, pKVM has no sanitized copy of the feature id register.
+ */
+ BUILD_BUG_ON(PVM_ID_AA64DFR0_ALLOW != 0ULL);
+ return 0;
+}
+
+static u64 get_pvm_id_aa64dfr1(const struct kvm_vcpu *vcpu)
+{
+ /*
+ * No support for debug, therefore, hyp has no sanitized copy of the
+ * feature id register.
+ */
+ BUILD_BUG_ON(PVM_ID_AA64DFR1_ALLOW != 0ULL);
+ return 0;
+}
+
+static u64 get_pvm_id_aa64afr0(const struct kvm_vcpu *vcpu)
+{
+ /*
+ * No support for implementation defined features, therefore, hyp has no
+ * sanitized copy of the feature id register.
+ */
+ BUILD_BUG_ON(PVM_ID_AA64AFR0_ALLOW != 0ULL);
+ return 0;
+}
+
+static u64 get_pvm_id_aa64afr1(const struct kvm_vcpu *vcpu)
+{
+ /*
+ * No support for implementation defined features, therefore, hyp has no
+ * sanitized copy of the feature id register.
+ */
+ BUILD_BUG_ON(PVM_ID_AA64AFR1_ALLOW != 0ULL);
+ return 0;
+}
+
+static u64 get_pvm_id_aa64isar0(const struct kvm_vcpu *vcpu)
+{
+ return id_aa64isar0_el1_sys_val & PVM_ID_AA64ISAR0_ALLOW;
+}
+
+static u64 get_pvm_id_aa64isar1(const struct kvm_vcpu *vcpu)
+{
+ u64 allow_mask = PVM_ID_AA64ISAR1_ALLOW;
+
+ if (!vcpu_has_ptrauth(vcpu))
+ allow_mask &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
+ ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
+
+ return id_aa64isar1_el1_sys_val & allow_mask;
+}
+
+static u64 get_pvm_id_aa64mmfr0(const struct kvm_vcpu *vcpu)
+{
+ u64 set_mask;
+
+ set_mask = get_restricted_features_unsigned(id_aa64mmfr0_el1_sys_val,
+ PVM_ID_AA64MMFR0_RESTRICT_UNSIGNED);
+
+ return (id_aa64mmfr0_el1_sys_val & PVM_ID_AA64MMFR0_ALLOW) | set_mask;
+}
+
+static u64 get_pvm_id_aa64mmfr1(const struct kvm_vcpu *vcpu)
+{
+ return id_aa64mmfr1_el1_sys_val & PVM_ID_AA64MMFR1_ALLOW;
+}
+
+static u64 get_pvm_id_aa64mmfr2(const struct kvm_vcpu *vcpu)
+{
+ return id_aa64mmfr2_el1_sys_val & PVM_ID_AA64MMFR2_ALLOW;
+}
+
+/* Read a sanitized cpufeature ID register by its encoding */
+u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id)
+{
+ switch (id) {
+ case SYS_ID_AA64PFR0_EL1:
+ return get_pvm_id_aa64pfr0(vcpu);
+ case SYS_ID_AA64PFR1_EL1:
+ return get_pvm_id_aa64pfr1(vcpu);
+ case SYS_ID_AA64ZFR0_EL1:
+ return get_pvm_id_aa64zfr0(vcpu);
+ case SYS_ID_AA64DFR0_EL1:
+ return get_pvm_id_aa64dfr0(vcpu);
+ case SYS_ID_AA64DFR1_EL1:
+ return get_pvm_id_aa64dfr1(vcpu);
+ case SYS_ID_AA64AFR0_EL1:
+ return get_pvm_id_aa64afr0(vcpu);
+ case SYS_ID_AA64AFR1_EL1:
+ return get_pvm_id_aa64afr1(vcpu);
+ case SYS_ID_AA64ISAR0_EL1:
+ return get_pvm_id_aa64isar0(vcpu);
+ case SYS_ID_AA64ISAR1_EL1:
+ return get_pvm_id_aa64isar1(vcpu);
+ case SYS_ID_AA64MMFR0_EL1:
+ return get_pvm_id_aa64mmfr0(vcpu);
+ case SYS_ID_AA64MMFR1_EL1:
+ return get_pvm_id_aa64mmfr1(vcpu);
+ case SYS_ID_AA64MMFR2_EL1:
+ return get_pvm_id_aa64mmfr2(vcpu);
+ default:
+ /*
+ * Should never happen because all cases are covered in
+ * pvm_sys_reg_descs[].
+ */
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+static u64 read_id_reg(const struct kvm_vcpu *vcpu,
+ struct sys_reg_desc const *r)
+{
+ return pvm_read_id_reg(vcpu, reg_to_encoding(r));
+}
+
+/* Handler to RAZ/WI sysregs */
+static bool pvm_access_raz_wi(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (!p->is_write)
+ p->regval = 0;
+
+ return true;
+}
+
+/*
+ * Accessor for AArch32 feature id registers.
+ *
+ * The value of these registers is "unknown" according to the spec if AArch32
+ * isn't supported.
+ */
+static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ inject_undef64(vcpu);
+ return false;
+ }
+
+ /*
+ * No support for AArch32 guests, therefore, pKVM has no sanitized copy
+ * of AArch32 feature id registers.
+ */
+ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);
+
+ return pvm_access_raz_wi(vcpu, p, r);
+}
+
+/*
+ * Accessor for AArch64 feature id registers.
+ *
+ * If access is allowed, set the regval to the protected VM's view of the
+ * register and return true.
+ * Otherwise, inject an undefined exception and return false.
+ */
+static bool pvm_access_id_aarch64(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (p->is_write) {
+ inject_undef64(vcpu);
+ return false;
+ }
+
+ p->regval = read_id_reg(vcpu, r);
+ return true;
+}
+
+static bool pvm_gic_read_sre(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ /* pVMs only support GICv3. 'nuf said. */
+ if (!p->is_write)
+ p->regval = ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB | ICC_SRE_EL1_SRE;
+
+ return true;
+}
+
+/* Mark the specified system register as an AArch32 feature id register. */
+#define AARCH32(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch32 }
+
+/* Mark the specified system register as an AArch64 feature id register. */
+#define AARCH64(REG) { SYS_DESC(REG), .access = pvm_access_id_aarch64 }
+
+/* Mark the specified system register as Read-As-Zero/Write-Ignored */
+#define RAZ_WI(REG) { SYS_DESC(REG), .access = pvm_access_raz_wi }
+
+/* Mark the specified system register as not being handled in hyp. */
+#define HOST_HANDLED(REG) { SYS_DESC(REG), .access = NULL }
+
+/*
+ * Architected system registers.
+ * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
+ *
+ * NOTE: Anything not explicitly listed here is *restricted by default*, i.e.,
+ * it will lead to injecting an exception into the guest.
+ */
+static const struct sys_reg_desc pvm_sys_reg_descs[] = {
+ /* Cache maintenance by set/way operations are restricted. */
+
+ /* Debug and Trace Registers are restricted. */
+
+ /* AArch64 mappings of the AArch32 ID registers */
+ /* CRm=1 */
+ AARCH32(SYS_ID_PFR0_EL1),
+ AARCH32(SYS_ID_PFR1_EL1),
+ AARCH32(SYS_ID_DFR0_EL1),
+ AARCH32(SYS_ID_AFR0_EL1),
+ AARCH32(SYS_ID_MMFR0_EL1),
+ AARCH32(SYS_ID_MMFR1_EL1),
+ AARCH32(SYS_ID_MMFR2_EL1),
+ AARCH32(SYS_ID_MMFR3_EL1),
+
+ /* CRm=2 */
+ AARCH32(SYS_ID_ISAR0_EL1),
+ AARCH32(SYS_ID_ISAR1_EL1),
+ AARCH32(SYS_ID_ISAR2_EL1),
+ AARCH32(SYS_ID_ISAR3_EL1),
+ AARCH32(SYS_ID_ISAR4_EL1),
+ AARCH32(SYS_ID_ISAR5_EL1),
+ AARCH32(SYS_ID_MMFR4_EL1),
+ AARCH32(SYS_ID_ISAR6_EL1),
+
+ /* CRm=3 */
+ AARCH32(SYS_MVFR0_EL1),
+ AARCH32(SYS_MVFR1_EL1),
+ AARCH32(SYS_MVFR2_EL1),
+ AARCH32(SYS_ID_PFR2_EL1),
+ AARCH32(SYS_ID_DFR1_EL1),
+ AARCH32(SYS_ID_MMFR5_EL1),
+
+ /* AArch64 ID registers */
+ /* CRm=4 */
+ AARCH64(SYS_ID_AA64PFR0_EL1),
+ AARCH64(SYS_ID_AA64PFR1_EL1),
+ AARCH64(SYS_ID_AA64ZFR0_EL1),
+ AARCH64(SYS_ID_AA64DFR0_EL1),
+ AARCH64(SYS_ID_AA64DFR1_EL1),
+ AARCH64(SYS_ID_AA64AFR0_EL1),
+ AARCH64(SYS_ID_AA64AFR1_EL1),
+ AARCH64(SYS_ID_AA64ISAR0_EL1),
+ AARCH64(SYS_ID_AA64ISAR1_EL1),
+ AARCH64(SYS_ID_AA64MMFR0_EL1),
+ AARCH64(SYS_ID_AA64MMFR1_EL1),
+ AARCH64(SYS_ID_AA64MMFR2_EL1),
+
+ /* Scalable Vector Registers are restricted. */
+
+ RAZ_WI(SYS_ERRIDR_EL1),
+ RAZ_WI(SYS_ERRSELR_EL1),
+ RAZ_WI(SYS_ERXFR_EL1),
+ RAZ_WI(SYS_ERXCTLR_EL1),
+ RAZ_WI(SYS_ERXSTATUS_EL1),
+ RAZ_WI(SYS_ERXADDR_EL1),
+ RAZ_WI(SYS_ERXMISC0_EL1),
+ RAZ_WI(SYS_ERXMISC1_EL1),
+
+ /* Performance Monitoring Registers are restricted. */
+
+ /* Limited Ordering Regions Registers are restricted. */
+
+ HOST_HANDLED(SYS_ICC_SGI1R_EL1),
+ HOST_HANDLED(SYS_ICC_ASGI1R_EL1),
+ HOST_HANDLED(SYS_ICC_SGI0R_EL1),
+ { SYS_DESC(SYS_ICC_SRE_EL1), .access = pvm_gic_read_sre, },
+
+ HOST_HANDLED(SYS_CCSIDR_EL1),
+ HOST_HANDLED(SYS_CLIDR_EL1),
+ HOST_HANDLED(SYS_CSSELR_EL1),
+ HOST_HANDLED(SYS_CTR_EL0),
+
+ /* Performance Monitoring Registers are restricted. */
+
+ /* Activity Monitoring Registers are restricted. */
+
+ HOST_HANDLED(SYS_CNTP_TVAL_EL0),
+ HOST_HANDLED(SYS_CNTP_CTL_EL0),
+ HOST_HANDLED(SYS_CNTP_CVAL_EL0),
+
+ /* Performance Monitoring Registers are restricted. */
+};
+
+/*
+ * Checks that the sysreg table is unique and in-order.
+ *
+ * Returns 0 if the table is consistent, or 1 otherwise.
+ */
+int kvm_check_pvm_sysreg_table(void)
+{
+ unsigned int i;
+
+ for (i = 1; i < ARRAY_SIZE(pvm_sys_reg_descs); i++) {
+ if (cmp_sys_reg(&pvm_sys_reg_descs[i-1], &pvm_sys_reg_descs[i]) >= 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Handler for protected VM MSR, MRS or System instruction execution.
+ *
+ * Returns true if the hypervisor has handled the exit, and control should go
+ * back to the guest, or false if it hasn't, to be handled by the host.
+ */
+bool kvm_handle_pvm_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ const struct sys_reg_desc *r;
+ struct sys_reg_params params;
+ unsigned long esr = kvm_vcpu_get_esr(vcpu);
+ int Rt = kvm_vcpu_sys_get_rt(vcpu);
+
+ params = esr_sys64_to_params(esr);
+ params.regval = vcpu_get_reg(vcpu, Rt);
+
+ r = find_reg(&params, pvm_sys_reg_descs, ARRAY_SIZE(pvm_sys_reg_descs));
+
+ /* Undefined (RESTRICTED). */
+ if (r == NULL) {
+ inject_undef64(vcpu);
+ return true;
+ }
+
+ /* Handled by the host (HOST_HANDLED) */
+ if (r->access == NULL)
+ return false;
+
+ /* Handled by hyp: skip instruction if instructed to do so. */
+ if (r->access(vcpu, &params, r))
+ __kvm_skip_instr(vcpu);
+
+ if (!params.is_write)
+ vcpu_set_reg(vcpu, Rt, params.regval);
+
+ return true;
+}
+
+/**
+ * Handler for protected VM restricted exceptions.
+ *
+ * Inject an undefined exception into the guest and return true to indicate that
+ * the hypervisor has handled the exit, and control should go back to the guest.
+ */
+bool kvm_handle_pvm_restricted(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ inject_undef64(vcpu);
+ return true;
+}
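get_restricted_features_unsigned() above walks the register one ID field at a time and keeps the per-field unsigned minimum of the host value and the restriction. A self-contained worked example (plain C with placeholder input values; ARM64_FEATURE_FIELD_BITS is 4 in the kernel):

#include <stdint.h>
#include <stdio.h>

#define FIELD_BITS	4

/* Per-field unsigned minimum, mirroring the hyp helper above. */
static uint64_t restrict_unsigned(uint64_t sys_val, uint64_t restrict_fields)
{
	uint64_t value = 0, mask = (1ULL << FIELD_BITS) - 1;

	while (sys_val && restrict_fields) {
		uint64_t a = sys_val & mask, b = restrict_fields & mask;

		value |= (a < b) ? a : b;
		sys_val &= ~mask;
		restrict_fields &= ~mask;
		mask <<= FIELD_BITS;
	}

	return value;
}

int main(void)
{
	/* Host reports 0x52 across two fields, the restriction caps them at 0x21. */
	printf("%llx\n", (unsigned long long)restrict_unsigned(0x52, 0x21));
	/* Prints 21: each 4-bit field is clamped independently. */
	return 0;
}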
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 39f8f7f9227c..20db2f281cf2 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -695,9 +695,7 @@ static void __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
goto spurious;
lr_val &= ~ICH_LR_STATE;
- /* No active state for LPIs */
- if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
- lr_val |= ICH_LR_ACTIVE_BIT;
+ lr_val |= ICH_LR_ACTIVE_BIT;
__gic_v3_set_lr(lr_val, lr);
__vgic_v3_set_active_priority(lr_prio, vmcr, grp);
vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
@@ -764,20 +762,18 @@ static void __vgic_v3_write_eoir(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
/* Drop priority in any case */
act_prio = __vgic_v3_clear_highest_active_priority();
- /* If EOIing an LPI, no deactivate to be performed */
- if (vid >= VGIC_MIN_LPI)
- return;
-
- /* EOImode == 1, nothing to be done here */
- if (vmcr & ICH_VMCR_EOIM_MASK)
- return;
-
lr = __vgic_v3_find_active_lr(vcpu, vid, &lr_val);
if (lr == -1) {
- __vgic_v3_bump_eoicount();
+ /* Do not bump EOIcount for LPIs that aren't in the LRs */
+ if (!(vid >= VGIC_MIN_LPI))
+ __vgic_v3_bump_eoicount();
return;
}
+ /* EOImode == 1 and not an LPI, nothing to be done here */
+ if ((vmcr & ICH_VMCR_EOIM_MASK) && !(vid >= VGIC_MIN_LPI))
+ return;
+
lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
/* If priorities or group do not match, the guest has fscked-up. */
@@ -987,8 +983,6 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
/* IDbits */
val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
- /* SEIS */
- val |= ((vtr >> 22) & 1) << ICC_CTLR_EL1_SEIS_SHIFT;
/* A3V */
val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
/* EOImode */
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index ded2c66675f0..5a2cb5d9bc4b 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -96,6 +96,22 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
__deactivate_traps_common(vcpu);
}
+static const exit_handler_fn hyp_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = NULL,
+ [ESR_ELx_EC_CP15_32] = kvm_hyp_handle_cp15_32,
+ [ESR_ELx_EC_SYS64] = kvm_hyp_handle_sysreg,
+ [ESR_ELx_EC_SVE] = kvm_hyp_handle_fpsimd,
+ [ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
+ [ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
+ [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
+ [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+};
+
+static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
+{
+ return hyp_exit_handlers;
+}
+
/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 69bd1732a299..326cdfec74a1 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -512,7 +512,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
return -EINVAL;
}
- pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
+ pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT);
if (!pgt)
return -ENOMEM;
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index 2af3c37445e0..a5e4bbf5e68f 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -978,7 +978,7 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
mutex_lock(&vcpu->kvm->lock);
if (!vcpu->kvm->arch.pmu_filter) {
- vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL);
+ vcpu->kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
if (!vcpu->kvm->arch.pmu_filter) {
mutex_unlock(&vcpu->kvm->lock);
return -ENOMEM;
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 5ce36b0a3343..acad22ebac12 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -106,7 +106,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
vl > SVE_VL_ARCH_MAX))
return -EIO;
- buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL);
+ buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);
if (!buf)
return -ENOMEM;
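
The hunks above (stage-2 page-table state, the PMU event filter bitmap, the SVE register buffer), like the vgic hunks further down, move VM-lifetime allocations from GFP_KERNEL to GFP_KERNEL_ACCOUNT so the memory is charged to the memory cgroup of the task that created the VM. A minimal kernel-style sketch of the pattern; only the gfp flag is taken from the patch, the structure and function here are invented.

#include <linux/slab.h>
#include <linux/errno.h>

/* hypothetical per-VM table, sized by a userspace-controlled count */
struct demo_vm_table {
	unsigned long *entries;
	unsigned int nr;
};

static int demo_vm_table_alloc(struct demo_vm_table *t, unsigned int nr)
{
	/*
	 * GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT: the allocation is
	 * charged to the calling task's memory cgroup, so kernel memory that
	 * scales with guest configuration stays bounded by the VMM's memcg.
	 */
	t->entries = kcalloc(nr, sizeof(*t->entries), GFP_KERNEL_ACCOUNT);
	if (!t->entries)
		return -ENOMEM;
	t->nr = nr;
	return 0;
}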
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 1d46e185f31e..e3ec1a44f94d 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1064,7 +1064,12 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
struct sys_reg_desc const *r, bool raz)
{
u32 id = reg_to_encoding(r);
- u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
+ u64 val;
+
+ if (raz)
+ return 0;
+
+ val = read_sanitised_ftr_reg(id);
switch (id) {
case SYS_ID_AA64PFR0_EL1:
@@ -1075,16 +1080,15 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
+ if (irqchip_in_kernel(vcpu->kvm) &&
+ vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
+ val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
+ }
break;
case SYS_ID_AA64PFR1_EL1:
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
- if (kvm_has_mte(vcpu->kvm)) {
- u64 pfr, mte;
-
- pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
- mte = cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR1_MTE_SHIFT);
- val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), mte);
- }
+ if (!kvm_has_mte(vcpu->kvm))
+ val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
@@ -1268,16 +1272,19 @@ static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
return __set_id_reg(vcpu, rd, uaddr, raz);
}
-static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __get_id_reg(vcpu, rd, uaddr, true);
+ return __set_id_reg(vcpu, rd, uaddr, true);
}
-static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
- const struct kvm_one_reg *reg, void __user *uaddr)
+static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
{
- return __set_id_reg(vcpu, rd, uaddr, true);
+ const u64 id = sys_reg_to_index(rd);
+ const u64 val = 0;
+
+ return reg_to_user(uaddr, &val, id);
}
static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
@@ -1388,7 +1395,7 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
#define ID_UNALLOCATED(crm, op2) { \
Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
.access = access_raz_id_reg, \
- .get_user = get_raz_id_reg, \
+ .get_user = get_raz_reg, \
.set_user = set_raz_id_reg, \
}
@@ -1400,7 +1407,7 @@ static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
#define ID_HIDDEN(name) { \
SYS_DESC(SYS_##name), \
.access = access_raz_id_reg, \
- .get_user = get_raz_id_reg, \
+ .get_user = get_raz_reg, \
.set_user = set_raz_id_reg, \
}
@@ -1642,7 +1649,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
* previously (and pointlessly) advertised in the past...
*/
{ PMU_SYS_REG(SYS_PMSWINC_EL0),
- .get_user = get_raz_id_reg, .set_user = set_wi_reg,
+ .get_user = get_raz_reg, .set_user = set_wi_reg,
.access = access_pmswinc, .reset = NULL },
{ PMU_SYS_REG(SYS_PMSELR_EL0),
.access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 340c51d87677..0a06d0648970 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -134,7 +134,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
struct kvm_vcpu *vcpu0 = kvm_get_vcpu(kvm, 0);
int i;
- dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL);
+ dist->spis = kcalloc(nr_spis, sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
if (!dist->spis)
return -ENOMEM;
diff --git a/arch/arm64/kvm/vgic/vgic-irqfd.c b/arch/arm64/kvm/vgic/vgic-irqfd.c
index 79f8899b234c..475059bacedf 100644
--- a/arch/arm64/kvm/vgic/vgic-irqfd.c
+++ b/arch/arm64/kvm/vgic/vgic-irqfd.c
@@ -139,7 +139,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
u32 nr = dist->nr_spis;
int i, ret;
- entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
+ entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL_ACCOUNT);
if (!entries)
return -ENOMEM;
diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c
index 61728c543eb9..089fc2ffcb43 100644
--- a/arch/arm64/kvm/vgic/vgic-its.c
+++ b/arch/arm64/kvm/vgic/vgic-its.c
@@ -48,7 +48,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
if (irq)
return irq;
- irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
+ irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL_ACCOUNT);
if (!irq)
return ERR_PTR(-ENOMEM);
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
* we must be careful not to overrun the array.
*/
irq_count = READ_ONCE(dist->lpi_list_count);
- intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
+ intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL_ACCOUNT);
if (!intids)
return -ENOMEM;
@@ -985,7 +985,7 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
return E_ITS_MAPC_COLLECTION_OOR;
- collection = kzalloc(sizeof(*collection), GFP_KERNEL);
+ collection = kzalloc(sizeof(*collection), GFP_KERNEL_ACCOUNT);
if (!collection)
return -ENOMEM;
@@ -1029,7 +1029,7 @@ static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
{
struct its_ite *ite;
- ite = kzalloc(sizeof(*ite), GFP_KERNEL);
+ ite = kzalloc(sizeof(*ite), GFP_KERNEL_ACCOUNT);
if (!ite)
return ERR_PTR(-ENOMEM);
@@ -1150,7 +1150,7 @@ static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
{
struct its_device *device;
- device = kzalloc(sizeof(*device), GFP_KERNEL);
+ device = kzalloc(sizeof(*device), GFP_KERNEL_ACCOUNT);
if (!device)
return ERR_PTR(-ENOMEM);
@@ -1847,7 +1847,7 @@ void vgic_lpi_translation_cache_init(struct kvm *kvm)
struct vgic_translation_cache_entry *cte;
/* An allocation failure is not fatal */
- cte = kzalloc(sizeof(*cte), GFP_KERNEL);
+ cte = kzalloc(sizeof(*cte), GFP_KERNEL_ACCOUNT);
if (WARN_ON(!cte))
break;
@@ -1888,7 +1888,7 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
return -ENODEV;
- its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
+ its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL_ACCOUNT);
if (!its)
return -ENOMEM;
@@ -2710,8 +2710,8 @@ static int vgic_its_set_attr(struct kvm_device *dev,
if (copy_from_user(&addr, uaddr, sizeof(addr)))
return -EFAULT;
- ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
- addr, SZ_64K);
+ ret = vgic_check_iorange(dev->kvm, its->vgic_its_base,
+ addr, SZ_64K, KVM_VGIC_V3_ITS_SIZE);
if (ret)
return ret;
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index 7740995de982..0d000d2fe8d2 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -14,17 +14,21 @@
/* common helpers */
-int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
- phys_addr_t addr, phys_addr_t alignment)
+int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
+ phys_addr_t addr, phys_addr_t alignment,
+ phys_addr_t size)
{
- if (addr & ~kvm_phys_mask(kvm))
- return -E2BIG;
+ if (!IS_VGIC_ADDR_UNDEF(ioaddr))
+ return -EEXIST;
- if (!IS_ALIGNED(addr, alignment))
+ if (!IS_ALIGNED(addr, alignment) || !IS_ALIGNED(size, alignment))
return -EINVAL;
- if (!IS_VGIC_ADDR_UNDEF(*ioaddr))
- return -EEXIST;
+ if (addr + size < addr)
+ return -EINVAL;
+
+ if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm))
+ return -E2BIG;
return 0;
}
@@ -57,7 +61,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
{
int r = 0;
struct vgic_dist *vgic = &kvm->arch.vgic;
- phys_addr_t *addr_ptr, alignment;
+ phys_addr_t *addr_ptr, alignment, size;
u64 undef_value = VGIC_ADDR_UNDEF;
mutex_lock(&kvm->lock);
@@ -66,16 +70,19 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
addr_ptr = &vgic->vgic_dist_base;
alignment = SZ_4K;
+ size = KVM_VGIC_V2_DIST_SIZE;
break;
case KVM_VGIC_V2_ADDR_TYPE_CPU:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
addr_ptr = &vgic->vgic_cpu_base;
alignment = SZ_4K;
+ size = KVM_VGIC_V2_CPU_SIZE;
break;
case KVM_VGIC_V3_ADDR_TYPE_DIST:
r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V3);
addr_ptr = &vgic->vgic_dist_base;
alignment = SZ_64K;
+ size = KVM_VGIC_V3_DIST_SIZE;
break;
case KVM_VGIC_V3_ADDR_TYPE_REDIST: {
struct vgic_redist_region *rdreg;
@@ -140,7 +147,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
goto out;
if (write) {
- r = vgic_check_ioaddr(kvm, addr_ptr, *addr, alignment);
+ r = vgic_check_iorange(kvm, *addr_ptr, *addr, alignment, size);
if (!r)
*addr_ptr = *addr;
} else {
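
vgic_check_ioaddr() only validated the base address; the new vgic_check_iorange() validates the whole MMIO window: the region must still be undefined, base and size must both be aligned, base + size must not wrap around, and the window must fit below the guest's IPA limit. The stand-alone program below (with a made-up IPA limit standing in for kvm_phys_size()) shows the same overflow-safe check.

#include <stdint.h>
#include <stdio.h>

#define IPA_LIMIT	(1ULL << 40)	/* stand-in for kvm_phys_size(kvm) */

/* returns 0 when [addr, addr + size) is aligned, non-wrapping and in range */
static int check_iorange(uint64_t addr, uint64_t size, uint64_t align)
{
	if ((addr & (align - 1)) || (size & (align - 1)))
		return -1;			/* misaligned base or size */
	if (addr + size < addr)
		return -1;			/* 64-bit wrap-around      */
	if (addr >= IPA_LIMIT || addr + size > IPA_LIMIT)
		return -1;			/* beyond the guest's IPA  */
	return 0;
}

int main(void)
{
	printf("%d\n", check_iorange(0x8000000, 0x10000, 0x10000));		/* ok: 0   */
	printf("%d\n", check_iorange(UINT64_MAX - 0xffff, 0x20000, 0x10000));	/* wraps   */
	return 0;
}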
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index a09cdc0b953c..bf7ec4a78497 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -796,7 +796,9 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
struct vgic_dist *d = &kvm->arch.vgic;
struct vgic_redist_region *rdreg;
struct list_head *rd_regions = &d->rd_regions;
- size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
+ int nr_vcpus = atomic_read(&kvm->online_vcpus);
+ size_t size = count ? count * KVM_VGIC_V3_REDIST_SIZE
+ : nr_vcpus * KVM_VGIC_V3_REDIST_SIZE;
int ret;
/* cross the end of memory ? */
@@ -834,13 +836,13 @@ static int vgic_v3_alloc_redist_region(struct kvm *kvm, uint32_t index,
if (vgic_v3_rdist_overlap(kvm, base, size))
return -EINVAL;
- rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
+ rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL_ACCOUNT);
if (!rdreg)
return -ENOMEM;
rdreg->base = VGIC_ADDR_UNDEF;
- ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K);
+ ret = vgic_check_iorange(kvm, rdreg->base, base, SZ_64K, size);
if (ret)
goto free;
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 21a6207fb2ee..04f62c4b07fb 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -15,6 +15,7 @@
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
+static bool dir_trap;
static bool gicv4_enable;
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
@@ -296,6 +297,8 @@ void vgic_v3_enable(struct kvm_vcpu *vcpu)
vgic_v3->vgic_hcr |= ICH_HCR_TALL1;
if (common_trap)
vgic_v3->vgic_hcr |= ICH_HCR_TC;
+ if (dir_trap)
+ vgic_v3->vgic_hcr |= ICH_HCR_TDIR;
}
int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq)
@@ -483,8 +486,10 @@ bool vgic_v3_check_base(struct kvm *kvm)
return false;
list_for_each_entry(rdreg, &d->rd_regions, list) {
- if (rdreg->base + vgic_v3_rd_region_size(kvm, rdreg) <
- rdreg->base)
+ size_t sz = vgic_v3_rd_region_size(kvm, rdreg);
+
+ if (vgic_check_iorange(kvm, VGIC_ADDR_UNDEF,
+ rdreg->base, SZ_64K, sz))
return false;
}
@@ -671,11 +676,23 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
group1_trap = true;
}
- if (group0_trap || group1_trap || common_trap) {
- kvm_info("GICv3 sysreg trapping enabled ([%s%s%s], reduced performance)\n",
+ if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
+ kvm_info("GICv3 with locally generated SEI\n");
+
+ group0_trap = true;
+ group1_trap = true;
+ if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
+ dir_trap = true;
+ else
+ common_trap = true;
+ }
+
+ if (group0_trap || group1_trap || common_trap || dir_trap) {
+ kvm_info("GICv3 sysreg trapping enabled ([%s%s%s%s], reduced performance)\n",
group0_trap ? "G0" : "",
group1_trap ? "G1" : "",
- common_trap ? "C" : "");
+ common_trap ? "C" : "",
+ dir_trap ? "D" : "");
static_branch_enable(&vgic_v3_cpuif_trap);
}
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index c1845d8f5f7e..772dd15a22c7 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -246,7 +246,7 @@ int vgic_v4_init(struct kvm *kvm)
nr_vcpus = atomic_read(&kvm->online_vcpus);
dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
- GFP_KERNEL);
+ GFP_KERNEL_ACCOUNT);
if (!dist->its_vm.vpes)
return -ENOMEM;
diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
index 14a9218641f5..3fd6c86a7ef3 100644
--- a/arch/arm64/kvm/vgic/vgic.h
+++ b/arch/arm64/kvm/vgic/vgic.h
@@ -172,8 +172,9 @@ void vgic_kick_vcpus(struct kvm *kvm);
void vgic_irq_handle_resampling(struct vgic_irq *irq,
bool lr_deactivated, bool lr_pending);
-int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
- phys_addr_t addr, phys_addr_t alignment);
+int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr,
+ phys_addr_t addr, phys_addr_t alignment,
+ phys_addr_t size);
void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index d7bee210a798..83bcad72ec97 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -173,4 +173,4 @@ L(done):
ret
SYM_FUNC_END_PI(strcmp)
-EXPORT_SYMBOL_NOKASAN(strcmp)
+EXPORT_SYMBOL_NOHWKASAN(strcmp)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
index 48d44f7fddb1..e42bcfcd37e6 100644
--- a/arch/arm64/lib/strncmp.S
+++ b/arch/arm64/lib/strncmp.S
@@ -258,4 +258,4 @@ L(ret0):
ret
SYM_FUNC_END_PI(strncmp)
-EXPORT_SYMBOL_NOKASAN(strncmp)
+EXPORT_SYMBOL_NOHWKASAN(strncmp)
diff --git a/arch/m68k/68000/entry.S b/arch/m68k/68000/entry.S
index 259b3661b614..997b54933015 100644
--- a/arch/m68k/68000/entry.S
+++ b/arch/m68k/68000/entry.S
@@ -15,7 +15,6 @@
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
@@ -25,7 +24,6 @@
.globl system_call
.globl resume
.globl ret_from_exception
-.globl ret_from_signal
.globl sys_call_table
.globl bad_interrupt
.globl inthandler1
@@ -59,8 +57,6 @@ do_trace:
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
-
-ret_from_signal:
RESTORE_SWITCH_STACK
addql #4,%sp
jra ret_from_exception
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 774c35f47eea..0b50da08a9c5 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -29,7 +29,6 @@ config M68K
select NO_DMA if !MMU && !COLDFIRE
select OLD_SIGACTION
select OLD_SIGSUSPEND3
- select SET_FS
select UACCESS_MEMCPY if !MMU
select VIRT_TO_BUS
select ZONE_DMA
diff --git a/arch/m68k/coldfire/entry.S b/arch/m68k/coldfire/entry.S
index d43a02795a4a..9f337c70243a 100644
--- a/arch/m68k/coldfire/entry.S
+++ b/arch/m68k/coldfire/entry.S
@@ -31,7 +31,6 @@
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/entry.h>
@@ -51,7 +50,6 @@ sw_usp:
.globl system_call
.globl resume
.globl ret_from_exception
-.globl ret_from_signal
.globl sys_call_table
.globl inthandler
@@ -98,8 +96,6 @@ ENTRY(system_call)
subql #4,%sp /* dummy return address */
SAVE_SWITCH_STACK
jbsr syscall_trace_leave
-
-ret_from_signal:
RESTORE_SWITCH_STACK
addql #4,%sp
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h
index 3750819ac5a1..f4d82c619a5c 100644
--- a/arch/m68k/include/asm/processor.h
+++ b/arch/m68k/include/asm/processor.h
@@ -9,7 +9,6 @@
#define __ASM_M68K_PROCESSOR_H
#include <linux/thread_info.h>
-#include <asm/segment.h>
#include <asm/fpu.h>
#include <asm/ptrace.h>
@@ -75,11 +74,37 @@ static inline void wrusp(unsigned long usp)
#define TASK_UNMAPPED_BASE 0
#endif
+/* Address spaces (or Function Codes in Motorola lingo) */
+#define USER_DATA 1
+#define USER_PROGRAM 2
+#define SUPER_DATA 5
+#define SUPER_PROGRAM 6
+#define CPU_SPACE 7
+
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+/*
+ * Set the SFC/DFC registers for special MM operations. For most normal
+ * operation these remain set to USER_DATA for the uaccess routines.
+ */
+static inline void set_fc(unsigned long val)
+{
+ WARN_ON_ONCE(in_interrupt());
+
+ __asm__ __volatile__ ("movec %0,%/sfc\n\t"
+ "movec %0,%/dfc\n\t"
+ : /* no outputs */ : "r" (val) : "memory");
+}
+#else
+static inline void set_fc(unsigned long val)
+{
+}
+#endif /* CONFIG_CPU_HAS_ADDRESS_SPACES */
+
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long usp; /* user stack pointer */
unsigned short sr; /* saved status register */
- unsigned short fs; /* saved fs (sfc, dfc) */
+ unsigned short fc; /* saved fc (sfc, dfc) */
unsigned long crp[2]; /* cpu root pointer */
unsigned long esp0; /* points to SR of stack frame */
unsigned long faddr; /* info about last fault */
@@ -92,7 +117,7 @@ struct thread_struct {
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
.sr = PS_S, \
- .fs = __KERNEL_DS, \
+ .fc = USER_DATA, \
}
/*
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
deleted file mode 100644
index 2b5e68a71ef7..000000000000
--- a/arch/m68k/include/asm/segment.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _M68K_SEGMENT_H
-#define _M68K_SEGMENT_H
-
-/* define constants */
-/* Address spaces (FC0-FC2) */
-#define USER_DATA (1)
-#ifndef __USER_DS
-#define __USER_DS (USER_DATA)
-#endif
-#define USER_PROGRAM (2)
-#define SUPER_DATA (5)
-#ifndef __KERNEL_DS
-#define __KERNEL_DS (SUPER_DATA)
-#endif
-#define SUPER_PROGRAM (6)
-#define CPU_SPACE (7)
-
-#ifndef __ASSEMBLY__
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
-/*
- * Get/set the SFC/DFC registers for MOVES instructions
- */
-#define USER_DS MAKE_MM_SEG(__USER_DS)
-#define KERNEL_DS MAKE_MM_SEG(__KERNEL_DS)
-
-static inline mm_segment_t get_fs(void)
-{
- mm_segment_t _v;
- __asm__ ("movec %/dfc,%0":"=r" (_v.seg):);
- return _v;
-}
-
-static inline void set_fs(mm_segment_t val)
-{
- __asm__ __volatile__ ("movec %0,%/sfc\n\t"
- "movec %0,%/dfc\n\t"
- : /* no outputs */ : "r" (val.seg) : "memory");
-}
-
-#else
-#define USER_DS MAKE_MM_SEG(TASK_SIZE)
-#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-#endif
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _M68K_SEGMENT_H */
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 15a757073fa5..c952658ba792 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -4,7 +4,6 @@
#include <asm/types.h>
#include <asm/page.h>
-#include <asm/segment.h>
/*
* On machines with 4k pages we default to an 8k thread size, though we
@@ -27,7 +26,6 @@
struct thread_info {
struct task_struct *task; /* main task structure */
unsigned long flags;
- mm_segment_t addr_limit; /* thread address space */
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
unsigned long tp_value; /* thread pointer */
@@ -37,7 +35,6 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index a6318ccd308f..b882e2f4f551 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -13,13 +13,12 @@ static inline void flush_tlb_kernel_page(void *addr)
if (CPU_IS_COLDFIRE) {
mmu_write(MMUOR, MMUOR_CNL);
} else if (CPU_IS_040_OR_060) {
- mm_segment_t old_fs = get_fs();
- set_fs(KERNEL_DS);
+ set_fc(SUPER_DATA);
__asm__ __volatile__(".chip 68040\n\t"
"pflush (%0)\n\t"
".chip 68k"
: : "a" (addr));
- set_fs(old_fs);
+ set_fc(USER_DATA);
} else if (CPU_IS_020_OR_030)
__asm__ __volatile__("pflush #4,#4,(%0)" : : "a" (addr));
}
@@ -84,12 +83,8 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- if (vma->vm_mm == current->active_mm) {
- mm_segment_t old_fs = force_uaccess_begin();
-
+ if (vma->vm_mm == current->active_mm)
__flush_tlb_one(addr);
- force_uaccess_end(old_fs);
- }
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
diff --git a/arch/m68k/include/asm/traps.h b/arch/m68k/include/asm/traps.h
index 4aff3358fbaf..a9d5c1c870d3 100644
--- a/arch/m68k/include/asm/traps.h
+++ b/arch/m68k/include/asm/traps.h
@@ -267,6 +267,10 @@ struct frame {
} un;
};
+#ifdef CONFIG_M68040
+asmlinkage void berr_040cleanup(struct frame *fp);
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _M68K_TRAPS_H */
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index f98208ccbbcd..ba670523885c 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -9,13 +9,16 @@
*/
#include <linux/compiler.h>
#include <linux/types.h>
-#include <asm/segment.h>
#include <asm/extable.h>
/* We let the MMU do all checking */
static inline int access_ok(const void __user *addr,
unsigned long size)
{
+ /*
+ * XXX: for !CONFIG_CPU_HAS_ADDRESS_SPACES this really needs to check
+ * for TASK_SIZE!
+ */
return 1;
}
@@ -35,12 +38,9 @@ static inline int access_ok(const void __user *addr,
#define MOVES "move"
#endif
-extern int __put_user_bad(void);
-extern int __get_user_bad(void);
-
-#define __put_user_asm(res, x, ptr, bwl, reg, err) \
+#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
asm volatile ("\n" \
- "1: "MOVES"."#bwl" %2,%1\n" \
+ "1: "inst"."#bwl" %2,%1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
@@ -56,6 +56,31 @@ asm volatile ("\n" \
: "+d" (res), "=m" (*(ptr)) \
: #reg (x), "i" (err))
+#define __put_user_asm8(inst, res, x, ptr) \
+do { \
+ const void *__pu_ptr = (const void __force *)(ptr); \
+ \
+ asm volatile ("\n" \
+ "1: "inst".l %2,(%1)+\n" \
+ "2: "inst".l %R2,(%1)\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: movel %3,%0\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .long 3b,10b\n" \
+ " .previous" \
+ : "+d" (res), "+a" (__pu_ptr) \
+ : "r" (x), "i" (-EFAULT) \
+ : "memory"); \
+} while (0)
+
/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
@@ -68,51 +93,29 @@ asm volatile ("\n" \
__chk_user_ptr(ptr); \
switch (sizeof (*(ptr))) { \
case 1: \
- __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
+ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
break; \
case 2: \
- __put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT); \
+ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
break; \
case 4: \
- __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
+ __put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
break; \
case 8: \
- { \
- const void __user *__pu_ptr = (ptr); \
- asm volatile ("\n" \
- "1: "MOVES".l %2,(%1)+\n" \
- "2: "MOVES".l %R2,(%1)\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .even\n" \
- "10: movel %3,%0\n" \
- " jra 3b\n" \
- " .previous\n" \
- "\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,10b\n" \
- " .long 2b,10b\n" \
- " .long 3b,10b\n" \
- " .previous" \
- : "+d" (__pu_err), "+a" (__pu_ptr) \
- : "r" (__pu_val), "i" (-EFAULT) \
- : "memory"); \
+ __put_user_asm8(MOVES, __pu_err, __pu_val, ptr); \
break; \
- } \
default: \
- __pu_err = __put_user_bad(); \
- break; \
+ BUILD_BUG(); \
} \
__pu_err; \
})
#define put_user(x, ptr) __put_user(x, ptr)
-#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
+#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({ \
type __gu_val; \
asm volatile ("\n" \
- "1: "MOVES"."#bwl" %2,%1\n" \
+ "1: "inst"."#bwl" %2,%1\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .even\n" \
@@ -130,53 +133,57 @@ asm volatile ("\n" \
(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \
})
+#define __get_user_asm8(inst, res, x, ptr) \
+do { \
+ const void *__gu_ptr = (const void __force *)(ptr); \
+ union { \
+ u64 l; \
+ __typeof__(*(ptr)) t; \
+ } __gu_val; \
+ \
+ asm volatile ("\n" \
+ "1: "inst".l (%2)+,%1\n" \
+ "2: "inst".l (%2),%R1\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .even\n" \
+ "10: move.l %3,%0\n" \
+ " sub.l %1,%1\n" \
+ " sub.l %R1,%R1\n" \
+ " jra 3b\n" \
+ " .previous\n" \
+ "\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b,10b\n" \
+ " .long 2b,10b\n" \
+ " .previous" \
+ : "+d" (res), "=&r" (__gu_val.l), \
+ "+a" (__gu_ptr) \
+ : "i" (-EFAULT) \
+ : "memory"); \
+ (x) = __gu_val.t; \
+} while (0)
+
#define __get_user(x, ptr) \
({ \
int __gu_err = 0; \
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: \
- __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
+ __get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
break; \
case 2: \
- __get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT); \
+ __get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
break; \
case 4: \
- __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
+ __get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
break; \
- case 8: { \
- const void __user *__gu_ptr = (ptr); \
- union { \
- u64 l; \
- __typeof__(*(ptr)) t; \
- } __gu_val; \
- asm volatile ("\n" \
- "1: "MOVES".l (%2)+,%1\n" \
- "2: "MOVES".l (%2),%R1\n" \
- "3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .even\n" \
- "10: move.l %3,%0\n" \
- " sub.l %1,%1\n" \
- " sub.l %R1,%R1\n" \
- " jra 3b\n" \
- " .previous\n" \
- "\n" \
- " .section __ex_table,\"a\"\n" \
- " .align 4\n" \
- " .long 1b,10b\n" \
- " .long 2b,10b\n" \
- " .previous" \
- : "+d" (__gu_err), "=&r" (__gu_val.l), \
- "+a" (__gu_ptr) \
- : "i" (-EFAULT) \
- : "memory"); \
- (x) = __gu_val.t; \
+ case 8: \
+ __get_user_asm8(MOVES, __gu_err, x, ptr); \
break; \
- } \
default: \
- __gu_err = __get_user_bad(); \
- break; \
+ BUILD_BUG(); \
} \
__gu_err; \
})
@@ -322,16 +329,19 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
switch (n) {
case 1:
- __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+ __put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
+ b, d, 1);
break;
case 2:
- __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
+ __put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
+ w, r, 2);
break;
case 3:
__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
break;
case 4:
- __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+ __put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
+ l, r, 4);
break;
case 5:
__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
@@ -380,8 +390,65 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n)
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
-#define user_addr_max() \
- (uaccess_kernel() ? ~0UL : TASK_SIZE)
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type *__gk_dst = (type *)(dst); \
+ type *__gk_src = (type *)(src); \
+ int __gk_err = 0; \
+ \
+ switch (sizeof(type)) { \
+ case 1: \
+ __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+ u8, b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+ u16, w, r, -EFAULT); \
+ break; \
+ case 4: \
+ __get_user_asm("move", __gk_err, *__gk_dst, __gk_src, \
+ u32, l, r, -EFAULT); \
+ break; \
+ case 8: \
+ __get_user_asm8("move", __gk_err, *__gk_dst, __gk_src); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ if (unlikely(__gk_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ type __pk_src = *(type *)(src); \
+ type *__pk_dst = (type *)(dst); \
+ int __pk_err = 0; \
+ \
+ switch (sizeof(type)) { \
+ case 1: \
+ __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+ b, d, -EFAULT); \
+ break; \
+ case 2: \
+ __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+ w, r, -EFAULT); \
+ break; \
+ case 4: \
+ __put_user_asm("move", __pk_err, __pk_src, __pk_dst, \
+ l, r, -EFAULT); \
+ break; \
+ case 8: \
+ __put_user_asm8("move", __pk_err, __pk_src, __pk_dst); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ if (unlikely(__pk_err)) \
+ goto err_label; \
+} while (0)
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
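
The new __get_kernel_nofault()/__put_kernel_nofault() reuse the uaccess asm with a plain "move" rather than MOVES, since kernel-to-kernel accesses no longer go through the user SFC/DFC selector. They provide the arch backend for the generic get_kernel_nofault()/copy_from_kernel_nofault() helpers; below is a short, hedged usage sketch (the probed pointer and wrapper are illustrative, not part of the patch).

#include <linux/uaccess.h>
#include <linux/kernel.h>

/*
 * Sketch: read a word from a possibly-bogus kernel pointer without risking
 * an oops. The new m68k __get_kernel_nofault() supplies the exception-table
 * fixup behind get_kernel_nofault().
 */
static unsigned long demo_peek(const unsigned long *addr)
{
	unsigned long val;

	if (get_kernel_nofault(val, addr))
		return 0;		/* faulted: treat as unreadable */
	return val;
}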
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
index ccea355052ef..906d73230537 100644
--- a/arch/m68k/kernel/asm-offsets.c
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -31,7 +31,7 @@ int main(void)
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
- DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+ DEFINE(THREAD_FC, offsetof(struct thread_struct, fc));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 9dd76fbb7c6b..9434fca68de5 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -36,7 +36,6 @@
#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
@@ -78,20 +77,38 @@ ENTRY(__sys_clone3)
ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK
- movel %sp,%sp@- | switch_stack pointer
- pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+ movel %sp,%a1 | switch_stack pointer
+ lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
+ lea %sp@(-84),%sp | leave a gap
+ movel %a1,%sp@-
+ movel %a0,%sp@-
jbsr do_sigreturn
- addql #8,%sp
- RESTORE_SWITCH_STACK
- rts
+ jra 1f | shared with rt_sigreturn()
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
- movel %sp,%sp@- | switch_stack pointer
- pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+ movel %sp,%a1 | switch_stack pointer
+ lea %sp@(SWITCH_STACK_SIZE),%a0 | pt_regs pointer
+ lea %sp@(-84),%sp | leave a gap
+ movel %a1,%sp@-
+ movel %a0,%sp@-
+ | stack contents:
+ | [original pt_regs address] [original switch_stack address]
+ | [gap] [switch_stack] [pt_regs] [exception frame]
jbsr do_rt_sigreturn
- addql #8,%sp
+
+1:
+ | stack contents now:
+ | [original pt_regs address] [original switch_stack address]
+ | [unused part of the gap] [moved switch_stack] [moved pt_regs]
+ | [replacement exception frame]
+ | return value of do_{rt_,}sigreturn() points to moved switch_stack.
+
+ movel %d0,%sp | discard the leftover junk
RESTORE_SWITCH_STACK
+ | stack contents now is just [syscall return address] [pt_regs] [frame]
+ | return pt_regs.d0
+ movel %sp@(PT_OFF_D0+4),%d0
rts
ENTRY(buserr)
@@ -182,25 +199,6 @@ do_trace_exit:
addql #4,%sp
jra .Lret_from_exception
-ENTRY(ret_from_signal)
- movel %curptr@(TASK_STACK),%a1
- tstb %a1@(TINFO_FLAGS+2)
- jge 1f
- jbsr syscall_trace
-1: RESTORE_SWITCH_STACK
- addql #4,%sp
-/* on 68040 complete pending writebacks if any */
-#ifdef CONFIG_M68040
- bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
- subql #7,%d0 | bus error frame ?
- jbne 1f
- movel %sp,%sp@-
- jbsr berr_040cleanup
- addql #4,%sp
-1:
-#endif
- jra .Lret_from_exception
-
ENTRY(system_call)
SAVE_ALL_SYS
@@ -338,7 +336,7 @@ resume:
/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
movec %sfc,%d0
- movew %d0,%a0@(TASK_THREAD+THREAD_FS)
+ movew %d0,%a0@(TASK_THREAD+THREAD_FC)
/* save usp */
/* it is better to use a movel here instead of a movew 8*) */
@@ -424,7 +422,7 @@ resume:
movel %a0,%usp
/* restore fs (sfc,%dfc) */
- movew %a1@(TASK_THREAD+THREAD_FS),%a0
+ movew %a1@(TASK_THREAD+THREAD_FC),%a0
movec %a0,%sfc
movec %a0,%dfc
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index db49f9091711..1ab692b952cd 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -92,7 +92,7 @@ void show_regs(struct pt_regs * regs)
void flush_thread(void)
{
- current->thread.fs = __USER_DS;
+ current->thread.fc = USER_DATA;
#ifdef CONFIG_FPU
if (!FPU_IS_EMU) {
unsigned long zero = 0;
@@ -155,7 +155,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
* Must save the current SFC/DFC value, NOT the value when
* the parent was last descheduled - RGH 10-08-96
*/
- p->thread.fs = get_fs().seg;
+ p->thread.fc = USER_DATA;
if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
/* kernel thread */
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index 8f215e79e70e..338817d0cb3f 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -447,7 +447,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0];
- if (CPU_IS_020_OR_030 &&
+ if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
@@ -510,7 +510,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *
if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
context_size = fpstate[1];
fpu_version = fpstate[0];
- if (CPU_IS_020_OR_030 &&
+ if (CPU_IS_020_OR_030 && !regs->stkadj &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
@@ -641,56 +641,35 @@ static inline void siginfo_build_tests(void)
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
void __user *fp)
{
- int fsize = frame_extra_sizes(formatvec >> 12);
- if (fsize < 0) {
+ int extra = frame_extra_sizes(formatvec >> 12);
+ char buf[sizeof_field(struct frame, un)];
+
+ if (extra < 0) {
/*
* user process trying to return with weird frame format
*/
pr_debug("user process returning with weird frame format\n");
- return 1;
+ return -1;
}
- if (!fsize) {
- regs->format = formatvec >> 12;
- regs->vector = formatvec & 0xfff;
- } else {
- struct switch_stack *sw = (struct switch_stack *)regs - 1;
- /* yes, twice as much as max(sizeof(frame.un.fmt<x>)) */
- unsigned long buf[sizeof_field(struct frame, un) / 2];
-
- /* that'll make sure that expansion won't crap over data */
- if (copy_from_user(buf + fsize / 4, fp, fsize))
- return 1;
-
- /* point of no return */
- regs->format = formatvec >> 12;
- regs->vector = formatvec & 0xfff;
-#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
- __asm__ __volatile__ (
-#ifdef CONFIG_COLDFIRE
- " movel %0,%/sp\n\t"
- " bra ret_from_signal\n"
-#else
- " movel %0,%/a0\n\t"
- " subl %1,%/a0\n\t" /* make room on stack */
- " movel %/a0,%/sp\n\t" /* set stack pointer */
- /* move switch_stack and pt_regs */
- "1: movel %0@+,%/a0@+\n\t"
- " dbra %2,1b\n\t"
- " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
- " lsrl #2,%1\n\t"
- " subql #1,%1\n\t"
- /* copy to the gap we'd made */
- "2: movel %4@+,%/a0@+\n\t"
- " dbra %1,2b\n\t"
- " bral ret_from_signal\n"
+ if (extra && copy_from_user(buf, fp, extra))
+ return -1;
+ regs->format = formatvec >> 12;
+ regs->vector = formatvec & 0xfff;
+ if (extra) {
+ void *p = (struct switch_stack *)regs - 1;
+ struct frame *new = (void *)regs - extra;
+ int size = sizeof(struct pt_regs)+sizeof(struct switch_stack);
+
+ memmove(p - extra, p, size);
+ memcpy(p - extra + size, buf, extra);
+ current->thread.esp0 = (unsigned long)&new->ptregs;
+#ifdef CONFIG_M68040
+ /* on 68040 complete pending writebacks if any */
+ if (new->ptregs.format == 7) // bus error frame
+ berr_040cleanup(new);
#endif
- : /* no outputs, it doesn't ever return */
- : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
- "n" (frame_offset), "a" (buf + fsize/4)
- : "a0");
-#undef frame_offset
}
- return 0;
+ return extra;
}
static inline int
@@ -698,7 +677,6 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
{
int formatvec;
struct sigcontext context;
- int err = 0;
siginfo_build_tests();
@@ -707,7 +685,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
- goto badframe;
+ return -1;
/* restore passed registers */
regs->d0 = context.sc_d0;
@@ -720,15 +698,10 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
wrusp(context.sc_usp);
formatvec = context.sc_formatvec;
- err = restore_fpu_state(&context);
+ if (restore_fpu_state(&context))
+ return -1;
- if (err || mangle_kernel_stack(regs, formatvec, fp))
- goto badframe;
-
- return 0;
-
-badframe:
- return 1;
+ return mangle_kernel_stack(regs, formatvec, fp);
}
static inline int
@@ -745,7 +718,7 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
- goto badframe;
+ return -1;
/* restore passed registers */
err |= __get_user(regs->d0, &gregs[0]);
err |= __get_user(regs->d1, &gregs[1]);
@@ -774,22 +747,17 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
err |= restore_altstack(&uc->uc_stack);
if (err)
- goto badframe;
+ return -1;
- if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
- goto badframe;
-
- return 0;
-
-badframe:
- return 1;
+ return mangle_kernel_stack(regs, temp, &uc->uc_extra);
}
-asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
unsigned long usp = rdusp();
struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
sigset_t set;
+ int size;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
@@ -801,20 +769,22 @@ asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
set_current_blocked(&set);
- if (restore_sigcontext(regs, &frame->sc, frame + 1))
+ size = restore_sigcontext(regs, &frame->sc, frame + 1);
+ if (size < 0)
goto badframe;
- return regs->d0;
+ return (void *)sw - size;
badframe:
force_sig(SIGSEGV);
- return 0;
+ return sw;
}
-asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+asmlinkage void *do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
unsigned long usp = rdusp();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
sigset_t set;
+ int size;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
@@ -823,27 +793,34 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
set_current_blocked(&set);
- if (rt_restore_ucontext(regs, sw, &frame->uc))
+ size = rt_restore_ucontext(regs, sw, &frame->uc);
+ if (size < 0)
goto badframe;
- return regs->d0;
+ return (void *)sw - size;
badframe:
force_sig(SIGSEGV);
- return 0;
+ return sw;
+}
+
+static inline struct pt_regs *rte_regs(struct pt_regs *regs)
+{
+ return (void *)regs + regs->stkadj;
}
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
unsigned long mask)
{
+ struct pt_regs *tregs = rte_regs(regs);
sc->sc_mask = mask;
sc->sc_usp = rdusp();
sc->sc_d0 = regs->d0;
sc->sc_d1 = regs->d1;
sc->sc_a0 = regs->a0;
sc->sc_a1 = regs->a1;
- sc->sc_sr = regs->sr;
- sc->sc_pc = regs->pc;
- sc->sc_formatvec = regs->format << 12 | regs->vector;
+ sc->sc_sr = tregs->sr;
+ sc->sc_pc = tregs->pc;
+ sc->sc_formatvec = tregs->format << 12 | tregs->vector;
save_a5_state(sc, regs);
save_fpu_state(sc, regs);
}
@@ -851,6 +828,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ struct pt_regs *tregs = rte_regs(regs);
greg_t __user *gregs = uc->uc_mcontext.gregs;
int err = 0;
@@ -871,9 +849,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *
err |= __put_user(sw->a5, &gregs[13]);
err |= __put_user(sw->a6, &gregs[14]);
err |= __put_user(rdusp(), &gregs[15]);
- err |= __put_user(regs->pc, &gregs[16]);
- err |= __put_user(regs->sr, &gregs[17]);
- err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+ err |= __put_user(tregs->pc, &gregs[16]);
+ err |= __put_user(tregs->sr, &gregs[17]);
+ err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec);
err |= rt_save_fpu_state(uc, regs);
return err;
}
@@ -890,13 +868,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct sigframe __user *frame;
- int fsize = frame_extra_sizes(regs->format);
+ struct pt_regs *tregs = rte_regs(regs);
+ int fsize = frame_extra_sizes(tregs->format);
struct sigcontext context;
int err = 0, sig = ksig->sig;
if (fsize < 0) {
pr_debug("setup_frame: Unknown frame format %#x\n",
- regs->format);
+ tregs->format);
return -EFAULT;
}
@@ -907,7 +886,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
err |= __put_user(sig, &frame->sig);
- err |= __put_user(regs->vector, &frame->code);
+ err |= __put_user(tregs->vector, &frame->code);
err |= __put_user(&frame->sc, &frame->psc);
if (_NSIG_WORDS > 1)
@@ -934,33 +913,27 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set,
push_cache ((unsigned long) &frame->retcode);
/*
- * Set up registers for signal handler. All the state we are about
- * to destroy is successfully copied to sigframe.
- */
- wrusp ((unsigned long) frame);
- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
- adjustformat(regs);
-
- /*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
- if (fsize)
+ if (fsize) {
regs->stkadj = fsize;
-
- /* Prepare to skip over the extra stuff in the exception frame. */
- if (regs->stkadj) {
- struct pt_regs *tregs =
- (struct pt_regs *)((ulong)regs + regs->stkadj);
+ tregs = rte_regs(regs);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
- /* This must be copied with decreasing addresses to
- handle overlaps. */
tregs->vector = 0;
tregs->format = 0;
- tregs->pc = regs->pc;
tregs->sr = regs->sr;
}
+
+ /*
+ * Set up registers for signal handler. All the state we are about
+ * to destroy is successfully copied to sigframe.
+ */
+ wrusp ((unsigned long) frame);
+ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+ adjustformat(regs);
+
return 0;
}
@@ -968,7 +941,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
- int fsize = frame_extra_sizes(regs->format);
+ struct pt_regs *tregs = rte_regs(regs);
+ int fsize = frame_extra_sizes(tregs->format);
int err = 0, sig = ksig->sig;
if (fsize < 0) {
@@ -1019,33 +993,26 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
push_cache ((unsigned long) &frame->retcode);
/*
- * Set up registers for signal handler. All the state we are about
- * to destroy is successfully copied to sigframe.
- */
- wrusp ((unsigned long) frame);
- regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
- adjustformat(regs);
-
- /*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
- if (fsize)
+ if (fsize) {
regs->stkadj = fsize;
-
- /* Prepare to skip over the extra stuff in the exception frame. */
- if (regs->stkadj) {
- struct pt_regs *tregs =
- (struct pt_regs *)((ulong)regs + regs->stkadj);
+ tregs = rte_regs(regs);
pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
- /* This must be copied with decreasing addresses to
- handle overlaps. */
tregs->vector = 0;
tregs->format = 0;
- tregs->pc = regs->pc;
tregs->sr = regs->sr;
}
+
+ /*
+ * Set up registers for signal handler. All the state we are about
+ * to destroy is successfully copied to sigframe.
+ */
+ wrusp ((unsigned long) frame);
+ tregs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+ adjustformat(regs);
return 0;
}
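
mangle_kernel_stack() now relocates the saved state in C: the switch_stack plus pt_regs block is slid down by the size of the extra exception-frame words, the words read from user space are appended behind it, and the entry.S glue simply switches %sp to the pointer returned by do_{rt_,}sigreturn(). Source and destination overlap, which is why the patch uses memmove() rather than memcpy(); the toy program below (sizes and contents invented) models that overlapping move.

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* toy stack: 8 filler bytes, then a fake switch_stack + pt_regs block */
	char stack[33]    = "--------SWITCHSTACK+PTREGS------";
	char *p           = stack + 8;	/* where the saved registers sit now      */
	size_t size       = 18;		/* sizeof(switch_stack) + sizeof(pt_regs) */
	size_t extra      = 4;		/* frame_extra_sizes() stand-in           */
	const char buf[4] = "FMT7";	/* frame words copied from user space     */

	memmove(p - extra, p, size);	/* regions overlap: memcpy() would be UB  */
	memcpy(p - extra + size, buf, extra);
	printf("%s\n", stack);		/* ----SWITCHSTACK+PTREGSFMT7------       */
	return 0;
}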
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 5b19fcdcd69e..9718ce94cc84 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -181,9 +181,8 @@ static inline void access_error060 (struct frame *fp)
static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
{
unsigned long mmusr;
- mm_segment_t old_fs = get_fs();
- set_fs(MAKE_MM_SEG(wbs));
+ set_fc(wbs);
if (iswrite)
asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
@@ -192,7 +191,7 @@ static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
- set_fs(old_fs);
+ set_fc(USER_DATA);
return mmusr;
}
@@ -201,10 +200,8 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
unsigned long wbd)
{
int res = 0;
- mm_segment_t old_fs = get_fs();
- /* set_fs can not be moved, otherwise put_user() may oops */
- set_fs(MAKE_MM_SEG(wbs));
+ set_fc(wbs);
switch (wbs & WBSIZ_040) {
case BA_SIZE_BYTE:
@@ -218,9 +215,7 @@ static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
break;
}
- /* set_fs can not be moved, otherwise put_user() may oops */
- set_fs(old_fs);
-
+ set_fc(USER_DATA);
pr_debug("do_040writeback1, res=%d\n", res);
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 90f4e9ca1276..4fab34791758 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -18,7 +18,6 @@
#include <linux/uaccess.h>
#include <asm/io.h>
-#include <asm/segment.h>
#include <asm/setup.h>
#include <asm/macintosh.h>
#include <asm/mac_via.h>
diff --git a/arch/m68k/mm/cache.c b/arch/m68k/mm/cache.c
index b486c0889eec..dde978e66f14 100644
--- a/arch/m68k/mm/cache.c
+++ b/arch/m68k/mm/cache.c
@@ -49,24 +49,7 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
if (mmusr & MMU_R_040)
return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
} else {
- unsigned short mmusr;
- unsigned long *descaddr;
-
- asm volatile ("ptestr %3,%2@,#7,%0\n\t"
- "pmove %%psr,%1"
- : "=a&" (descaddr), "=m" (mmusr)
- : "a" (vaddr), "d" (get_fs().seg));
- if (mmusr & (MMU_I|MMU_B|MMU_L))
- return 0;
- descaddr = phys_to_virt((unsigned long)descaddr);
- switch (mmusr & MMU_NUM) {
- case 1:
- return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
- case 2:
- return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
- case 3:
- return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
- }
+ WARN_ON_ONCE(!CPU_IS_040_OR_060);
}
return 0;
}
@@ -107,11 +90,9 @@ void flush_icache_user_range(unsigned long address, unsigned long endaddr)
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
- mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
+ set_fc(SUPER_DATA);
flush_icache_user_range(address, endaddr);
- set_fs(old_fs);
+ set_fc(USER_DATA);
}
EXPORT_SYMBOL(flush_icache_range);
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 5d749e188246..1b47bec15832 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -72,12 +72,6 @@ void __init paging_init(void)
if (!empty_zero_page)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
-
- /*
- * Set up SFC/DFC registers (user data space).
- */
- set_fs (USER_DS);
-
max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT;
free_area_init(max_zone_pfn);
}
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index 1269d513b221..20ddf71b43d0 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -17,7 +17,6 @@
#include <linux/vmalloc.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index fe75aecfb238..c2c03b0a1567 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -15,7 +15,6 @@
#include <linux/gfp.h>
#include <asm/setup.h>
-#include <asm/segment.h>
#include <asm/page.h>
#include <asm/traps.h>
#include <asm/machdep.h>
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 3a653f0a4188..9f3f77785aa7 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -467,7 +467,7 @@ void __init paging_init(void)
/*
* Set up SFC/DFC registers
*/
- set_fs(KERNEL_DS);
+ set_fc(USER_DATA);
#ifdef DEBUG
printk ("before free_area_init\n");
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index f7dd47232b6c..203f428a0344 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -31,7 +31,6 @@
#include <asm/intersil.h>
#include <asm/irq.h>
#include <asm/sections.h>
-#include <asm/segment.h>
#include <asm/sun3ints.h>
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
@@ -89,7 +88,7 @@ void __init sun3_init(void)
sun3_reserved_pmeg[249] = 1;
sun3_reserved_pmeg[252] = 1;
sun3_reserved_pmeg[253] = 1;
- set_fs(KERNEL_DS);
+ set_fc(USER_DATA);
}
/* Without this, Bad Things happen when something calls arch_reset. */
diff --git a/arch/m68k/sun3/mmu_emu.c b/arch/m68k/sun3/mmu_emu.c
index 7aa879b7c7ff..7ec20817c0c9 100644
--- a/arch/m68k/sun3/mmu_emu.c
+++ b/arch/m68k/sun3/mmu_emu.c
@@ -23,7 +23,6 @@
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/sun3mmu.h>
-#include <asm/segment.h>
#include <asm/oplib.h>
#include <asm/mmu_context.h>
#include <asm/dvma.h>
@@ -191,14 +190,13 @@ void __init mmu_emu_init(unsigned long bootmem_end)
for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
sun3_put_segmap(seg, SUN3_INVALID_PMEG);
- set_fs(MAKE_MM_SEG(3));
+ set_fc(3);
for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
i = sun3_get_segmap(seg);
for(j = 1; j < CONTEXTS_NUM; j++)
(*(romvec->pv_setctxt))(j, (void *)seg, i);
}
- set_fs(KERNEL_DS);
-
+ set_fc(USER_DATA);
}
/* erase the mappings for a dead context. Uses the pg_dir for hints
diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
index 41ae422119d3..36cc280a4505 100644
--- a/arch/m68k/sun3/sun3ints.c
+++ b/arch/m68k/sun3/sun3ints.c
@@ -11,7 +11,6 @@
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
-#include <asm/segment.h>
#include <asm/intersil.h>
#include <asm/oplib.h>
#include <asm/sun3ints.h>
diff --git a/arch/m68k/sun3x/prom.c b/arch/m68k/sun3x/prom.c
index 74d2fe57524b..64c23bfaa90c 100644
--- a/arch/m68k/sun3x/prom.c
+++ b/arch/m68k/sun3x/prom.c
@@ -14,7 +14,6 @@
#include <asm/traps.h>
#include <asm/sun3xprom.h>
#include <asm/idprom.h>
-#include <asm/segment.h>
#include <asm/sun3ints.h>
#include <asm/openprom.h>
#include <asm/machines.h>
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 0af88622c619..cb6d22439f71 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -662,6 +662,11 @@ static void build_epilogue(struct jit_ctx *ctx)
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
func##_positive)
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
static int build_body(struct jit_ctx *ctx)
{
const struct bpf_prog *prog = ctx->skf;
@@ -728,7 +733,10 @@ load_common:
/* Load return register on DS for failures */
emit_reg_move(r_ret, r_zero, ctx);
/* Return with error */
- emit_b(b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_LD | BPF_W | BPF_IND:
@@ -775,8 +783,10 @@ load_ind:
emit_jalr(MIPS_R_RA, r_s0, ctx);
emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
/* Check the error value */
- emit_bcond(MIPS_COND_NE, r_ret, 0,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
/* We are good */
/* X <- P[1:K] & 0xf */
@@ -855,8 +865,10 @@ load_ind:
/* A /= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_div(r_A, r_X, ctx);
break;
@@ -864,8 +876,10 @@ load_ind:
/* A %= X */
ctx->flags |= SEEN_X | SEEN_A;
/* Check if r_X is zero */
- emit_bcond(MIPS_COND_EQ, r_X, r_zero,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
emit_load_imm(r_ret, 0, ctx); /* delay slot */
emit_mod(r_A, r_X, ctx);
break;
@@ -926,7 +940,10 @@ load_ind:
break;
case BPF_JMP | BPF_JA:
/* pc += K */
- emit_b(b_imm(i + k + 1, ctx), ctx);
+ b_off = b_imm(i + k + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
emit_nop(ctx);
break;
case BPF_JMP | BPF_JEQ | BPF_K:
@@ -1056,12 +1073,16 @@ jmp_cmp:
break;
case BPF_RET | BPF_A:
ctx->flags |= SEEN_A;
- if (i != prog->len - 1)
+ if (i != prog->len - 1) {
/*
* If this is not the last instruction
* then jump to the epilogue
*/
- emit_b(b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ }
emit_reg_move(r_ret, r_A, ctx); /* delay slot */
break;
case BPF_RET | BPF_K:
@@ -1075,7 +1096,10 @@ jmp_cmp:
* If this is not the last instruction
* then jump to the epilogue
*/
- emit_b(b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
emit_nop(ctx);
}
break;
@@ -1133,8 +1157,10 @@ jmp_cmp:
/* Load *dev pointer */
emit_load_ptr(r_s0, r_skb, off, ctx);
/* error (0) in the delay slot */
- emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
- b_imm(prog->len, ctx), ctx);
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
emit_reg_move(r_ret, r_zero, ctx);
if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
@@ -1244,7 +1270,10 @@ void bpf_jit_compile(struct bpf_prog *fp)
/* Generate the actual JIT code */
build_prologue(&ctx);
- build_body(&ctx);
+ if (build_body(&ctx)) {
+ module_memfree(ctx.target);
+ goto out;
+ }
build_epilogue(&ctx);
/* Update the icache */
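
b_imm() returns a byte displacement for a MIPS branch, whose encoding holds a signed 16-bit word offset, so only about +/-128 KiB of code is reachable. Instead of silently truncating, build_body() now validates every displacement with is_bad_offset() and returns -E2BIG, and bpf_jit_compile() frees the buffer and bails out, leaving the program to the interpreter. A tiny runnable check of the same bounds (the constants mirror the helper added above):

#include <stdio.h>

/* signed 18-bit byte range of a MIPS I-type branch: 16-bit immediate << 2 */
static int is_bad_offset(int b_off)
{
	return b_off > 0x1ffff || b_off < -0x20000;
}

int main(void)
{
	printf("%d %d %d\n",
	       is_bad_offset(0x1ffff),	/* 0: last reachable forward byte      */
	       is_bad_offset(0x20000),	/* 1: 32768 instructions ahead, too far */
	       is_bad_offset(-0x20000));/* 0: furthest reachable backward byte */
	return 0;
}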
diff --git a/arch/nios2/Kconfig.debug b/arch/nios2/Kconfig.debug
index a8bc06e96ef5..ca1beb87f987 100644
--- a/arch/nios2/Kconfig.debug
+++ b/arch/nios2/Kconfig.debug
@@ -3,9 +3,10 @@
config EARLY_PRINTK
bool "Activate early kernel debugging"
default y
+ depends on TTY
select SERIAL_CORE_CONSOLE
help
- Enable early printk on console
+ Enable early printk on console.
This is useful for kernel debugging when your machine crashes very
early before the console code is initialized.
You should normally say N here, unless you want to debug such a crash.
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index cf8d687a2644..40bc8fb75e0b 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -149,8 +149,6 @@ static void __init find_limits(unsigned long *min, unsigned long *max_low,
void __init setup_arch(char **cmdline_p)
{
- int dram_start;
-
console_verbose();
memory_start = memblock_start_of_DRAM();
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index 36dbf5043fc0..aa995d91cd1d 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -55,7 +55,7 @@ int ccwgroup_create_dev(struct device *root, struct ccwgroup_driver *gdrv,
int num_devices, const char *buf);
extern int ccwgroup_set_online(struct ccwgroup_device *gdev);
-extern int ccwgroup_set_offline(struct ccwgroup_device *gdev);
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv);
extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 88419263a89a..840d8594437d 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -248,8 +248,7 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
#define EMIT6_PCREL(op1, op2, b1, b2, i, off, mask) \
({ \
- /* Branch instruction needs 6 bytes */ \
- int rel = (addrs[(i) + (off) + 1] - (addrs[(i) + 1] - 6)) / 2;\
+ int rel = (addrs[(i) + (off) + 1] - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), (op2) | (mask));\
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
@@ -761,10 +760,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9080000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_ADD | BPF_K: /* dst = (u32) dst + (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,imm */
- EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ if (imm != 0) {
+ /* alfi %dst,imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ADD | BPF_K: /* dst = dst + imm */
@@ -786,17 +785,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9090000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_SUB | BPF_K: /* dst = (u32) dst - (u32) imm */
- if (!imm)
- break;
- /* alfi %dst,-imm */
- EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ if (imm != 0) {
+ /* alfi %dst,-imm */
+ EMIT6_IMM(0xc20b0000, dst_reg, -imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_SUB | BPF_K: /* dst = dst - imm */
if (!imm)
break;
- /* agfi %dst,-imm */
- EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ if (imm == -0x80000000) {
+ /* algfi %dst,0x80000000 */
+ EMIT6_IMM(0xc20a0000, dst_reg, 0x80000000);
+ } else {
+ /* agfi %dst,-imm */
+ EMIT6_IMM(0xc2080000, dst_reg, -imm);
+ }
break;
/*
* BPF_MUL
@@ -811,10 +815,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb90c0000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_MUL | BPF_K: /* dst = (u32) dst * (u32) imm */
- if (imm == 1)
- break;
- /* msfi %r5,imm */
- EMIT6_IMM(0xc2010000, dst_reg, imm);
+ if (imm != 1) {
+ /* msfi %r5,imm */
+ EMIT6_IMM(0xc2010000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_MUL | BPF_K: /* dst = dst * imm */
@@ -867,6 +871,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
if (BPF_OP(insn->code) == BPF_MOD)
/* lhgi %dst,0 */
EMIT4_IMM(0xa7090000, dst_reg, 0);
+ else
+ EMIT_ZERO(dst_reg);
break;
}
/* lhi %w0,0 */
@@ -999,10 +1005,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9820000, dst_reg, src_reg);
break;
case BPF_ALU | BPF_XOR | BPF_K: /* dst = (u32) dst ^ (u32) imm */
- if (!imm)
- break;
- /* xilf %dst,imm */
- EMIT6_IMM(0xc0070000, dst_reg, imm);
+ if (imm != 0) {
+ /* xilf %dst,imm */
+ EMIT6_IMM(0xc0070000, dst_reg, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_XOR | BPF_K: /* dst = dst ^ imm */
@@ -1033,10 +1039,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000d, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_LSH | BPF_K: /* dst = (u32) dst << (u32) imm */
- if (imm == 0)
- break;
- /* sll %dst,imm(%r0) */
- EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sll %dst,imm(%r0) */
+ EMIT4_DISP(0x89000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_LSH | BPF_K: /* dst = dst << imm */
@@ -1058,10 +1064,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000c, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_RSH | BPF_K: /* dst = (u32) dst >> (u32) imm */
- if (imm == 0)
- break;
- /* srl %dst,imm(%r0) */
- EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* srl %dst,imm(%r0) */
+ EMIT4_DISP(0x88000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_RSH | BPF_K: /* dst = dst >> imm */
@@ -1083,10 +1089,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xeb000000, 0x000a, dst_reg, dst_reg, src_reg, 0);
break;
case BPF_ALU | BPF_ARSH | BPF_K: /* ((s32) dst >> imm */
- if (imm == 0)
- break;
- /* sra %dst,imm(%r0) */
- EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ if (imm != 0) {
+ /* sra %dst,imm(%r0) */
+ EMIT4_DISP(0x8a000000, dst_reg, REG_0, imm);
+ }
EMIT_ZERO(dst_reg);
break;
case BPF_ALU64 | BPF_ARSH | BPF_K: /* ((s64) dst) >>= imm */
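The BPF_ALU64 | BPF_SUB | BPF_K hunk above adds a special case for imm == -0x80000000: rewriting "dst -= imm" as "agfi %dst,-imm" would need the immediate +0x80000000, which does not fit in a signed 32-bit field, so the JIT emits an unsigned add (algfi) for that one value. A standalone sketch of the underlying arithmetic (plain userspace C, no kernel code assumed):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t imm = INT32_MIN;		/* -0x80000000 */
	int64_t negated = -(int64_t)imm;	/* widen first: +0x80000000 */

	printf("-(int64_t)INT32_MIN = 0x%llx\n", (unsigned long long)negated);
	printf("fits in a signed 32-bit immediate? %s\n",
	       negated <= INT32_MAX ? "yes" : "no");
	return 0;
}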
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 56bf35c2f29c..cdced80a7ffa 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -34,7 +34,7 @@ typedef struct { unsigned long long pmd; } pmd_t;
static inline pmd_t *pud_pgtable(pud_t pud)
{
- return (pmd_t *)pud_val(pud);
+ return (pmd_t *)(unsigned long)pud_val(pud);
}
/* only used by the stubbed out hugetlb gup code, should never be called */
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c
index c9da9f139694..f3a8cd491ce0 100644
--- a/arch/sparc/lib/iomap.c
+++ b/arch/sparc/lib/iomap.c
@@ -19,8 +19,10 @@ void ioport_unmap(void __iomem *addr)
EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);
+#ifdef CONFIG_PCI
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
/* nothing to do */
}
EXPORT_SYMBOL(pci_iounmap);
+#endif
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index dad7f85dcdea..ab83c22d274e 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2610,7 +2610,6 @@ config PCI_OLPC
config PCI_XEN
def_bool y
depends on PCI && XEN
- select SWIOTLB_XEN
config MMCONF_FAM10H
def_bool y
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
index fa2c3f50aecb..18d2f5199194 100644
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@ -367,10 +367,11 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
* %rdx: src (1..8 blocks)
* %rcx: num blocks (1..8)
*/
- FRAME_BEGIN
-
cmpq $5, %rcx;
jb sm4_aesni_avx_crypt4;
+
+ FRAME_BEGIN
+
vmovdqu (0 * 16)(%rdx), RA0;
vmovdqu (1 * 16)(%rdx), RA1;
vmovdqu (2 * 16)(%rdx), RA2;
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 2a57dbed4894..6dfa8ddaa60f 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2465,6 +2465,7 @@ static int x86_pmu_event_init(struct perf_event *event)
if (err) {
if (event->destroy)
event->destroy(event);
+ event->destroy = NULL;
}
if (READ_ONCE(x86_pmu.attr_rdpmc) &&
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 7011e87be6d0..9a044438072b 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -263,6 +263,7 @@ static struct event_constraint intel_icl_event_constraints[] = {
INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xe6, 0xf),
+ INTEL_EVENT_CONSTRAINT(0xef, 0xf),
INTEL_EVENT_CONSTRAINT_RANGE(0xf0, 0xf4, 0xf),
EVENT_CONSTRAINT_END
};
diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
index 5c7bcaa79623..1d5f14aff5f6 100644
--- a/arch/x86/include/asm/pkeys.h
+++ b/arch/x86/include/asm/pkeys.h
@@ -2,8 +2,6 @@
#ifndef _ASM_X86_PKEYS_H
#define _ASM_X86_PKEYS_H
-#define ARCH_DEFAULT_PKEY 0
-
/*
* If more than 16 keys are ever supported, a thorough audit
* will be necessary to ensure that the types that store key
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index f3fbb84ff8a7..68c257a3de0d 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -275,7 +275,7 @@ static inline int enqcmds(void __iomem *dst, const void *src)
{
const struct { char _[64]; } *__src = src;
struct { char _[64]; } __iomem *__dst = dst;
- int zf;
+ bool zf;
/*
* ENQCMDS %(rdx), rax
diff --git a/arch/x86/include/asm/xen/swiotlb-xen.h b/arch/x86/include/asm/xen/swiotlb-xen.h
index 6b56d0d45d15..66b4ddde7743 100644
--- a/arch/x86/include/asm/xen/swiotlb-xen.h
+++ b/arch/x86/include/asm/xen/swiotlb-xen.h
@@ -3,14 +3,10 @@
#define _ASM_X86_SWIOTLB_XEN_H
#ifdef CONFIG_SWIOTLB_XEN
-extern int xen_swiotlb;
extern int __init pci_xen_swiotlb_detect(void);
-extern void __init pci_xen_swiotlb_init(void);
extern int pci_xen_swiotlb_init_late(void);
#else
-#define xen_swiotlb (0)
-static inline int __init pci_xen_swiotlb_detect(void) { return 0; }
-static inline void __init pci_xen_swiotlb_init(void) { }
+#define pci_xen_swiotlb_detect NULL
static inline int pci_xen_swiotlb_init_late(void) { return -ENXIO; }
#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 79f164141116..40ed44ead063 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -830,6 +830,20 @@ void __init setup_arch(char **cmdline_p)
x86_init.oem.arch_setup();
+ /*
+ * Do some memory reservations *before* memory is added to memblock, so
+ * memblock allocations won't overwrite it.
+ *
+ * After this point, everything still needed from the boot loader or
+ * firmware or kernel text should be early reserved or marked not RAM in
+ * e820. All other memory is free game.
+ *
+ * This call needs to happen before e820__memory_setup() which calls the
+ * xen_memory_setup() on Xen dom0 which relies on the fact that those
+ * early reservations have happened already.
+ */
+ early_reserve_memory();
+
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
e820__memory_setup();
parse_setup_data();
@@ -876,18 +890,6 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
- /*
- * Do some memory reservations *before* memory is added to
- * memblock, so memblock allocations won't overwrite it.
- * Do it after early param, so we could get (unlikely) panic from
- * serial.
- *
- * After this point everything still needed from the boot loader or
- * firmware or kernel text should be early reserved or marked not
- * RAM in e820. All other memory is free game.
- */
- early_reserve_memory();
-
#ifdef CONFIG_MEMORY_HOTPLUG
/*
* Memory used by the kernel cannot be hot-removed because Linux
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 532791ffcbb9..28b1a4e57827 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -435,7 +435,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
__FOP_RET(#op)
asm(".pushsection .fixup, \"ax\"\n"
- ".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret\n"
".popsection");
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 058f19b20465..c565def611e2 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -37,10 +37,10 @@
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
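The __get_next()/__peek_nbyte_next() change above replaces a type-punned pointer dereference with memcpy(), so the decoder never performs a potentially unaligned, strict-aliasing-violating load. A hedged userspace sketch of the same idiom (the helper name is illustrative):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* memcpy() is well defined at any alignment; casting the byte pointer and
 * dereferencing it is not. Compilers turn this memcpy() into a plain load
 * where the target allows it. */
static uint32_t get_unaligned_u32(const unsigned char *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* safe at any alignment */
	return v;			/* value shown assumes a little-endian host */
}

int main(void)
{
	unsigned char buf[8] = { 0xaa, 0x78, 0x56, 0x34, 0x12, 0xbb, 0xcc, 0xdd };

	/* start at offset 1 on purpose: *(uint32_t *)(buf + 1) would be
	 * undefined behaviour on strict-alignment targets */
	printf("0x%08x\n", get_unaligned_u32(buf + 1));
	return 0;
}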
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b2eefdefc108..84a2c8c4af73 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -710,7 +710,8 @@ oops:
static noinline void
kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
- unsigned long address, int signal, int si_code)
+ unsigned long address, int signal, int si_code,
+ u32 pkey)
{
WARN_ON_ONCE(user_mode(regs));
@@ -735,8 +736,12 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
set_signal_archinfo(address, error_code);
- /* XXX: hwpoison faults will set the wrong code. */
- force_sig_fault(signal, si_code, (void __user *)address);
+ if (si_code == SEGV_PKUERR) {
+ force_sig_pkuerr((void __user *)address, pkey);
+ } else {
+ /* XXX: hwpoison faults will set the wrong code. */
+ force_sig_fault(signal, si_code, (void __user *)address);
+ }
}
/*
@@ -798,7 +803,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
struct task_struct *tsk = current;
if (!user_mode(regs)) {
- kernelmode_fixup_or_oops(regs, error_code, address, pkey, si_code);
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ SIGSEGV, si_code, pkey);
return;
}
@@ -930,7 +936,8 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
{
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
- kernelmode_fixup_or_oops(regs, error_code, address, SIGBUS, BUS_ADRERR);
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ SIGBUS, BUS_ADRERR, ARCH_DEFAULT_PKEY);
return;
}
@@ -1396,7 +1403,8 @@ good_area:
*/
if (!user_mode(regs))
kernelmode_fixup_or_oops(regs, error_code, address,
- SIGBUS, BUS_ADRERR);
+ SIGBUS, BUS_ADRERR,
+ ARCH_DEFAULT_PKEY);
return;
}
@@ -1416,7 +1424,8 @@ good_area:
return;
if (fatal_signal_pending(current) && !user_mode(regs)) {
- kernelmode_fixup_or_oops(regs, error_code, address, 0, 0);
+ kernelmode_fixup_or_oops(regs, error_code, address,
+ 0, 0, ARCH_DEFAULT_PKEY);
return;
}
@@ -1424,7 +1433,8 @@ good_area:
/* Kernel mode? Handle exceptions or die: */
if (!user_mode(regs)) {
kernelmode_fixup_or_oops(regs, error_code, address,
- SIGSEGV, SEGV_MAPERR);
+ SIGSEGV, SEGV_MAPERR,
+ ARCH_DEFAULT_PKEY);
return;
}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 0fe6aacef3db..9ea57389c554 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1341,9 +1341,10 @@ st: if (is_imm8(insn->off))
if (insn->imm == (BPF_AND | BPF_FETCH) ||
insn->imm == (BPF_OR | BPF_FETCH) ||
insn->imm == (BPF_XOR | BPF_FETCH)) {
- u8 *branch_target;
bool is64 = BPF_SIZE(insn->code) == BPF_DW;
u32 real_src_reg = src_reg;
+ u32 real_dst_reg = dst_reg;
+ u8 *branch_target;
/*
* Can't be implemented with a single x86 insn.
@@ -1354,11 +1355,13 @@ st: if (is_imm8(insn->off))
emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
if (src_reg == BPF_REG_0)
real_src_reg = BPF_REG_AX;
+ if (dst_reg == BPF_REG_0)
+ real_dst_reg = BPF_REG_AX;
branch_target = prog;
/* Load old value */
emit_ldx(&prog, BPF_SIZE(insn->code),
- BPF_REG_0, dst_reg, insn->off);
+ BPF_REG_0, real_dst_reg, insn->off);
/*
* Perform the (commutative) operation locally,
* put the result in the AUX_REG.
@@ -1369,7 +1372,8 @@ st: if (is_imm8(insn->off))
add_2reg(0xC0, AUX_REG, real_src_reg));
/* Attempt to swap in new value */
err = emit_atomic(&prog, BPF_CMPXCHG,
- dst_reg, AUX_REG, insn->off,
+ real_dst_reg, AUX_REG,
+ insn->off,
BPF_SIZE(insn->code));
if (WARN_ON(err))
return err;
@@ -1383,11 +1387,10 @@ st: if (is_imm8(insn->off))
/* Restore R0 after clobbering RAX */
emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
break;
-
}
err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
- insn->off, BPF_SIZE(insn->code));
+ insn->off, BPF_SIZE(insn->code));
if (err)
return err;
break;
@@ -1744,7 +1747,7 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
}
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
- struct bpf_prog *p, int stack_size, bool mod_ret)
+ struct bpf_prog *p, int stack_size, bool save_ret)
{
u8 *prog = *pprog;
u8 *jmp_insn;
@@ -1777,11 +1780,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
if (emit_call(&prog, p->bpf_func, prog))
return -EINVAL;
- /* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
+ /*
+ * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
* of the previous call which is then passed on the stack to
* the next BPF program.
+ *
+ * BPF_TRAMP_FENTRY trampoline may need to return the return
+ * value of BPF_PROG_TYPE_STRUCT_OPS prog.
*/
- if (mod_ret)
+ if (save_ret)
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
/* replace 2 nops with JE insn, since jmp target is known */
@@ -1828,13 +1835,15 @@ static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
}
static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
- struct bpf_tramp_progs *tp, int stack_size)
+ struct bpf_tramp_progs *tp, int stack_size,
+ bool save_ret)
{
int i;
u8 *prog = *pprog;
for (i = 0; i < tp->nr_progs; i++) {
- if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, false))
+ if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
+ save_ret))
return -EINVAL;
}
*pprog = prog;
@@ -1877,6 +1886,23 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
return 0;
}
+static bool is_valid_bpf_tramp_flags(unsigned int flags)
+{
+ if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
+ (flags & BPF_TRAMP_F_SKIP_FRAME))
+ return false;
+
+ /*
+ * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
+ * and it must be used alone.
+ */
+ if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
+ (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
+ return false;
+
+ return true;
+}
+
/* Example:
* __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
* its 'struct btf_func_model' will be nr_args=2
@@ -1949,17 +1975,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
u8 **branches = NULL;
u8 *prog;
+ bool save_ret;
/* x86-64 supports up to 6 arguments. 7+ can be added in the future */
if (nr_args > 6)
return -ENOTSUPP;
- if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
- (flags & BPF_TRAMP_F_SKIP_FRAME))
+ if (!is_valid_bpf_tramp_flags(flags))
return -EINVAL;
- if (flags & BPF_TRAMP_F_CALL_ORIG)
- stack_size += 8; /* room for return value of orig_call */
+ /* room for return value of orig_call or fentry prog */
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ if (save_ret)
+ stack_size += 8;
if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8; /* room for IP address argument */
@@ -2005,7 +2033,8 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (fentry->nr_progs)
- if (invoke_bpf(m, &prog, fentry, stack_size))
+ if (invoke_bpf(m, &prog, fentry, stack_size,
+ flags & BPF_TRAMP_F_RET_FENTRY_RET))
return -EINVAL;
if (fmod_ret->nr_progs) {
@@ -2052,7 +2081,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
}
if (fexit->nr_progs)
- if (invoke_bpf(m, &prog, fexit, stack_size)) {
+ if (invoke_bpf(m, &prog, fexit, stack_size, false)) {
ret = -EINVAL;
goto cleanup;
}
@@ -2072,9 +2101,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
ret = -EINVAL;
goto cleanup;
}
- /* restore original return value back into RAX */
- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
}
+ /* restore return value of orig_call or fentry prog back into RAX */
+ if (save_ret)
+ emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
EMIT1(0x5B); /* pop rbx */
EMIT1(0xC9); /* leave */
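The new is_valid_bpf_tramp_flags() shown above rejects BPF_TRAMP_F_RET_FENTRY_RET whenever any other flag is set: "flags & ~F" is non-zero exactly when a bit other than F is present. A tiny sketch of that test with made-up flag values (not the kernel's definitions):

#include <stdio.h>

#define F_RESTORE_REGS   (1u << 0)	/* illustrative values only */
#define F_SKIP_FRAME     (1u << 2)
#define F_RET_FENTRY_RET (1u << 4)

static int flags_valid(unsigned int flags)
{
	if ((flags & F_RESTORE_REGS) && (flags & F_SKIP_FRAME))
		return 0;			/* mutually exclusive */
	if ((flags & F_RET_FENTRY_RET) && (flags & ~F_RET_FENTRY_RET))
		return 0;			/* must be used alone */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       flags_valid(F_RET_FENTRY_RET),			/* 1 */
	       flags_valid(F_RET_FENTRY_RET | F_SKIP_FRAME),	/* 0 */
	       flags_valid(F_RESTORE_REGS | F_SKIP_FRAME));	/* 0 */
	return 0;
}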
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 349f780a1567..6e0d0754f94f 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -755,8 +755,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
preempt_enable();
}
-static void xen_convert_trap_info(const struct desc_ptr *desc,
- struct trap_info *traps)
+static unsigned xen_convert_trap_info(const struct desc_ptr *desc,
+ struct trap_info *traps, bool full)
{
unsigned in, out, count;
@@ -766,17 +766,18 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
for (in = out = 0; in < count; in++) {
gate_desc *entry = (gate_desc *)(desc->address) + in;
- if (cvt_gate_to_trap(in, entry, &traps[out]))
+ if (cvt_gate_to_trap(in, entry, &traps[out]) || full)
out++;
}
- traps[out].address = 0;
+
+ return out;
}
void xen_copy_trap_info(struct trap_info *traps)
{
const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
- xen_convert_trap_info(desc, traps);
+ xen_convert_trap_info(desc, traps, true);
}
/* Load a new IDT into Xen. In principle this can be per-CPU, so we
@@ -786,6 +787,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
{
static DEFINE_SPINLOCK(lock);
static struct trap_info traps[257];
+ unsigned out;
trace_xen_cpu_load_idt(desc);
@@ -793,7 +795,8 @@ static void xen_load_idt(const struct desc_ptr *desc)
memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
- xen_convert_trap_info(desc, traps);
+ out = xen_convert_trap_info(desc, traps, false);
+ memset(&traps[out], 0, sizeof(traps[0]));
xen_mc_flush();
if (HYPERVISOR_set_trap_table(traps))
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
index 54f9aa7e8457..46df59aeaa06 100644
--- a/arch/x86/xen/pci-swiotlb-xen.c
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -18,7 +18,7 @@
#endif
#include <linux/export.h>
-int xen_swiotlb __read_mostly;
+static int xen_swiotlb __read_mostly;
/*
* pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
@@ -56,7 +56,7 @@ int __init pci_xen_swiotlb_detect(void)
return xen_swiotlb;
}
-void __init pci_xen_swiotlb_init(void)
+static void __init pci_xen_swiotlb_init(void)
{
if (xen_swiotlb) {
xen_swiotlb_init_early();
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 96afadf9878e..7ed56c6075b0 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -290,8 +290,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
gdt = get_cpu_gdt_rw(cpu);
- memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
-
/*
* Bring up the CPU in cpu_bringup_and_idle() with the stack
* pointing just below where pt_regs would be if it were a normal
@@ -308,8 +306,6 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
xen_copy_trap_info(ctxt->trap_ctxt);
- ctxt->ldt_ents = 0;
-
BUG_ON((unsigned long)gdt & ~PAGE_MASK);
gdt_mfn = arbitrary_virt_to_mfn(gdt);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index dd13c2bbc29c..480e1a134859 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -2662,15 +2662,6 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
* are likely to increase the throughput.
*/
bfqq->new_bfqq = new_bfqq;
- /*
- * The above assignment schedules the following redirections:
- * each time some I/O for bfqq arrives, the process that
- * generated that I/O is disassociated from bfqq and
- * associated with new_bfqq. Here we increases new_bfqq->ref
- * in advance, adding the number of processes that are
- * expected to be associated with new_bfqq as they happen to
- * issue I/O.
- */
new_bfqq->ref += process_refs;
return new_bfqq;
}
@@ -2733,10 +2724,6 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
{
struct bfq_queue *in_service_bfqq, *new_bfqq;
- /* if a merge has already been setup, then proceed with that first */
- if (bfqq->new_bfqq)
- return bfqq->new_bfqq;
-
/*
* Check delayed stable merge for rotational or non-queueing
* devs. For this branch to be executed, bfqq must not be
@@ -2838,6 +2825,9 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (bfq_too_late_for_merging(bfqq))
return NULL;
+ if (bfqq->new_bfqq)
+ return bfqq->new_bfqq;
+
if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq))
return NULL;
diff --git a/block/bio.c b/block/bio.c
index 5df3dd282e40..a6fb6a0b4295 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1466,7 +1466,7 @@ again:
if (!bio_integrity_endio(bio))
return;
- if (bio->bi_bdev)
+ if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED))
rq_qos_done_bio(bio->bi_bdev->bd_disk->queue, bio);
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
diff --git a/block/bsg.c b/block/bsg.c
index 351095193788..882f56bff14f 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -165,13 +165,20 @@ static const struct file_operations bsg_fops = {
.llseek = default_llseek,
};
+static void bsg_device_release(struct device *dev)
+{
+ struct bsg_device *bd = container_of(dev, struct bsg_device, device);
+
+ ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
+ kfree(bd);
+}
+
void bsg_unregister_queue(struct bsg_device *bd)
{
if (bd->queue->kobj.sd)
sysfs_remove_link(&bd->queue->kobj, "bsg");
cdev_device_del(&bd->cdev, &bd->device);
- ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
- kfree(bd);
+ put_device(&bd->device);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
@@ -193,11 +200,13 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
if (ret < 0) {
if (ret == -ENOSPC)
dev_err(parent, "bsg: too many bsg devices\n");
- goto out_kfree;
+ kfree(bd);
+ return ERR_PTR(ret);
}
bd->device.devt = MKDEV(bsg_major, ret);
bd->device.class = bsg_class;
bd->device.parent = parent;
+ bd->device.release = bsg_device_release;
dev_set_name(&bd->device, "%s", name);
device_initialize(&bd->device);
@@ -205,7 +214,7 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
bd->cdev.owner = THIS_MODULE;
ret = cdev_device_add(&bd->cdev, &bd->device);
if (ret)
- goto out_ida_remove;
+ goto out_put_device;
if (q->kobj.sd) {
ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
@@ -217,10 +226,8 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
out_device_del:
cdev_device_del(&bd->cdev, &bd->device);
-out_ida_remove:
- ida_simple_remove(&bsg_minor_ida, MINOR(bd->device.devt));
-out_kfree:
- kfree(bd);
+out_put_device:
+ put_device(&bd->device);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
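The bsg error paths above stop calling kfree() directly and drop a reference with put_device() instead, because once the embedded struct device has been initialised the memory may only be freed from its release callback (bsg_device_release() here). A minimal userspace sketch of that ownership rule; the names and refcount type are illustrative, not kernel API:

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	void (*release)(struct obj *obj);
	int minor;			/* stands in for the ida-allocated minor */
};

static void obj_put(struct obj *obj)
{
	if (--obj->refcount == 0)
		obj->release(obj);	/* the only place the memory is freed */
}

static void demo_release(struct obj *obj)
{
	printf("releasing minor %d\n", obj->minor);
	free(obj);			/* frees the container, like bsg_device_release() */
}

int main(void)
{
	struct obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;
	obj->refcount = 1;
	obj->release = demo_release;
	obj->minor = 3;
	obj_put(obj);			/* never free() the object directly after init */
	return 0;
}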
diff --git a/block/fops.c b/block/fops.c
index ffce6f6c68dd..1e970c247e0e 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -14,6 +14,7 @@
#include <linux/task_io_accounting_ops.h>
#include <linux/falloc.h>
#include <linux/suspend.h>
+#include <linux/fs.h>
#include "blk.h"
static struct inode *bdev_file_inode(struct file *file)
@@ -553,7 +554,8 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
static long blkdev_fallocate(struct file *file, int mode, loff_t start,
loff_t len)
{
- struct block_device *bdev = I_BDEV(bdev_file_inode(file));
+ struct inode *inode = bdev_file_inode(file);
+ struct block_device *bdev = I_BDEV(inode);
loff_t end = start + len - 1;
loff_t isize;
int error;
@@ -580,10 +582,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
if ((start | len) & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
+ filemap_invalidate_lock(inode->i_mapping);
+
/* Invalidate the page cache, including dirty pages. */
error = truncate_bdev_range(bdev, file->f_mode, start, end);
if (error)
- return error;
+ goto fail;
switch (mode) {
case FALLOC_FL_ZERO_RANGE:
@@ -600,17 +604,12 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
GFP_KERNEL, 0);
break;
default:
- return -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
- if (error)
- return error;
- /*
- * Invalidate the page cache again; if someone wandered in and dirtied
- * a page, we just discard it - userspace has no way of knowing whether
- * the write happened before or after discard completing...
- */
- return truncate_bdev_range(bdev, file->f_mode, start, end);
+ fail:
+ filemap_invalidate_unlock(inode->i_mapping);
+ return error;
}
const struct file_operations def_blk_fops = {
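The reworked blkdev_fallocate() above takes the invalidate lock up front and funnels every exit through a single "fail:" label, so the lock is always dropped and one error value is returned. A userspace sketch of the same shape, with a pthread mutex standing in for filemap_invalidate_lock (illustrative only):

#include <stdio.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;

static int punch_range(int mode, long start, long len)
{
	int error = 0;

	pthread_mutex_lock(&range_lock);

	if (start < 0 || len <= 0) {	/* stands in for truncate_bdev_range() failing */
		error = -EINVAL;
		goto fail;
	}

	switch (mode) {
	case 0:				/* pretend this is the one supported mode */
		break;
	default:
		error = -EOPNOTSUPP;	/* falls out of the switch to the single unlock */
	}

fail:
	pthread_mutex_unlock(&range_lock);
	return error;
}

int main(void)
{
	printf("%d %d %d\n", punch_range(0, 0, 4096),
	       punch_range(1, 0, 4096), punch_range(0, -1, 4096));
	return 0;
}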
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index a3ef6cce644c..7dd80acf92c7 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3007,6 +3007,18 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
ndr_desc->target_node = NUMA_NO_NODE;
}
+ /* Fallback to address based numa information if node lookup failed */
+ if (ndr_desc->numa_node == NUMA_NO_NODE) {
+ ndr_desc->numa_node = memory_add_physaddr_to_nid(spa->address);
+ dev_info(acpi_desc->dev, "changing numa node from %d to %d for nfit region [%pa-%pa]",
+ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ }
+ if (ndr_desc->target_node == NUMA_NO_NODE) {
+ ndr_desc->target_node = phys_to_target_node(spa->address);
+ dev_info(acpi_desc->dev, "changing target node from %d to %d for nfit region [%pa-%pa]",
+ NUMA_NO_NODE, ndr_desc->numa_node, &res.start, &res.end);
+ }
+
/*
* Persistence domain bits are hierarchical, if
* ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index a43f1521efe6..45c5c0e45e33 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -284,8 +284,7 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
#define should_use_kmap(pfn) page_is_ram(pfn)
#endif
-static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
- bool memory)
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
unsigned long pfn;
@@ -295,8 +294,7 @@ static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
return NULL;
return (void __iomem __force *)kmap(pfn_to_page(pfn));
} else
- return memory ? acpi_os_memmap(pg_off, pg_sz) :
- acpi_os_ioremap(pg_off, pg_sz);
+ return acpi_os_ioremap(pg_off, pg_sz);
}
static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
@@ -311,10 +309,9 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
}
/**
- * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
+ * acpi_os_map_iomem - Get a virtual address for a given physical address range.
* @phys: Start of the physical address range to map.
* @size: Size of the physical address range to map.
- * @memory: true if remapping memory, false if IO
*
* Look up the given physical address range in the list of existing ACPI memory
* mappings. If found, get a reference to it and return a pointer to it (its
@@ -324,8 +321,8 @@ static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
* During early init (when acpi_permanent_mmap has not been set yet) this
* routine simply calls __acpi_map_table() to get the job done.
*/
-static void __iomem __ref
-*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
+void __iomem __ref
+*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
struct acpi_ioremap *map;
void __iomem *virt;
@@ -356,7 +353,7 @@ static void __iomem __ref
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
- virt = acpi_map(phys, size, memory);
+ virt = acpi_map(phys, size);
if (!virt) {
mutex_unlock(&acpi_ioremap_lock);
kfree(map);
@@ -375,17 +372,11 @@ out:
mutex_unlock(&acpi_ioremap_lock);
return map->virt + (phys - map->phys);
}
-
-void __iomem *__ref
-acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
-{
- return __acpi_os_map_iomem(phys, size, false);
-}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
- return (void *)__acpi_os_map_iomem(phys, size, true);
+ return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d9030cb6b1e4..9edacc8b9768 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1852,6 +1852,7 @@ static void binder_deferred_fd_close(int fd)
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_thread *thread,
struct binder_buffer *buffer,
binder_size_t failed_at,
bool is_failure)
@@ -2011,8 +2012,16 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
&proc->alloc, &fd, buffer,
offset, sizeof(fd));
WARN_ON(err);
- if (!err)
+ if (!err) {
binder_deferred_fd_close(fd);
+ /*
+ * Need to make sure the thread goes
+ * back to userspace to complete the
+ * deferred close
+ */
+ if (thread)
+ thread->looper_need_return = true;
+ }
}
} break;
default:
@@ -3038,9 +3047,8 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
binder_inner_proc_lock(target_proc);
- if (target_thread->is_dead || target_proc->is_frozen) {
- return_error = target_thread->is_dead ?
- BR_DEAD_REPLY : BR_FROZEN_REPLY;
+ if (target_thread->is_dead) {
+ return_error = BR_DEAD_REPLY;
binder_inner_proc_unlock(target_proc);
goto err_dead_proc_or_thread;
}
@@ -3105,7 +3113,7 @@ err_bad_parent:
err_copy_data_failed:
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
- binder_transaction_buffer_release(target_proc, t->buffer,
+ binder_transaction_buffer_release(target_proc, NULL, t->buffer,
buffer_offset, true);
if (target_node)
binder_dec_node_tmpref(target_node);
@@ -3184,7 +3192,9 @@ err_invalid_target_handle:
* Cleanup buffer and free it.
*/
static void
-binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+binder_free_buf(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_buffer *buffer)
{
binder_inner_proc_lock(proc);
if (buffer->transaction) {
@@ -3212,7 +3222,7 @@ binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, buffer, 0, false);
+ binder_transaction_buffer_release(proc, thread, buffer, 0, false);
binder_alloc_free_buf(&proc->alloc, buffer);
}
@@ -3414,7 +3424,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
- binder_free_buf(proc, buffer);
+ binder_free_buf(proc, thread, buffer);
break;
}
@@ -4107,7 +4117,7 @@ retry:
buffer->transaction = NULL;
binder_cleanup_transaction(t, "fd fixups failed",
BR_FAILED_REPLY);
- binder_free_buf(proc, buffer);
+ binder_free_buf(proc, thread, buffer);
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
proc->pid, thread->pid,
@@ -4648,6 +4658,22 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
return 0;
}
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+ struct rb_node *n;
+ struct binder_thread *thread;
+
+ if (proc->outstanding_txns > 0)
+ return true;
+
+ for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->transaction_stack)
+ return true;
+ }
+ return false;
+}
+
static int binder_ioctl_freeze(struct binder_freeze_info *info,
struct binder_proc *target_proc)
{
@@ -4679,8 +4705,13 @@ static int binder_ioctl_freeze(struct binder_freeze_info *info,
(!target_proc->outstanding_txns),
msecs_to_jiffies(info->timeout_ms));
- if (!ret && target_proc->outstanding_txns)
- ret = -EAGAIN;
+ /* Check pending transactions that wait for reply */
+ if (ret >= 0) {
+ binder_inner_proc_lock(target_proc);
+ if (binder_txns_pending_ilocked(target_proc))
+ ret = -EAGAIN;
+ binder_inner_proc_unlock(target_proc);
+ }
if (ret < 0) {
binder_inner_proc_lock(target_proc);
@@ -4696,6 +4727,7 @@ static int binder_ioctl_get_freezer_info(
{
struct binder_proc *target_proc;
bool found = false;
+ __u32 txns_pending;
info->sync_recv = 0;
info->async_recv = 0;
@@ -4705,7 +4737,9 @@ static int binder_ioctl_get_freezer_info(
if (target_proc->pid == info->pid) {
found = true;
binder_inner_proc_lock(target_proc);
- info->sync_recv |= target_proc->sync_recv;
+ txns_pending = binder_txns_pending_ilocked(target_proc);
+ info->sync_recv |= target_proc->sync_recv |
+ (txns_pending << 1);
info->async_recv |= target_proc->async_recv;
binder_inner_proc_unlock(target_proc);
}
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 810c0b84d3f8..402c4d4362a8 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -378,6 +378,8 @@ struct binder_ref {
* binder transactions
* (protected by @inner_lock)
* @sync_recv: process received sync transactions since last frozen
+ * bit 0: received sync transaction after being frozen
+ * bit 1: new pending sync transaction during freezing
* (protected by @inner_lock)
* @async_recv: process received async transactions since last frozen
* (protected by @inner_lock)
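With this change sync_recv becomes a small bit field rather than a plain boolean: bit 0 keeps its old meaning (a sync transaction arrived after the process was frozen) and bit 1 carries the new binder_txns_pending_ilocked() result (txns_pending << 1). A tiny sketch of packing and decoding those bits; the helper names are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t pack_sync_recv(bool got_sync, bool txns_pending)
{
	return (uint32_t)got_sync | ((uint32_t)txns_pending << 1);
}

int main(void)
{
	uint32_t sync_recv = pack_sync_recv(false, true);

	printf("received sync txn while frozen: %u\n", sync_recv & 1);
	printf("sync txns still pending:        %u\n", (sync_recv >> 1) & 1);
	return 0;
}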
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e65dd803a453..15986cc2fe5e 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -95,6 +95,8 @@ int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
list_add(&link->s_hook, &sup->consumers);
list_add(&link->c_hook, &con->suppliers);
+ pr_debug("%pfwP Linked as a fwnode consumer to %pfwP\n",
+ con, sup);
out:
mutex_unlock(&fwnode_link_lock);
@@ -102,6 +104,21 @@ out:
}
/**
+ * __fwnode_link_del - Delete a link between two fwnode_handles.
+ * @link: the fwnode_link to be deleted
+ *
+ * The fwnode_link_lock needs to be held when this function is called.
+ */
+static void __fwnode_link_del(struct fwnode_link *link)
+{
+ pr_debug("%pfwP Dropping the fwnode link to %pfwP\n",
+ link->consumer, link->supplier);
+ list_del(&link->s_hook);
+ list_del(&link->c_hook);
+ kfree(link);
+}
+
+/**
* fwnode_links_purge_suppliers - Delete all supplier links of fwnode_handle.
* @fwnode: fwnode whose supplier links need to be deleted
*
@@ -112,11 +129,8 @@ static void fwnode_links_purge_suppliers(struct fwnode_handle *fwnode)
struct fwnode_link *link, *tmp;
mutex_lock(&fwnode_link_lock);
- list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook) {
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
- }
+ list_for_each_entry_safe(link, tmp, &fwnode->suppliers, c_hook)
+ __fwnode_link_del(link);
mutex_unlock(&fwnode_link_lock);
}
@@ -131,11 +145,8 @@ static void fwnode_links_purge_consumers(struct fwnode_handle *fwnode)
struct fwnode_link *link, *tmp;
mutex_lock(&fwnode_link_lock);
- list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook) {
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
- }
+ list_for_each_entry_safe(link, tmp, &fwnode->consumers, s_hook)
+ __fwnode_link_del(link);
mutex_unlock(&fwnode_link_lock);
}
@@ -975,6 +986,7 @@ int device_links_check_suppliers(struct device *dev)
{
struct device_link *link;
int ret = 0;
+ struct fwnode_handle *sup_fw;
/*
* Device waiting for supplier to become available is not allowed to
@@ -983,10 +995,11 @@ int device_links_check_suppliers(struct device *dev)
mutex_lock(&fwnode_link_lock);
if (dev->fwnode && !list_empty(&dev->fwnode->suppliers) &&
!fw_devlink_is_permissive()) {
- dev_dbg(dev, "probe deferral - wait for supplier %pfwP\n",
- list_first_entry(&dev->fwnode->suppliers,
- struct fwnode_link,
- c_hook)->supplier);
+ sup_fw = list_first_entry(&dev->fwnode->suppliers,
+ struct fwnode_link,
+ c_hook)->supplier;
+ dev_err_probe(dev, -EPROBE_DEFER, "wait for supplier %pfwP\n",
+ sup_fw);
mutex_unlock(&fwnode_link_lock);
return -EPROBE_DEFER;
}
@@ -1001,8 +1014,9 @@ int device_links_check_suppliers(struct device *dev)
if (link->status != DL_STATE_AVAILABLE &&
!(link->flags & DL_FLAG_SYNC_STATE_ONLY)) {
device_links_missing_supplier(dev);
- dev_dbg(dev, "probe deferral - supplier %s not ready\n",
- dev_name(link->supplier));
+ dev_err_probe(dev, -EPROBE_DEFER,
+ "supplier %s not ready\n",
+ dev_name(link->supplier));
ret = -EPROBE_DEFER;
break;
}
@@ -1722,6 +1736,25 @@ static int fw_devlink_create_devlink(struct device *con,
struct device *sup_dev;
int ret = 0;
+ /*
+ * In some cases, a device P might also be a supplier to its child node
+ * C. However, this would defer the probe of C until the probe of P
+ * completes successfully. This is perfectly fine in the device driver
+ * model. device_add() doesn't guarantee probe completion of the device
+ * by the time it returns.
+ *
+ * However, there are a few drivers that assume C will finish probing
+ * as soon as it's added and before P finishes probing. So, we provide
+ * a flag to let fw_devlink know not to delay the probe of C until the
+ * probe of P completes successfully.
+ *
+ * When such a flag is set, we can't create device links where P is the
+ * supplier of C as that would delay the probe of C.
+ */
+ if (sup_handle->flags & FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD &&
+ fwnode_is_ancestor_of(sup_handle, con->fwnode))
+ return -EINVAL;
+
sup_dev = get_dev_from_fwnode(sup_handle);
if (sup_dev) {
/*
@@ -1772,14 +1805,21 @@ static int fw_devlink_create_devlink(struct device *con,
* be broken by applying logic. Check for these types of cycles and
* break them so that devices in the cycle probe properly.
*
- * If the supplier's parent is dependent on the consumer, then
- * the consumer-supplier dependency is a false dependency. So,
- * treat it as an invalid link.
+ * If the supplier's parent is dependent on the consumer, then the
+ * consumer and supplier have a cyclic dependency. Since fw_devlink
+ * can't tell which of the inferred dependencies are incorrect, don't
+ * enforce probe ordering between any of the devices in this cyclic
+ * dependency. Do this by relaxing all the fw_devlink device links in
+ * this cycle and by treating the fwnode link between the consumer and
+ * the supplier as an invalid dependency.
*/
sup_dev = fwnode_get_next_parent_dev(sup_handle);
if (sup_dev && device_is_dependent(con, sup_dev)) {
- dev_dbg(con, "Not linking to %pfwP - False link\n",
- sup_handle);
+ dev_info(con, "Fixing up cyclic dependency with %pfwP (%s)\n",
+ sup_handle, dev_name(sup_dev));
+ device_links_write_lock();
+ fw_devlink_relax_cycle(con, sup_dev);
+ device_links_write_unlock();
ret = -EINVAL;
} else {
/*
@@ -1858,9 +1898,7 @@ static void __fw_devlink_link_to_consumers(struct device *dev)
if (!own_link || ret == -EAGAIN)
continue;
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
+ __fwnode_link_del(link);
}
}
@@ -1912,9 +1950,7 @@ static void __fw_devlink_link_to_suppliers(struct device *dev,
if (!own_link || ret == -EAGAIN)
continue;
- list_del(&link->s_hook);
- list_del(&link->c_hook);
- kfree(link);
+ __fwnode_link_del(link);
/* If no device link was created, nothing more to do. */
if (ret)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 7bd0f3cfb7eb..c46f6a8e14d2 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -1116,6 +1116,9 @@ int device_create_managed_software_node(struct device *dev,
to_swnode(fwnode)->managed = true;
set_secondary_fwnode(dev, fwnode);
+ if (device_is_registered(dev))
+ software_node_notify(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(device_create_managed_software_node);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5170a630778d..1183f7872b71 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -97,13 +97,18 @@ struct nbd_config {
atomic_t recv_threads;
wait_queue_head_t recv_wq;
- loff_t blksize;
+ unsigned int blksize_bits;
loff_t bytesize;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *dbg_dir;
#endif
};
+static inline unsigned int nbd_blksize(struct nbd_config *config)
+{
+ return 1u << config->blksize_bits;
+}
+
struct nbd_device {
struct blk_mq_tag_set tag_set;
@@ -146,7 +151,7 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
-#define NBD_DEF_BLKSIZE 1024
+#define NBD_DEF_BLKSIZE_BITS 10
static unsigned int nbds_max = 16;
static int max_part = 16;
@@ -317,12 +322,12 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
if (!blksize)
- blksize = NBD_DEF_BLKSIZE;
+ blksize = 1u << NBD_DEF_BLKSIZE_BITS;
if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
return -EINVAL;
nbd->config->bytesize = bytesize;
- nbd->config->blksize = blksize;
+ nbd->config->blksize_bits = __ffs(blksize);
if (!nbd->task_recv)
return 0;
@@ -1337,7 +1342,7 @@ static int nbd_start_device(struct nbd_device *nbd)
args->index = i;
queue_work(nbd->recv_workq, &args->work);
}
- return nbd_set_size(nbd, config->bytesize, config->blksize);
+ return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
}
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
@@ -1406,11 +1411,11 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_BLKSIZE:
return nbd_set_size(nbd, config->bytesize, arg);
case NBD_SET_SIZE:
- return nbd_set_size(nbd, arg, config->blksize);
+ return nbd_set_size(nbd, arg, nbd_blksize(config));
case NBD_SET_SIZE_BLOCKS:
- if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
+ if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
return -EINVAL;
- return nbd_set_size(nbd, bytesize, config->blksize);
+ return nbd_set_size(nbd, bytesize, nbd_blksize(config));
case NBD_SET_TIMEOUT:
nbd_set_cmd_timeout(nbd, arg);
return 0;
@@ -1476,7 +1481,7 @@ static struct nbd_config *nbd_alloc_config(void)
atomic_set(&config->recv_threads, 0);
init_waitqueue_head(&config->recv_wq);
init_waitqueue_head(&config->conn_wait);
- config->blksize = NBD_DEF_BLKSIZE;
+ config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
atomic_set(&config->live_connections, 0);
try_module_get(THIS_MODULE);
return config;
@@ -1604,7 +1609,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd)
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
- debugfs_create_u64("blocksize", 0444, dir, &config->blksize);
+ debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
return 0;
@@ -1826,7 +1831,7 @@ nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
struct nbd_config *config = nbd->config;
- u64 bsize = config->blksize;
+ u64 bsize = nbd_blksize(config);
u64 bytes = config->bytesize;
if (info->attrs[NBD_ATTR_SIZE_BYTES])
@@ -1835,7 +1840,7 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
- if (bytes != config->bytesize || bsize != config->blksize)
+ if (bytes != config->bytesize || bsize != nbd_blksize(config))
return nbd_set_size(nbd, bytes, bsize);
return 0;
}
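The nbd change above stores the block size as a shift count (blksize_bits) instead of the byte value: because the size must be a power of two, log2(blksize) round-trips exactly, the byte size comes back as 1u << bits, and the blocks-to-bytes conversion can be overflow-checked as a shift. A userspace sketch of those conversions; the kernel uses __ffs() and check_shl_overflow() for the same steps, and the GCC/Clang builtin below merely stands in for __ffs():

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool shl_overflows(uint64_t blocks, unsigned int bits, uint64_t *bytes)
{
	if (bits >= 64)
		return true;
	if (bits && (blocks >> (64 - bits)) != 0)
		return true;			/* high bits would be shifted out */
	*bytes = blocks << bits;
	return false;
}

int main(void)
{
	uint64_t blksize = 1024;		/* power of two in [512, PAGE_SIZE] */
	unsigned int bits = __builtin_ctzll(blksize);	/* lowest set bit == log2 here */
	uint64_t bytes;

	printf("blksize_bits=%u, back to bytes=%llu\n",
	       bits, (unsigned long long)(1ULL << bits));

	if (!shl_overflows(1000000, bits, &bytes))
		printf("1000000 blocks = %llu bytes\n", (unsigned long long)bytes);
	return 0;
}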
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index df77b6bf5c64..763cea8418f8 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -3090,6 +3090,7 @@ static int compat_insnlist(struct file *file, unsigned long arg)
mutex_lock(&dev->mutex);
rc = do_insnlist_ioctl(dev, insns, insnlist32.n_insns, file);
mutex_unlock(&dev->mutex);
+ kfree(insns);
return rc;
}
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index bb88198c874e..aa4e1a500691 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -778,7 +778,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
in_place ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
if (ret)
- goto e_ctx;
+ goto e_aad;
if (in_place) {
dst = src;
@@ -863,7 +863,7 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
op.u.aes.size = 0;
ret = cmd_q->ccp->vdata->perform->aes(&op);
if (ret)
- goto e_dst;
+ goto e_final_wa;
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
@@ -873,17 +873,19 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
- goto e_tag;
+ goto e_final_wa;
ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
- if (ret)
- goto e_tag;
+ if (ret) {
+ ccp_dm_free(&tag);
+ goto e_final_wa;
+ }
ret = crypto_memneq(tag.address, final_wa.address,
authsize) ? -EBADMSG : 0;
ccp_dm_free(&tag);
}
-e_tag:
+e_final_wa:
ccp_dm_free(&final_wa);
e_dst:
diff --git a/drivers/edac/dmc520_edac.c b/drivers/edac/dmc520_edac.c
index fc1153ab1ebb..b8a7d9594afd 100644
--- a/drivers/edac/dmc520_edac.c
+++ b/drivers/edac/dmc520_edac.c
@@ -464,7 +464,7 @@ static void dmc520_init_csrow(struct mem_ctl_info *mci)
dimm->grain = pvt->mem_width_in_bytes;
dimm->dtype = dt;
dimm->mtype = mt;
- dimm->edac_mode = EDAC_FLAG_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
dimm->nr_pages = pages_per_rank / csi->nr_channels;
}
}
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 7e7146b22c16..7d08627e738b 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -782,7 +782,7 @@ static void init_csrows(struct mem_ctl_info *mci)
for (j = 0; j < csi->nr_channels; j++) {
dimm = csi->channels[j]->dimm;
- dimm->edac_mode = EDAC_FLAG_SECDED;
+ dimm->edac_mode = EDAC_SECDED;
dimm->mtype = p_data->get_mtype(priv->baseaddr);
dimm->nr_pages = (size >> PAGE_SHIFT) / csi->nr_channels;
dimm->grain = SYNPS_EDAC_ERR_GRAIN;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index c99b78ee008a..f86666cf2c6a 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1019,16 +1019,18 @@ create_feature_instance(struct build_feature_devs_info *binfo,
{
unsigned int irq_base, nr_irqs;
struct dfl_feature_info *finfo;
+ u8 revision = 0;
int ret;
- u8 revision;
u64 v;
- v = readq(binfo->ioaddr + ofst);
- revision = FIELD_GET(DFH_REVISION, v);
+ if (fid != FEATURE_ID_AFU) {
+ v = readq(binfo->ioaddr + ofst);
+ revision = FIELD_GET(DFH_REVISION, v);
- /* read feature size and id if inputs are invalid */
- size = size ? size : feature_size(v);
- fid = fid ? fid : feature_id(v);
+ /* read feature size and id if inputs are invalid */
+ size = size ? size : feature_size(v);
+ fid = fid ? fid : feature_id(v);
+ }
if (binfo->len - ofst < size)
return -EINVAL;
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index 1afb41aa20d7..ea2ec3c6815c 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -225,8 +225,10 @@ static int machxo2_write_init(struct fpga_manager *mgr,
goto fail;
get_status(spi, &status);
- if (test_bit(FAIL, &status))
+ if (test_bit(FAIL, &status)) {
+ ret = -EINVAL;
goto fail;
+ }
dump_status_reg(&status);
spi_message_init(&msg);
@@ -313,6 +315,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
dump_status_reg(&status);
if (!test_bit(DONE, &status)) {
machxo2_cleanup(mgr);
+ ret = -EINVAL;
goto fail;
}
@@ -335,6 +338,7 @@ static int machxo2_write_complete(struct fpga_manager *mgr,
break;
if (++refreshloop == MACHXO2_MAX_REFRESH_LOOP) {
machxo2_cleanup(mgr);
+ ret = -EINVAL;
goto fail;
}
} while (1);
diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c
index 10f303d15225..3d6ef37a7702 100644
--- a/drivers/gpio/gpio-aspeed-sgpio.c
+++ b/drivers/gpio/gpio-aspeed-sgpio.c
@@ -395,7 +395,7 @@ static void aspeed_sgpio_irq_handler(struct irq_desc *desc)
reg = ioread32(bank_reg(data, bank, reg_irq_status));
for_each_set_bit(p, &reg, 32)
- generic_handle_domain_irq(gc->irq.domain, i * 32 + p);
+ generic_handle_domain_irq(gc->irq.domain, i * 32 + p * 2);
}
chained_irq_exit(ic, desc);
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index f5cfc0698799..8ebf369b3ba0 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -468,15 +468,8 @@ static int pca953x_gpio_get_value(struct gpio_chip *gc, unsigned off)
mutex_lock(&chip->i2c_lock);
ret = regmap_read(chip->regmap, inreg, &reg_val);
mutex_unlock(&chip->i2c_lock);
- if (ret < 0) {
- /*
- * NOTE:
- * diagnostic already emitted; that's all we should
- * do unless gpio_*_value_cansleep() calls become different
- * from their nonsleeping siblings (and report faults).
- */
- return 0;
- }
+ if (ret < 0)
+ return ret;
return !!(reg_val & bit);
}
diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c
index 036b2d959503..ce63cbd14d69 100644
--- a/drivers/gpio/gpio-rockchip.c
+++ b/drivers/gpio/gpio-rockchip.c
@@ -141,7 +141,7 @@ static int rockchip_gpio_get_direction(struct gpio_chip *chip,
u32 data;
data = rockchip_gpio_readl_bit(bank, offset, bank->gpio_regs->port_ddr);
- if (data & BIT(offset))
+ if (data)
return GPIO_LINE_DIRECTION_OUT;
return GPIO_LINE_DIRECTION_IN;
@@ -195,7 +195,7 @@ static int rockchip_gpio_set_debounce(struct gpio_chip *gc,
unsigned int cur_div_reg;
u64 div;
- if (!IS_ERR(bank->db_clk)) {
+ if (bank->gpio_type == GPIO_TYPE_V2 && !IS_ERR(bank->db_clk)) {
div_debounce_support = true;
freq = clk_get_rate(bank->db_clk);
max_debounce = (GENMASK(23, 0) + 1) * 2 * 1000000 / freq;
@@ -689,6 +689,7 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
struct device_node *pctlnp = of_get_parent(np);
struct pinctrl_dev *pctldev = NULL;
struct rockchip_pin_bank *bank = NULL;
+ struct rockchip_pin_output_deferred *cfg;
static int gpio;
int id, ret;
@@ -716,12 +717,33 @@ static int rockchip_gpio_probe(struct platform_device *pdev)
if (ret)
return ret;
+ /*
+ * Prevent clashes with a deferred output setting
+ * being added right at this moment.
+ */
+ mutex_lock(&bank->deferred_lock);
+
ret = rockchip_gpiolib_register(bank);
if (ret) {
clk_disable_unprepare(bank->clk);
+ mutex_unlock(&bank->deferred_lock);
return ret;
}
+ while (!list_empty(&bank->deferred_output)) {
+ cfg = list_first_entry(&bank->deferred_output,
+ struct rockchip_pin_output_deferred, head);
+ list_del(&cfg->head);
+
+ ret = rockchip_gpio_direction_output(&bank->gpio_chip, cfg->pin, cfg->arg);
+ if (ret)
+ dev_warn(dev, "setting output pin %u to %u failed\n", cfg->pin, cfg->arg);
+
+ kfree(cfg);
+ }
+
+ mutex_unlock(&bank->deferred_lock);
+
platform_set_drvdata(pdev, bank);
dev_info(dev, "probed %pOF\n", np);
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index f99f3c10bed0..39dca147d587 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -184,7 +184,7 @@ static void uniphier_gpio_irq_mask(struct irq_data *data)
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);
- return irq_chip_mask_parent(data);
+ irq_chip_mask_parent(data);
}
static void uniphier_gpio_irq_unmask(struct irq_data *data)
@@ -194,7 +194,7 @@ static void uniphier_gpio_irq_unmask(struct irq_data *data)
uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);
- return irq_chip_unmask_parent(data);
+ irq_chip_unmask_parent(data);
}
static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 411525ac4cc4..47712b6903b5 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -313,9 +313,11 @@ static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
if (ret)
- gpiochip_free_own_desc(desc);
+ dev_warn(chip->parent,
+ "Failed to set debounce-timeout for pin 0x%04X, err %d\n",
+ pin, ret);
- return ret ? ERR_PTR(ret) : desc;
+ return desc;
}
static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 7a7316731911..dc50c05f23fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -837,6 +837,28 @@ static int convert_tiling_flags_to_modifier(struct amdgpu_framebuffer *afb)
return 0;
}
+/* Mirrors the is_displayable check in radeonsi's gfx6_compute_surface */
+static int check_tiling_flags_gfx6(struct amdgpu_framebuffer *afb)
+{
+ u64 micro_tile_mode;
+
+ /* Zero swizzle mode means linear */
+ if (AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0)
+ return 0;
+
+ micro_tile_mode = AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE);
+ switch (micro_tile_mode) {
+ case 0: /* DISPLAY */
+ case 3: /* RENDER */
+ return 0;
+ default:
+ drm_dbg_kms(afb->base.dev,
+ "Micro tile mode %llu not supported for scanout\n",
+ micro_tile_mode);
+ return -EINVAL;
+ }
+}
+
static void get_block_dimensions(unsigned int block_log2, unsigned int cpp,
unsigned int *width, unsigned int *height)
{
@@ -1103,6 +1125,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
+ struct amdgpu_device *adev = drm_to_adev(dev);
int ret, i;
/*
@@ -1122,6 +1145,14 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev,
if (ret)
return ret;
+ if (!dev->mode_config.allow_fb_modifiers) {
+ drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI,
+ "GFX9+ requires FB check based on format modifier\n");
+ ret = check_tiling_flags_gfx6(rfb);
+ if (ret)
+ return ret;
+ }
+
if (dev->mode_config.allow_fb_modifiers &&
!(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) {
ret = convert_tiling_flags_to_modifier(rfb);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 603c259b073b..025184a556ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3599,7 +3599,7 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
/* set static priority for a queue/ring */
gfx_v9_0_mqd_set_priority(ring, mqd);
- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+ mqd->cp_hqd_quantum = RREG32_SOC15(GC, 0, mmCP_HQD_QUANTUM);
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 41c3a0d70b7c..e47104a1f559 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1098,6 +1098,8 @@ static int gmc_v10_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v10_0_gart_disable(adev);
+
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1106,7 +1108,6 @@ static int gmc_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- gmc_v10_0_gart_disable(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index d90c16a6b2b8..5551359d5dfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1794,6 +1794,8 @@ static int gmc_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v9_0_gart_disable(adev);
+
if (amdgpu_sriov_vf(adev)) {
/* full access mode, so don't touch any GMC register */
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
@@ -1802,7 +1804,6 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
- gmc_v9_0_gart_disable(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 779f5c911e11..e32efcfb0c8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -868,6 +868,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
+ /* TODO: check whether we can submit a doorbell request to raise
+ * a doorbell fence to exit gfxoff.
+ */
+ if (adev->in_s0ix)
+ amdgpu_gfx_off_ctrl(adev, false);
+
sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
@@ -876,6 +882,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
/* start the gfx rings and rlc compute queues */
r = sdma_v5_2_gfx_resume(adev);
+ if (adev->in_s0ix)
+ amdgpu_gfx_off_ctrl(adev, true);
if (r)
return r;
r = sdma_v5_2_rlc_resume(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 98d1b3ab3a46..c2a4d920da40 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -971,7 +971,6 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index dab290a4d19d..4a16e3c257b9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -891,9 +891,16 @@ int svm_migrate_init(struct amdgpu_device *adev)
pgmap->ops = &svm_migrate_pgmap_ops;
pgmap->owner = SVM_ADEV_PGMAP_OWNER(adev);
pgmap->flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
+
+ /* The device manager releases the device-specific resources, memory
+ * region and pgmap when the driver disconnects from the device.
+ */
r = devm_memremap_pages(adev->dev, pgmap);
if (IS_ERR(r)) {
pr_err("failed to register HMM device memory\n");
+
+ /* Disable SVM support capability */
+ pgmap->type = 0;
devm_release_mem_region(adev->dev, res->start,
res->end - res->start + 1);
return PTR_ERR(r);
@@ -908,12 +915,3 @@ int svm_migrate_init(struct amdgpu_device *adev)
return 0;
}
-
-void svm_migrate_fini(struct amdgpu_device *adev)
-{
- struct dev_pagemap *pgmap = &adev->kfd.dev->pgmap;
-
- devm_memunmap_pages(adev->dev, pgmap);
- devm_release_mem_region(adev->dev, pgmap->range.start,
- pgmap->range.end - pgmap->range.start + 1);
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
index 0de76b5d4973..2f5b3394c9ed 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h
@@ -47,7 +47,6 @@ unsigned long
svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr);
int svm_migrate_init(struct amdgpu_device *adev);
-void svm_migrate_fini(struct amdgpu_device *adev);
#else
@@ -55,10 +54,6 @@ static inline int svm_migrate_init(struct amdgpu_device *adev)
{
return 0;
}
-static inline void svm_migrate_fini(struct amdgpu_device *adev)
-{
- /* empty */
-}
#endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9fc8021bb0ab..9d0f65a90002 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -118,6 +118,13 @@ static void svm_range_remove_notifier(struct svm_range *prange)
mmu_interval_notifier_remove(&prange->notifier);
}
+static bool
+svm_is_valid_dma_mapping_addr(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr && !dma_mapping_error(dev, dma_addr) &&
+ !(dma_addr & SVM_RANGE_VRAM_DOMAIN);
+}
+
static int
svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
unsigned long offset, unsigned long npages,
@@ -139,8 +146,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
addr += offset;
for (i = 0; i < npages; i++) {
- if (WARN_ONCE(addr[i] && !dma_mapping_error(dev, addr[i]),
- "leaking dma mapping\n"))
+ if (svm_is_valid_dma_mapping_addr(dev, addr[i]))
dma_unmap_page(dev, addr[i], PAGE_SIZE, dir);
page = hmm_pfn_to_page(hmm_pfns[i]);
@@ -209,7 +215,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
return;
for (i = offset; i < offset + npages; i++) {
- if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
+ if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
continue;
pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
@@ -1165,7 +1171,7 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
unsigned long last_start;
int last_domain;
int r = 0;
- int64_t i;
+ int64_t i, j;
last_start = prange->start + offset;
@@ -1178,7 +1184,11 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
for (i = offset; i < offset + npages; i++) {
last_domain = dma_addr[i] & SVM_RANGE_VRAM_DOMAIN;
dma_addr[i] &= ~SVM_RANGE_VRAM_DOMAIN;
- if ((prange->start + i) < prange->last &&
+
+ /* Collect all pages in the same address range and memory domain
+ * that can be mapped with a single call to update mapping.
+ */
+ if (i < offset + npages - 1 &&
last_domain == (dma_addr[i + 1] & SVM_RANGE_VRAM_DOMAIN))
continue;
@@ -1201,6 +1211,10 @@ svm_range_map_to_gpu(struct amdgpu_device *adev, struct amdgpu_vm *vm,
NULL, dma_addr,
&vm->last_update,
&table_freed);
+
+ for (j = last_start - prange->start; j <= i; j++)
+ dma_addr[j] |= last_domain;
+
if (r) {
pr_debug("failed %d to map to gpu 0x%lx\n", r, prange->start);
goto out;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 66c799f5c7cf..1ea31dcc7a8b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1115,6 +1115,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.pci_revision_id = adev->pdev->revision;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+ init_data.asic_id.chip_id = adev->pdev->device;
init_data.asic_id.vram_width = adev->gmc.vram_width;
/* TODO: initialize init_data.asic_id.vram_type here!!!! */
@@ -1719,6 +1720,7 @@ static int dm_late_init(void *handle)
linear_lut[i] = 0xFFFF * i / 15;
params.set = 0;
+ params.backlight_ramping_override = false;
params.backlight_ramping_start = 0xCCCC;
params.backlight_ramping_reduction = 0xCCCCCCCC;
params.backlight_lut_array_size = 16;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index f6dbc5a74757..05eaec03d9f7 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1826,14 +1826,13 @@ bool perform_link_training_with_retries(
if (panel_mode == DP_PANEL_MODE_EDP) {
struct cp_psp *cp_psp = &stream->ctx->cp_psp;
- if (cp_psp && cp_psp->funcs.enable_assr) {
- if (!cp_psp->funcs.enable_assr(cp_psp->handle, link)) {
- /* since eDP implies ASSR on, change panel
- * mode to disable ASSR
- */
- panel_mode = DP_PANEL_MODE_DEFAULT;
- }
- }
+ if (cp_psp && cp_psp->funcs.enable_assr)
+ /* ASSR is bound to fail with unsigned PSP
+ * verstage used during the development phase.
+ * Report and continue with eDP panel mode to
+ * perform eDP link training with the right settings
+ */
+ cp_psp->funcs.enable_assr(cp_psp->handle, link);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index e14f99b4b0c3..3c3347341103 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -42,7 +42,7 @@
#define DC_LOGGER \
engine->ctx->logger
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
#define IS_DC_I2CAUX_LOGGING_ENABLED() (false)
#define LOG_FLAG_Error_I2cAux LOG_ERROR
#define LOG_FLAG_I2cAux_DceAux LOG_I2C_AUX
@@ -76,7 +76,7 @@ enum {
#define DEFAULT_AUX_ENGINE_MULT 0
#define DEFAULT_AUX_ENGINE_LENGTH 69
-#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
+#define DC_TRACE_LEVEL_MESSAGE(...) do { } while (0)
static void release_engine(
struct dce_aux *engine)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
index bdbbeb959c68..81f82aa05ec2 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
@@ -6867,6 +6867,8 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
+ ni_update_current_ps(adev, boot_ps);
+
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 9870c4e6af36..b5001db7a95c 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -793,7 +793,6 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct decon_context *ctx;
- struct resource *res;
int ret;
int i;
@@ -818,8 +817,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev)
ctx->clks[i] = clk;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->addr = devm_ioremap_resource(dev, res);
+ ctx->addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->addr))
return PTR_ERR(ctx->addr);
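
[Editorial aside, not part of the patch: the same one-line conversion recurs in the Exynos hunks that follow. As a quick illustration (the helper name example_map_regs is invented), devm_platform_ioremap_resource() simply folds the platform_get_resource() + devm_ioremap_resource() pair into a single call:

#include <linux/io.h>
#include <linux/platform_device.h>

/* Hypothetical probe helper: map the first MEM resource of a device. */
static void __iomem *example_map_regs(struct platform_device *pdev)
{
	/*
	 * Equivalent to the removed two-step pattern:
	 *   struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   return devm_ioremap_resource(&pdev->dev, res);
	 */
	return devm_platform_ioremap_resource(pdev, 0);
}
]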
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e39fac889edc..8d137857818c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1738,7 +1738,6 @@ static const struct component_ops exynos_dsi_component_ops = {
static int exynos_dsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct exynos_dsi *dsi;
int ret, i;
@@ -1789,8 +1788,7 @@ static int exynos_dsi_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->reg_base = devm_ioremap_resource(dev, res);
+ dsi->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->reg_base))
return PTR_ERR(dsi->reg_base);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index a3c718148c45..ecfd82d0afb7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -85,7 +85,6 @@ struct fimc_scaler {
/*
* A structure of fimc context.
*
- * @regs_res: register resources.
* @regs: memory mapped io registers.
* @lock: locking of operations.
* @clocks: fimc clocks.
@@ -103,7 +102,6 @@ struct fimc_context {
struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
- struct resource *regs_res;
void __iomem *regs;
spinlock_t lock;
struct clk *clocks[FIMC_CLKS_MAX];
@@ -1327,8 +1325,7 @@ static int fimc_probe(struct platform_device *pdev)
ctx->num_formats = num_formats;
/* resource memory */
- ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 700ca4fa6665..c735e53939d8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1202,9 +1202,7 @@ static int fimd_probe(struct platform_device *pdev)
return PTR_ERR(ctx->lcd_clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- ctx->regs = devm_ioremap_resource(dev, res);
+ ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index b00230626c6a..471fd6c8135f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1449,7 +1449,6 @@ static const struct component_ops g2d_component_ops = {
static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct g2d_data *g2d;
int ret;
@@ -1491,9 +1490,7 @@ static int g2d_probe(struct platform_device *pdev)
clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- g2d->regs = devm_ioremap_resource(dev, res);
+ g2d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(g2d->regs)) {
ret = PTR_ERR(g2d->regs);
goto err_put_clk;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 90d7bf906885..166a80262896 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -86,7 +86,6 @@ struct gsc_scaler {
/*
* A structure of gsc context.
*
- * @regs_res: register resources.
* @regs: memory mapped io registers.
* @gsc_clk: gsc gate clock.
* @sc: scaler infomations.
@@ -103,7 +102,6 @@ struct gsc_context {
struct exynos_drm_ipp_formats *formats;
unsigned int num_formats;
- struct resource *regs_res;
void __iomem *regs;
const char **clk_names;
struct clk *clocks[GSC_MAX_CLOCKS];
@@ -1272,9 +1270,7 @@ static int gsc_probe(struct platform_device *pdev)
}
}
- /* resource memory */
- ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ctx->regs = devm_ioremap_resource(dev, ctx->regs_res);
+ ctx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ctx->regs))
return PTR_ERR(ctx->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index ee61be4cf152..dec7df35baa9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -278,7 +278,6 @@ static const struct component_ops rotator_component_ops = {
static int rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *regs_res;
struct rot_context *rot;
const struct rot_variant *variant;
int irq;
@@ -292,8 +291,7 @@ static int rotator_probe(struct platform_device *pdev)
rot->formats = variant->formats;
rot->num_formats = variant->num_formats;
rot->dev = dev;
- regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rot->regs = devm_ioremap_resource(dev, regs_res);
+ rot->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rot->regs))
return PTR_ERR(rot->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index f9ae5b038d59..3a7851b7dc66 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -485,7 +485,6 @@ static const struct component_ops scaler_component_ops = {
static int scaler_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *regs_res;
struct scaler_context *scaler;
int irq;
int ret, i;
@@ -498,8 +497,7 @@ static int scaler_probe(struct platform_device *pdev)
(struct scaler_data *)of_device_get_match_data(dev);
scaler->dev = dev;
- regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- scaler->regs = devm_ioremap_resource(dev, regs_res);
+ scaler->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(scaler->regs))
return PTR_ERR(scaler->regs);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c769dec576de..7655142a4651 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1957,7 +1957,6 @@ static int hdmi_probe(struct platform_device *pdev)
struct hdmi_audio_infoframe *audio_infoframe;
struct device *dev = &pdev->dev;
struct hdmi_context *hdata;
- struct resource *res;
int ret;
hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
@@ -1979,8 +1978,7 @@ static int hdmi_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdata->regs = devm_ioremap_resource(dev, res);
+ hdata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdata->regs)) {
ret = PTR_ERR(hdata->regs);
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index e91e0e0191fb..4b94256d7319 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -222,31 +222,42 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
struct intel_sa_info {
u16 displayrtids;
- u8 deburst, deprogbwlimit;
+ u8 deburst, deprogbwlimit, derating;
};
static const struct intel_sa_info icl_sa_info = {
.deburst = 8,
.deprogbwlimit = 25, /* GB/s */
.displayrtids = 128,
+ .derating = 10,
};
static const struct intel_sa_info tgl_sa_info = {
.deburst = 16,
.deprogbwlimit = 34, /* GB/s */
.displayrtids = 256,
+ .derating = 10,
};
static const struct intel_sa_info rkl_sa_info = {
.deburst = 16,
.deprogbwlimit = 20, /* GB/s */
.displayrtids = 128,
+ .derating = 10,
};
static const struct intel_sa_info adls_sa_info = {
.deburst = 16,
.deprogbwlimit = 38, /* GB/s */
.displayrtids = 256,
+ .derating = 10,
+};
+
+static const struct intel_sa_info adlp_sa_info = {
+ .deburst = 16,
+ .deprogbwlimit = 38, /* GB/s */
+ .displayrtids = 256,
+ .derating = 20,
};
static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa)
@@ -302,7 +313,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bw = icl_calc_bw(sp->dclk, clpchgroup * 32 * num_channels, ct);
bi->deratedbw[j] = min(maxdebw,
- bw * 9 / 10); /* 90% */
+ bw * (100 - sa->derating) / 100);
drm_dbg_kms(&dev_priv->drm,
"BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
@@ -400,7 +411,9 @@ void intel_bw_init_hw(struct drm_i915_private *dev_priv)
if (IS_DG2(dev_priv))
dg2_get_bw_info(dev_priv);
- else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+ else if (IS_ALDERLAKE_P(dev_priv))
+ icl_get_bw_info(dev_priv, &adlp_sa_info);
+ else if (IS_ALDERLAKE_S(dev_priv))
icl_get_bw_info(dev_priv, &adls_sa_info);
else if (IS_ROCKETLAKE(dev_priv))
icl_get_bw_info(dev_priv, &rkl_sa_info);
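
[Editorial aside, not part of the patch: a worked example of the new derating field (helper name invented for illustration). With the adlp_sa_info value of 20, a 100 GB/s QGV point derates to 80 GB/s, while the previous hard-coded "bw * 9 / 10" corresponds to derating = 10 on the other platforms:

/* Illustration of the derating math used in icl_get_bw_info() above. */
static unsigned int example_derated_bw(unsigned int bw, unsigned int derating)
{
	return bw * (100 - derating) / 100;	/* e.g. 100 GB/s at 20% -> 80 GB/s */
}
]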
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 3c3c6cb5c0df..b3c8e1c450ef 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -805,11 +805,14 @@ void intel_dmc_ucode_resume(struct drm_i915_private *dev_priv)
*/
void intel_dmc_ucode_fini(struct drm_i915_private *dev_priv)
{
+ int id;
+
if (!HAS_DMC(dev_priv))
return;
intel_dmc_ucode_suspend(dev_priv);
drm_WARN_ON(&dev_priv->drm, dev_priv->dmc.wakeref);
- kfree(dev_priv->dmc.dmc_info[DMC_FW_MAIN].payload);
+ for (id = 0; id < DMC_FW_MAX; id++)
+ kfree(dev_priv->dmc.dmc_info[id].payload);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 35eedc14f522..6ea13159bffc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -356,11 +356,8 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (likely(obj)) {
- /* This releases all gem object bindings to the backend. */
+ if (likely(obj))
i915_ttm_free_cached_io_st(obj);
- __i915_gem_free_object(obj);
- }
}
static struct intel_memory_region *
@@ -875,8 +872,12 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ /* This releases all gem object bindings to the backend. */
+ __i915_gem_free_object(obj);
+
i915_gem_object_release_memory_region(obj);
mutex_destroy(&obj->ttm.get_io_page.lock);
+
if (obj->ttm.created)
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 745e84c72c90..e866105dd174 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -362,8 +362,9 @@ static int __intel_context_active(struct i915_active *active)
return 0;
}
-static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
- enum i915_sw_fence_notify state)
+static int __i915_sw_fence_call
+sw_fence_dummy_notify(struct i915_sw_fence *sf,
+ enum i915_sw_fence_notify state)
{
return NOTIFY_DONE;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 591a5224287e..0a03fbed9f9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -882,8 +882,6 @@ void intel_rps_park(struct intel_rps *rps)
if (!intel_rps_is_enabled(rps))
return;
- GEM_BUG_ON(atomic_read(&rps->num_waiters));
-
if (!intel_rps_clear_active(rps))
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
index 99e1fad5ca20..c9086a600bce 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
@@ -102,11 +102,11 @@ static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
* | +-------+--------------------------------------------------------------+
* | | 7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message |
* +---+-------+--------------------------------------------------------------+
- * | 1 | 31:0 | +--------------------------------------------------------+ |
- * +---+-------+ | | |
- * |...| | | Embedded `HXG Message`_ | |
- * +---+-------+ | | |
- * | n | 31:0 | +--------------------------------------------------------+ |
+ * | 1 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
* +---+-------+--------------------------------------------------------------+
*/
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
index bbf1ddb77434..9baa3cb07d13 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
@@ -38,11 +38,11 @@
* +---+-------+--------------------------------------------------------------+
* | | Bits | Description |
* +===+=======+==============================================================+
- * | 0 | 31:0 | +--------------------------------------------------------+ |
- * +---+-------+ | | |
- * |...| | | Embedded `HXG Message`_ | |
- * +---+-------+ | | |
- * | n | 31:0 | +--------------------------------------------------------+ |
+ * | 0 | 31:0 | |
+ * +---+-------+ |
+ * |...| | [Embedded `HXG Message`_] |
+ * +---+-------+ |
+ * | n | 31:0 | |
* +---+-------+--------------------------------------------------------------+
*/
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b56a8e37a3cd..1bb1be5c48c8 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -576,7 +576,7 @@ retry:
/* No one is going to touch shadow bb from now on. */
i915_gem_object_flush_map(bb->obj);
- i915_gem_object_unlock(bb->obj);
+ i915_gem_ww_ctx_fini(&ww);
}
}
return 0;
@@ -630,7 +630,7 @@ retry:
return ret;
}
- i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
+ i915_gem_ww_ctx_fini(&ww);
/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index ce446716d092..79da5eca60af 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -829,8 +829,6 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->submit, submit_notify);
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
- dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
-
rq->capture_list = NULL;
init_llist_head(&rq->execute_cb);
@@ -905,17 +903,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
- kref_init(&rq->fence.refcount);
- rq->fence.flags = 0;
- rq->fence.error = 0;
- INIT_LIST_HEAD(&rq->fence.cb_list);
-
ret = intel_timeline_get_seqno(tl, rq, &seqno);
if (ret)
goto err_free;
- rq->fence.context = tl->fence_context;
- rq->fence.seqno = seqno;
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+ tl->fence_context, seqno);
RCU_INIT_POINTER(rq->timeline, tl);
rq->hwsp_seqno = tl->hwsp_seqno;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
index b0ece71aefde..ce774579c89d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/ctrl.c
@@ -57,7 +57,7 @@ nvkm_control_mthd_pstate_info(struct nvkm_control *ctrl, void *data, u32 size)
args->v0.count = 0;
args->v0.ustate_ac = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
args->v0.ustate_dc = NVIF_CONTROL_PSTATE_INFO_V0_USTATE_DISABLE;
- args->v0.pwrsrc = -ENOSYS;
+ args->v0.pwrsrc = -ENODEV;
args->v0.pstate = NVIF_CONTROL_PSTATE_INFO_V0_PSTATE_UNKNOWN;
}
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 16c7aabb94d3..a29d64f87563 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1845,7 +1845,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
bool prepare_bandwidth_transition)
{
const struct tegra_plane_state *old_tegra_state, *new_tegra_state;
- const struct tegra_dc_state *old_dc_state, *new_dc_state;
u32 i, new_avg_bw, old_avg_bw, new_peak_bw, old_peak_bw;
const struct drm_plane_state *old_plane_state;
const struct drm_crtc_state *old_crtc_state;
@@ -1858,8 +1857,6 @@ tegra_crtc_update_memory_bandwidth(struct drm_crtc *crtc,
return;
old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
- old_dc_state = to_const_dc_state(old_crtc_state);
- new_dc_state = to_const_dc_state(crtc->state);
if (!crtc->state->active) {
if (!old_crtc_state->active)
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index f0cb691852a1..40378308d527 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -35,12 +35,6 @@ static inline struct tegra_dc_state *to_dc_state(struct drm_crtc_state *state)
return NULL;
}
-static inline const struct tegra_dc_state *
-to_const_dc_state(const struct drm_crtc_state *state)
-{
- return to_dc_state((struct drm_crtc_state *)state);
-}
-
struct tegra_dc_stats {
unsigned long frames;
unsigned long vblank;
diff --git a/drivers/gpu/drm/tegra/uapi.c b/drivers/gpu/drm/tegra/uapi.c
index dc16a24f4dbe..690a339c52ec 100644
--- a/drivers/gpu/drm/tegra/uapi.c
+++ b/drivers/gpu/drm/tegra/uapi.c
@@ -222,7 +222,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
mapping->iova = sg_dma_address(mapping->sgt->sgl);
}
- mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->size;
+ mapping->iova_end = mapping->iova + host1x_to_tegra_bo(mapping->bo)->gem.size;
err = xa_alloc(&context->mappings, &args->mapping, mapping, XA_LIMIT(1, U32_MAX),
GFP_KERNEL);
diff --git a/drivers/gpu/host1x/fence.c b/drivers/gpu/host1x/fence.c
index 6941add95d0f..ecab72882192 100644
--- a/drivers/gpu/host1x/fence.c
+++ b/drivers/gpu/host1x/fence.c
@@ -15,7 +15,7 @@
#include "intr.h"
#include "syncpt.h"
-DEFINE_SPINLOCK(lock);
+static DEFINE_SPINLOCK(lock);
struct host1x_syncpt_fence {
struct dma_fence base;
@@ -152,8 +152,10 @@ struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold)
return ERR_PTR(-ENOMEM);
fence->waiter = kzalloc(sizeof(*fence->waiter), GFP_KERNEL);
- if (!fence->waiter)
+ if (!fence->waiter) {
+ kfree(fence);
return ERR_PTR(-ENOMEM);
+ }
fence->sp = sp;
fence->threshold = threshold;
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 79b138fd4261..05c007b213f2 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -255,13 +255,13 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
if (!privdata->cl_data)
return -ENOMEM;
- rc = devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
+ mp2_select_ops(privdata);
+
+ rc = amd_sfh_hid_client_init(privdata);
if (rc)
return rc;
- mp2_select_ops(privdata);
-
- return amd_sfh_hid_client_init(privdata);
+ return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
}
static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 833fcf07ff35..6ccfa0cb997a 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -336,12 +336,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field,
/*
* MacBook JIS keyboard has wrong logical maximum
+ * Magic Keyboard JIS has wrong logical maximum
*/
static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
struct apple_sc *asc = hid_get_drvdata(hdev);
+ if (*rsize >= 71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) {
+ hid_info(hdev,
+ "fixing up Magic Keyboard JIS report descriptor\n");
+ rdesc[64] = rdesc[70] = 0xe7;
+ }
+
if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 &&
rdesc[53] == 0x65 && rdesc[59] == 0x65) {
hid_info(hdev,
diff --git a/drivers/hid/hid-betopff.c b/drivers/hid/hid-betopff.c
index 0790fbd3fc9a..467d789f9bc2 100644
--- a/drivers/hid/hid-betopff.c
+++ b/drivers/hid/hid-betopff.c
@@ -56,15 +56,22 @@ static int betopff_init(struct hid_device *hid)
{
struct betopff_device *betopff;
struct hid_report *report;
- struct hid_input *hidinput =
- list_first_entry(&hid->inputs, struct hid_input, list);
+ struct hid_input *hidinput;
struct list_head *report_list =
&hid->report_enum[HID_OUTPUT_REPORT].report_list;
- struct input_dev *dev = hidinput->input;
+ struct input_dev *dev;
int field_count = 0;
int error;
int i, j;
+ if (list_empty(&hid->inputs)) {
+ hid_err(hid, "no inputs found\n");
+ return -ENODEV;
+ }
+
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ dev = hidinput->input;
+
if (list_empty(report_list)) {
hid_err(hid, "no output reports found\n");
return -ENODEV;
diff --git a/drivers/hid/hid-u2fzero.c b/drivers/hid/hid-u2fzero.c
index 95e0807878c7..d70cd3d7f583 100644
--- a/drivers/hid/hid-u2fzero.c
+++ b/drivers/hid/hid-u2fzero.c
@@ -198,7 +198,9 @@ static int u2fzero_rng_read(struct hwrng *rng, void *data,
}
ret = u2fzero_recv(dev, &req, &resp);
- if (ret < 0)
+
+ /* ignore errors or packets without data */
+ if (ret < offsetof(struct u2f_hid_msg, init.data))
return 0;
/* only take the minimum amount of data it is safe to take */
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index fd51769d0994..33a6908995b1 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -4746,6 +4746,12 @@ static const struct wacom_features wacom_features_0x393 =
{ "Wacom Intuos Pro S", 31920, 19950, 8191, 63,
INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7,
.touch_max = 10 };
+static const struct wacom_features wacom_features_0x3c6 =
+ { "Wacom Intuos BT S", 15200, 9500, 4095, 63,
+ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+static const struct wacom_features wacom_features_0x3c8 =
+ { "Wacom Intuos BT M", 21600, 13500, 4095, 63,
+ INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
static const struct wacom_features wacom_features_HID_ANY_ID =
{ "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID };
@@ -4919,6 +4925,8 @@ const struct hid_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x37A) },
{ USB_DEVICE_WACOM(0x37B) },
{ BT_DEVICE_WACOM(0x393) },
+ { BT_DEVICE_WACOM(0x3c6) },
+ { BT_DEVICE_WACOM(0x3c8) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x4004) },
{ USB_DEVICE_WACOM(0x5000) },
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 38bc35ac8135..3618a924e78e 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -362,12 +362,6 @@ static const struct hwmon_channel_info *k10temp_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(in,
- HWMON_I_INPUT | HWMON_I_LABEL,
- HWMON_I_INPUT | HWMON_I_LABEL),
- HWMON_CHANNEL_INFO(curr,
- HWMON_C_INPUT | HWMON_C_LABEL,
- HWMON_C_INPUT | HWMON_C_LABEL),
NULL
};
diff --git a/drivers/hwmon/ltc2947-core.c b/drivers/hwmon/ltc2947-core.c
index bb3f7749a0b0..5423466de697 100644
--- a/drivers/hwmon/ltc2947-core.c
+++ b/drivers/hwmon/ltc2947-core.c
@@ -989,8 +989,12 @@ static int ltc2947_setup(struct ltc2947_data *st)
return ret;
/* check external clock presence */
- extclk = devm_clk_get(st->dev, NULL);
- if (!IS_ERR(extclk)) {
+ extclk = devm_clk_get_optional(st->dev, NULL);
+ if (IS_ERR(extclk))
+ return dev_err_probe(st->dev, PTR_ERR(extclk),
+ "Failed to get external clock\n");
+
+ if (extclk) {
unsigned long rate_hz;
u8 pre = 0, div, tbctl;
u64 aux;
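
[Editorial aside, not part of the patch: the switch to devm_clk_get_optional() changes the contract slightly — a clock that simply is not described now comes back as NULL rather than an error pointer, so only genuine failures (including -EPROBE_DEFER) are reported. A minimal sketch of the idiom, with hypothetical names:

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical helper mirroring the ltc2947 change above. */
static int example_get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get_optional(dev, NULL);

	if (IS_ERR(clk))	/* real error, e.g. -EPROBE_DEFER */
		return dev_err_probe(dev, PTR_ERR(clk), "Failed to get clock\n");

	*out = clk;		/* NULL simply means no clock was provided */
	return 0;
}
]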
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index 116681fde33d..89fe7b9fe26b 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -315,8 +315,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
{
struct mlxreg_fan *fan = cdev->devdata;
unsigned long cur_state;
+ int i, config = 0;
u32 regval;
- int i;
int err;
/*
@@ -329,6 +329,12 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
* overwritten.
*/
if (state >= MLXREG_FAN_SPEED_MIN && state <= MLXREG_FAN_SPEED_MAX) {
+ /*
+ * This is a configuration change, which is only supported through
+ * sysfs. For a configuration change, a non-zero value is returned
+ * to avoid a thermal statistics update.
+ */
+ config = 1;
state -= MLXREG_FAN_MAX_STATE;
for (i = 0; i < state; i++)
fan->cooling_levels[i] = state;
@@ -343,7 +349,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
cur_state = MLXREG_FAN_PWM_DUTY2STATE(regval);
if (state < cur_state)
- return 0;
+ return config;
state = cur_state;
}
@@ -359,7 +365,7 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
dev_err(fan->dev, "Failed to write PWM duty\n");
return err;
}
- return 0;
+ return config;
}
static const struct thermal_cooling_device_ops mlxreg_fan_cooling_ops = {
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index 0d68a78be980..ae664613289c 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -340,18 +340,11 @@ static ssize_t occ_show_temp_10(struct device *dev,
if (val == OCC_TEMP_SENSOR_FAULT)
return -EREMOTEIO;
- /*
- * VRM doesn't return temperature, only alarm bit. This
- * attribute maps to tempX_alarm instead of tempX_input for
- * VRM
- */
- if (temp->fru_type != OCC_FRU_TYPE_VRM) {
- /* sensor not ready */
- if (val == 0)
- return -EAGAIN;
+ /* sensor not ready */
+ if (val == 0)
+ return -EAGAIN;
- val *= 1000;
- }
+ val *= 1000;
break;
case 2:
val = temp->fru_type;
@@ -886,7 +879,7 @@ static int occ_setup_sensor_attrs(struct occ *occ)
0, i);
attr++;
- if (sensors->temp.version > 1 &&
+ if (sensors->temp.version == 2 &&
temp->fru_type == OCC_FRU_TYPE_VRM) {
snprintf(attr->name, sizeof(attr->name),
"temp%d_alarm", s);
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index df712ce4b164..53f7d1418bc9 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -171,8 +171,14 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
cmd = CFFPS_SN_CMD;
break;
case CFFPS_DEBUGFS_MAX_POWER_OUT:
- rc = i2c_smbus_read_word_swapped(psu->client,
- CFFPS_MAX_POWER_OUT_CMD);
+ if (psu->version == cffps1) {
+ rc = i2c_smbus_read_word_swapped(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ } else {
+ rc = i2c_smbus_read_word_data(psu->client,
+ CFFPS_MAX_POWER_OUT_CMD);
+ }
+
if (rc < 0)
return rc;
diff --git a/drivers/hwmon/pmbus/mp2975.c b/drivers/hwmon/pmbus/mp2975.c
index eb94bd5f4e2a..51986adfbf47 100644
--- a/drivers/hwmon/pmbus/mp2975.c
+++ b/drivers/hwmon/pmbus/mp2975.c
@@ -54,7 +54,7 @@
#define MP2975_RAIL2_FUNC (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | \
PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | \
- PMBUS_PHASE_VIRTUAL)
+ PMBUS_HAVE_POUT | PMBUS_PHASE_VIRTUAL)
struct mp2975_data {
struct pmbus_driver_info info;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index ede66ea6a730..b963a369c5ab 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -100,71 +100,81 @@ struct tmp421_data {
s16 temp[4];
};
-static int temp_from_s16(s16 reg)
+static int temp_from_raw(u16 reg, bool extended)
{
/* Mask out status bits */
int temp = reg & ~0xf;
- return (temp * 1000 + 128) / 256;
-}
-
-static int temp_from_u16(u16 reg)
-{
- /* Mask out status bits */
- int temp = reg & ~0xf;
-
- /* Add offset for extended temperature range. */
- temp -= 64 * 256;
+ if (extended)
+ temp = temp - 64 * 256;
+ else
+ temp = (s16)temp;
- return (temp * 1000 + 128) / 256;
+ return DIV_ROUND_CLOSEST(temp * 1000, 256);
}
-static struct tmp421_data *tmp421_update_device(struct device *dev)
+static int tmp421_update_device(struct tmp421_data *data)
{
- struct tmp421_data *data = dev_get_drvdata(dev);
struct i2c_client *client = data->client;
+ int ret = 0;
int i;
mutex_lock(&data->update_lock);
if (time_after(jiffies, data->last_updated + (HZ / 2)) ||
!data->valid) {
- data->config = i2c_smbus_read_byte_data(client,
- TMP421_CONFIG_REG_1);
+ ret = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_1);
+ if (ret < 0)
+ goto exit;
+ data->config = ret;
for (i = 0; i < data->channels; i++) {
- data->temp[i] = i2c_smbus_read_byte_data(client,
- TMP421_TEMP_MSB[i]) << 8;
- data->temp[i] |= i2c_smbus_read_byte_data(client,
- TMP421_TEMP_LSB[i]);
+ ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
+ if (ret < 0)
+ goto exit;
+ data->temp[i] = ret << 8;
+
+ ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
+ if (ret < 0)
+ goto exit;
+ data->temp[i] |= ret;
}
data->last_updated = jiffies;
data->valid = 1;
}
+exit:
mutex_unlock(&data->update_lock);
- return data;
+ if (ret < 0) {
+ data->valid = 0;
+ return ret;
+ }
+
+ return 0;
}
static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
- struct tmp421_data *tmp421 = tmp421_update_device(dev);
+ struct tmp421_data *tmp421 = dev_get_drvdata(dev);
+ int ret = 0;
+
+ ret = tmp421_update_device(tmp421);
+ if (ret)
+ return ret;
switch (attr) {
case hwmon_temp_input:
- if (tmp421->config & TMP421_CONFIG_RANGE)
- *val = temp_from_u16(tmp421->temp[channel]);
- else
- *val = temp_from_s16(tmp421->temp[channel]);
+ *val = temp_from_raw(tmp421->temp[channel],
+ tmp421->config & TMP421_CONFIG_RANGE);
return 0;
case hwmon_temp_fault:
/*
- * The OPEN bit signals a fault. This is bit 0 of the temperature
- * register (low byte).
+ * Any of the OPEN or /PVLD bits indicates a hardware malfunction
+ * and the conversion result may be incorrect
*/
- *val = tmp421->temp[channel] & 0x01;
+ *val = !!(tmp421->temp[channel] & 0x03);
return 0;
default:
return -EOPNOTSUPP;
@@ -177,9 +187,6 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
{
switch (attr) {
case hwmon_temp_fault:
- if (channel == 0)
- return 0;
- return 0444;
case hwmon_temp_input:
return 0444;
default:
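
[Editorial aside, not part of the patch: for reference, the unified conversion above behaves as follows — in standard range the masked register value is treated as a signed two's-complement number, in extended range a 64 °C offset is subtracted, and either way the result is returned in millidegrees Celsius. Stand-alone sketch, names chosen for illustration:

#include <linux/kernel.h>	/* DIV_ROUND_CLOSEST */
#include <linux/types.h>

/* Stand-alone copy of the temp_from_raw() logic, for illustration only. */
static int example_temp_from_raw(u16 reg, bool extended)
{
	int temp = reg & ~0xf;		/* mask out the status bits */

	if (extended)
		temp -= 64 * 256;	/* extended range: raw 0 means -64 degC */
	else
		temp = (s16)temp;	/* standard range: plain two's complement */

	return DIV_ROUND_CLOSEST(temp * 1000, 256);
}
/* e.g. reg = 0x1900 -> 25000 mdegC in standard range, -39000 mdegC extended */
]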
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 37b25a1474c4..3c1be2c11fdf 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -273,9 +273,6 @@ struct w83791d_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- /* array of 2 pointers to subclients */
- struct i2c_client *lm75[2];
-
/* volts */
u8 in[NUMBER_OF_VIN]; /* Register value */
u8 in_max[NUMBER_OF_VIN]; /* Register value */
@@ -1257,7 +1254,6 @@ static const struct attribute_group w83791d_group_fanpwm45 = {
static int w83791d_detect_subclients(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
- struct w83791d_data *data = i2c_get_clientdata(client);
int address = client->addr;
int i, id;
u8 val;
@@ -1280,22 +1276,19 @@ static int w83791d_detect_subclients(struct i2c_client *client)
}
val = w83791d_read(client, W83791D_REG_I2C_SUBADDR);
- if (!(val & 0x08))
- data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + (val & 0x7));
- if (!(val & 0x80)) {
- if (!IS_ERR(data->lm75[0]) &&
- ((val & 0x7) == ((val >> 4) & 0x7))) {
- dev_err(&client->dev,
- "duplicate addresses 0x%x, "
- "use force_subclient\n",
- data->lm75[0]->addr);
- return -ENODEV;
- }
- data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + ((val >> 4) & 0x7));
+
+ if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+ dev_err(&client->dev,
+ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+ return -ENODEV;
}
+ if (!(val & 0x08))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (val & 0x7));
+
+ if (!(val & 0x80))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
return 0;
}
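
[Editorial aside, not part of the patch: the same treatment is applied to w83792d and w83793 below, so the new duplicate-address test is worth spelling out once. Sketch only; the helper name is invented and the register layout is taken from the code above (bits 2:0 and 6:4 hold the two LM75 address offsets from 0x48, bits 3 and 7 disable the respective subclient):

#include <linux/types.h>

/* Illustration of the duplicate-subclient check used above. */
static bool example_subclients_conflict(u8 val)
{
	bool both_enabled = !(val & 0x88);	/* neither disable bit set */
	u8 addr0 = 0x48 + (val & 0x7);
	u8 addr1 = 0x48 + ((val >> 4) & 0x7);

	return both_enabled && addr0 == addr1;	/* same bus address twice */
}
]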
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index abd5c3a722b9..1f175f381350 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -264,9 +264,6 @@ struct w83792d_data {
char valid; /* !=0 if following fields are valid */
unsigned long last_updated; /* In jiffies */
- /* array of 2 pointers to subclients */
- struct i2c_client *lm75[2];
-
u8 in[9]; /* Register value */
u8 in_max[9]; /* Register value */
u8 in_min[9]; /* Register value */
@@ -927,7 +924,6 @@ w83792d_detect_subclients(struct i2c_client *new_client)
int address = new_client->addr;
u8 val;
struct i2c_adapter *adapter = new_client->adapter;
- struct w83792d_data *data = i2c_get_clientdata(new_client);
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -946,21 +942,19 @@ w83792d_detect_subclients(struct i2c_client *new_client)
}
val = w83792d_read_value(new_client, W83792D_REG_I2C_SUBADDR);
- if (!(val & 0x08))
- data->lm75[0] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
- 0x48 + (val & 0x7));
- if (!(val & 0x80)) {
- if (!IS_ERR(data->lm75[0]) &&
- ((val & 0x7) == ((val >> 4) & 0x7))) {
- dev_err(&new_client->dev,
- "duplicate addresses 0x%x, use force_subclient\n",
- data->lm75[0]->addr);
- return -ENODEV;
- }
- data->lm75[1] = devm_i2c_new_dummy_device(&new_client->dev, adapter,
- 0x48 + ((val >> 4) & 0x7));
+
+ if (!(val & 0x88) && (val & 0x7) == ((val >> 4) & 0x7)) {
+ dev_err(&new_client->dev,
+ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (val & 0x7));
+ return -ENODEV;
}
+ if (!(val & 0x08))
+ devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + (val & 0x7));
+
+ if (!(val & 0x80))
+ devm_i2c_new_dummy_device(&new_client->dev, adapter, 0x48 + ((val >> 4) & 0x7));
+
return 0;
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index e7d0484eabe4..1d2854de1cfc 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -202,7 +202,6 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
}
struct w83793_data {
- struct i2c_client *lm75[2];
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* !=0 if following fields are valid */
@@ -1566,7 +1565,6 @@ w83793_detect_subclients(struct i2c_client *client)
int address = client->addr;
u8 tmp;
struct i2c_adapter *adapter = client->adapter;
- struct w83793_data *data = i2c_get_clientdata(client);
id = i2c_adapter_id(adapter);
if (force_subclients[0] == id && force_subclients[1] == address) {
@@ -1586,21 +1584,19 @@ w83793_detect_subclients(struct i2c_client *client)
}
tmp = w83793_read_value(client, W83793_REG_I2C_SUBADDR);
- if (!(tmp & 0x08))
- data->lm75[0] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + (tmp & 0x7));
- if (!(tmp & 0x80)) {
- if (!IS_ERR(data->lm75[0])
- && ((tmp & 0x7) == ((tmp >> 4) & 0x7))) {
- dev_err(&client->dev,
- "duplicate addresses 0x%x, "
- "use force_subclients\n", data->lm75[0]->addr);
- return -ENODEV;
- }
- data->lm75[1] = devm_i2c_new_dummy_device(&client->dev, adapter,
- 0x48 + ((tmp >> 4) & 0x7));
+
+ if (!(tmp & 0x88) && (tmp & 0x7) == ((tmp >> 4) & 0x7)) {
+ dev_err(&client->dev,
+ "duplicate addresses 0x%x, use force_subclient\n", 0x48 + (tmp & 0x7));
+ return -ENODEV;
}
+ if (!(tmp & 0x08))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + (tmp & 0x7));
+
+ if (!(tmp & 0x80))
+ devm_i2c_new_dummy_device(&client->dev, adapter, 0x48 + ((tmp >> 4) & 0x7));
+
return 0;
}
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index fc0760f55c53..43054568430f 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -5,6 +5,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "coresight-config.h"
#include "coresight-etm-perf.h"
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index c40791baced5..704ce595542c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1746,15 +1746,16 @@ static void cma_cancel_route(struct rdma_id_private *id_priv)
}
}
-static void cma_cancel_listens(struct rdma_id_private *id_priv)
+static void _cma_cancel_listens(struct rdma_id_private *id_priv)
{
struct rdma_id_private *dev_id_priv;
+ lockdep_assert_held(&lock);
+
/*
* Remove from listen_any_list to prevent added devices from spawning
* additional listen requests.
*/
- mutex_lock(&lock);
list_del(&id_priv->list);
while (!list_empty(&id_priv->listen_list)) {
@@ -1768,6 +1769,12 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
rdma_destroy_id(&dev_id_priv->id);
mutex_lock(&lock);
}
+}
+
+static void cma_cancel_listens(struct rdma_id_private *id_priv)
+{
+ mutex_lock(&lock);
+ _cma_cancel_listens(id_priv);
mutex_unlock(&lock);
}
@@ -1776,6 +1783,14 @@ static void cma_cancel_operation(struct rdma_id_private *id_priv,
{
switch (state) {
case RDMA_CM_ADDR_QUERY:
+ /*
+ * We can avoid doing the rdma_addr_cancel() based on state:
+ * only RDMA_CM_ADDR_QUERY has a work item that could still execute.
+ * Notice that the addr_handler work could still be exiting
+ * outside this state; however, due to the interaction with the
+ * handler_mutex, the work is guaranteed not to touch id_priv
+ * during exit.
+ */
rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
break;
case RDMA_CM_ROUTE_QUERY:
@@ -1810,6 +1825,8 @@ static void cma_release_port(struct rdma_id_private *id_priv)
static void destroy_mc(struct rdma_id_private *id_priv,
struct cma_multicast *mc)
{
+ bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
+
if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
ib_sa_free_multicast(mc->sa_mc);
@@ -1826,7 +1843,10 @@ static void destroy_mc(struct rdma_id_private *id_priv,
cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
&mgid);
- cma_igmp_send(ndev, &mgid, false);
+
+ if (!send_only)
+ cma_igmp_send(ndev, &mgid, false);
+
dev_put(ndev);
}
@@ -2574,7 +2594,7 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
return 0;
err_listen:
- list_del(&id_priv->list);
+ _cma_cancel_listens(id_priv);
mutex_unlock(&lock);
if (to_destroy)
rdma_destroy_id(&to_destroy->id);
@@ -3413,6 +3433,21 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
+ /*
+ * The FSM can return to RDMA_CM_ADDR_BOUND after
+ * rdma_resolve_ip() is called, e.g. through the error
+ * path in addr_handler(). If this happens, the existing
+ * request must be canceled before issuing a new one.
+ * Since canceling a request is a bit slow and this
+ * oddball path is rare, keep track of whether a request
+ * has been issued. The flag ends up being permanent,
+ * since the only cancel happens here, immediately
+ * before rdma_resolve_ip().
+ */
+ if (id_priv->used_resolve_ip)
+ rdma_addr_cancel(&id->route.addr.dev_addr);
+ else
+ id_priv->used_resolve_ip = 1;
ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
&id->route.addr.dev_addr,
timeout_ms, addr_handler,
@@ -3771,9 +3806,13 @@ int rdma_listen(struct rdma_cm_id *id, int backlog)
int ret;
if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
+ struct sockaddr_in any_in = {
+ .sin_family = AF_INET,
+ .sin_addr.s_addr = htonl(INADDR_ANY),
+ };
+
/* For a well behaved ULP state will be RDMA_CM_IDLE */
- id->route.addr.src_addr.ss_family = AF_INET;
- ret = rdma_bind_addr(id, cma_src_addr(id_priv));
+ ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
if (ret)
return ret;
if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index 5c463da99845..f92f101ea981 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -91,6 +91,7 @@ struct rdma_id_private {
u8 afonly;
u8 timeout;
u8 min_rnr_timer;
+ u8 used_resolve_ip;
enum ib_gid_type gid_type;
/*
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index e74ddbe46589..15b0cb0f363f 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -876,14 +876,14 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
struct hfi1_ipoib_txq *txq = &priv->txqs[q];
u64 completed = atomic64_read(&txq->complete_txreqs);
- dd_dev_info(priv->dd, "timeout txq %llx q %u stopped %u stops %d no_desc %d ring_full %d\n",
- (unsigned long long)txq, q,
+ dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
+ txq, q,
__netif_subqueue_stopped(dev, txq->q_idx),
atomic_read(&txq->stops),
atomic_read(&txq->no_desc),
atomic_read(&txq->ring_full));
- dd_dev_info(priv->dd, "sde %llx engine %u\n",
- (unsigned long long)txq->sde,
+ dd_dev_info(priv->dd, "sde %p engine %u\n",
+ txq->sde,
txq->sde ? txq->sde->this_idx : 0);
dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 1e9c3c5bee68..d763f097599f 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -326,19 +326,30 @@ static void set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector,
INIT_LIST_HEAD(&hr_cq->rq_list);
}
-static void set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
- struct hns_roce_ib_create_cq *ucmd)
+static int set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata,
+ struct hns_roce_ib_create_cq *ucmd)
{
struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
- if (udata) {
- if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size))
- hr_cq->cqe_size = ucmd->cqe_size;
- else
- hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
- } else {
+ if (!udata) {
hr_cq->cqe_size = hr_dev->caps.cqe_sz;
+ return 0;
+ }
+
+ if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {
+ if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&
+ ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {
+ ibdev_err(&hr_dev->ib_dev,
+ "invalid cqe size %u.\n", ucmd->cqe_size);
+ return -EINVAL;
+ }
+
+ hr_cq->cqe_size = ucmd->cqe_size;
+ } else {
+ hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;
}
+
+ return 0;
}
int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
@@ -366,7 +377,9 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
set_cq_param(hr_cq, attr->cqe, attr->comp_vector, &ucmd);
- set_cqe_size(hr_cq, udata, &ucmd);
+ ret = set_cqe_size(hr_cq, udata, &ucmd);
+ if (ret)
+ return ret;
ret = alloc_cq_buf(hr_dev, hr_cq, udata, ucmd.buf_addr);
if (ret) {
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 5b9953105752..d5f3faa1627a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -3299,7 +3299,7 @@ static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
dest = get_cqe_v2(hr_cq, (prod_index + nfreed) &
hr_cq->ib_cq.cqe);
owner_bit = hr_reg_read(dest, CQE_OWNER);
- memcpy(dest, cqe, sizeof(*cqe));
+ memcpy(dest, cqe, hr_cq->cqe_size);
hr_reg_write(dest, CQE_OWNER, owner_bit);
}
}
@@ -4397,7 +4397,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_qp->path_mtu = ib_mtu;
mtu = ib_mtu_enum_to_int(ib_mtu);
- if (WARN_ON(mtu < 0))
+ if (WARN_ON(mtu <= 0))
+ return -EINVAL;
+#define MAX_LP_MSG_LEN 65536
+ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
+ if (WARN_ON(lp_pktn_ini >= 0xF))
return -EINVAL;
if (attr_mask & IB_QP_PATH_MTU) {
@@ -4405,10 +4410,6 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
hr_reg_clear(qpc_mask, QPC_MTU);
}
-#define MAX_LP_MSG_LEN 65536
- /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
- lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-
hr_reg_write(context, QPC_LP_PKTN_INI, lp_pktn_ini);
hr_reg_clear(qpc_mask, QPC_LP_PKTN_INI);
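
[Editorial aside, not part of the patch: the relocated LP_PKTN_INI computation enforces MTU * 2^lp_pktn_ini <= 64 KB before the value is written into the QP context; for instance an MTU of 4096 yields ilog2(65536 / 4096) = 4. A stand-alone sketch with invented names:

#include <linux/errno.h>
#include <linux/log2.h>

#define EXAMPLE_MAX_LP_MSG_LEN 65536	/* same 64 KB bound as above */

/* Illustration only: pick lp_pktn_ini so MTU * (1 << lp_pktn_ini) <= 64 KB. */
static int example_lp_pktn_ini(int mtu)
{
	if (mtu <= 0)
		return -EINVAL;

	return ilog2(EXAMPLE_MAX_LP_MSG_LEN / mtu);	/* mtu 4096 -> 4, 1024 -> 6 */
}
]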
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index 6b62299abfbb..6dea0a49d171 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -3496,7 +3496,7 @@ static void irdma_cm_disconn_true(struct irdma_qp *iwqp)
original_hw_tcp_state == IRDMA_TCP_STATE_TIME_WAIT ||
last_ae == IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE ||
last_ae == IRDMA_AE_BAD_CLOSE ||
- last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->reset)) {
+ last_ae == IRDMA_AE_LLP_CONNECTION_RESET || iwdev->rf->reset)) {
issue_close = 1;
iwqp->cm_id = NULL;
qp->term_flags = 0;
@@ -4250,7 +4250,7 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
teardown_entry);
attr.qp_state = IB_QPS_ERR;
irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
- if (iwdev->reset)
+ if (iwdev->rf->reset)
irdma_cm_disconn(cm_node->iwqp);
irdma_rem_ref_cm_node(cm_node);
}
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 00de5ee9a260..7de525a5ccf8 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -176,6 +176,14 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR:
qp->flush_code = FLUSH_GENERAL_ERR;
break;
+ case IRDMA_AE_LLP_TOO_MANY_RETRIES:
+ qp->flush_code = FLUSH_RETRY_EXC_ERR;
+ break;
+ case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS:
+ case IRDMA_AE_AMP_MWBIND_BIND_DISABLED:
+ case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ qp->flush_code = FLUSH_MW_BIND_ERR;
+ break;
default:
qp->flush_code = FLUSH_FATAL_ERR;
break;
@@ -1489,7 +1497,7 @@ void irdma_reinitialize_ieq(struct irdma_sc_vsi *vsi)
irdma_puda_dele_rsrc(vsi, IRDMA_PUDA_RSRC_TYPE_IEQ, false);
if (irdma_initialize_ieq(iwdev)) {
- iwdev->reset = true;
+ iwdev->rf->reset = true;
rf->gen_ops.request_reset(rf);
}
}
@@ -1632,13 +1640,13 @@ void irdma_rt_deinit_hw(struct irdma_device *iwdev)
case IEQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi, IRDMA_PUDA_RSRC_TYPE_IEQ,
- iwdev->reset);
+ iwdev->rf->reset);
fallthrough;
case ILQ_CREATED:
if (!iwdev->roce_mode)
irdma_puda_dele_rsrc(&iwdev->vsi,
IRDMA_PUDA_RSRC_TYPE_ILQ,
- iwdev->reset);
+ iwdev->rf->reset);
break;
default:
ibdev_warn(&iwdev->ibdev, "bad init_state = %d\n", iwdev->init_state);
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index bddf88194d09..d219f64b2c3d 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -55,7 +55,7 @@ static void i40iw_close(struct i40e_info *cdev_info, struct i40e_client *client,
iwdev = to_iwdev(ibdev);
if (reset)
- iwdev->reset = true;
+ iwdev->rf->reset = true;
iwdev->iw_status = 0;
irdma_port_ibevent(iwdev);
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 743d9e143a99..b678fe712447 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -346,7 +346,6 @@ struct irdma_device {
bool roce_mode:1;
bool roce_dcqcn_en:1;
bool dcb:1;
- bool reset:1;
bool iw_ooo:1;
enum init_completion_state init_state;
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ff705f323233..3dcbb1fbf2c6 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -102,6 +102,8 @@ enum irdma_flush_opcode {
FLUSH_REM_OP_ERR,
FLUSH_LOC_LEN_ERR,
FLUSH_FATAL_ERR,
+ FLUSH_RETRY_EXC_ERR,
+ FLUSH_MW_BIND_ERR,
};
enum irdma_cmpl_status {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index e94470991fe0..ac91ea5296db 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -2507,7 +2507,7 @@ void irdma_modify_qp_to_err(struct irdma_sc_qp *sc_qp)
struct irdma_qp *qp = sc_qp->qp_uk.back_qp;
struct ib_qp_attr attr;
- if (qp->iwdev->reset)
+ if (qp->iwdev->rf->reset)
return;
attr.qp_state = IB_QPS_ERR;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 4fc323402073..7110ebf834f9 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -535,8 +535,7 @@ static int irdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
irdma_qp_rem_ref(&iwqp->ibqp);
wait_for_completion(&iwqp->free_qp);
irdma_free_lsmm_rsrc(iwqp);
- if (!iwdev->reset)
- irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
+ irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
if (!iwqp->user_mode) {
if (iwqp->iwscq) {
@@ -2035,7 +2034,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
/* Kmode allocations */
int rsize;
- if (entries > rf->max_cqe) {
+ if (entries < 1 || entries > rf->max_cqe) {
err_code = -EINVAL;
goto cq_free_rsrc;
}
@@ -3353,6 +3352,10 @@ static enum ib_wc_status irdma_flush_err_to_ib_wc_status(enum irdma_flush_opcode
return IB_WC_LOC_LEN_ERR;
case FLUSH_GENERAL_ERR:
return IB_WC_WR_FLUSH_ERR;
+ case FLUSH_RETRY_EXC_ERR:
+ return IB_WC_RETRY_EXC_ERR;
+ case FLUSH_MW_BIND_ERR:
+ return IB_WC_MW_BIND_ERR;
case FLUSH_FATAL_ERR:
default:
return IB_WC_FATAL_ERR;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 452e2355d24e..0a3b28142c05 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -403,7 +403,7 @@ static ssize_t diagc_attr_store(struct ib_device *ibdev, u32 port_num,
}
#define QIB_DIAGC_ATTR(N) \
- static_assert(&((struct qib_ibport *)0)->rvp.n_##N != (u64 *)NULL); \
+ static_assert(__same_type(((struct qib_ibport *)0)->rvp.n_##N, u64)); \
static struct qib_diagc_attr qib_diagc_attr_##N = { \
.attr = __ATTR(N, 0664, diagc_attr_show, diagc_attr_store), \
.counter = \
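
The qib change swaps a pointer-comparison trick for a direct type check on the counter member. Here is a standalone illustration of the same compile-time check, assuming GCC or Clang, where the kernel's __same_type() is built on __builtin_types_compatible_p(); the struct and member names are made up for the example.

    #include <stdint.h>

    struct counters {
        uint64_t n_rc_resends;
        uint32_t n_seq_naks;
    };

    #define ASSERT_MEMBER_IS_U64(type, member) \
        _Static_assert(__builtin_types_compatible_p( \
                __typeof__(((type *)0)->member), uint64_t), \
                #member " must be a u64")

    ASSERT_MEMBER_IS_U64(struct counters, n_rc_resends);
    /* ASSERT_MEMBER_IS_U64(struct counters, n_seq_naks); would fail to build */

    int main(void)
    {
        return 0;
    }
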
diff --git a/drivers/infiniband/hw/usnic/usnic_ib.h b/drivers/infiniband/hw/usnic/usnic_ib.h
index 84dd682d2334..b350081aeb5a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib.h
@@ -90,7 +90,7 @@ struct usnic_ib_dev {
struct usnic_ib_vf {
struct usnic_ib_dev *pf;
- spinlock_t lock;
+ struct mutex lock;
struct usnic_vnic *vnic;
unsigned int qp_grp_ref_cnt;
struct usnic_ib_pd *pd;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index 228e9a36dad0..d346dd48e731 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -572,7 +572,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
}
vf->pf = pf;
- spin_lock_init(&vf->lock);
+ mutex_init(&vf->lock);
mutex_lock(&pf->usdev_lock);
list_add_tail(&vf->link, &pf->vf_dev_list);
/*
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 06a4e9d4545d..756a83bcff58 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -196,7 +196,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
for (i = 0; dev_list[i]; i++) {
dev = dev_list[i];
vf = dev_get_drvdata(dev);
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
usnic_dbg("Found used vnic %s from %s\n",
@@ -208,10 +208,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
vf, pd, res_spec,
trans_spec);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
goto qp_grp_check;
}
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
usnic_uiom_free_dev_list(dev_list);
@@ -220,7 +220,7 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
/* Try to find resources on an unused vf */
list_for_each_entry(vf, &us_ibdev->vf_dev_list, link) {
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
vnic = vf->vnic;
if (vf->qp_grp_ref_cnt == 0 &&
usnic_vnic_check_room(vnic, res_spec) == 0) {
@@ -228,10 +228,10 @@ find_free_vf_and_create_qp_grp(struct ib_qp *qp,
vf, pd, res_spec,
trans_spec);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
goto qp_grp_check;
}
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
usnic_info("No free qp grp found on %s\n",
@@ -253,9 +253,9 @@ static void qp_grp_destroy(struct usnic_ib_qp_grp *qp_grp)
WARN_ON(qp_grp->state != IB_QPS_RESET);
- spin_lock(&vf->lock);
+ mutex_lock(&vf->lock);
usnic_ib_qp_grp_destroy(qp_grp);
- spin_unlock(&vf->lock);
+ mutex_unlock(&vf->lock);
}
static int create_qp_validate_user_data(struct usnic_ib_create_qp_cmd cmd)
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 632dbdd21915..fb23a5b780a4 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -44,9 +44,9 @@
#define NOC_PERM_MODE_BYPASS (1 << NOC_QOS_MODE_BYPASS)
#define NOC_QOS_PRIORITYn_ADDR(n) (0x8 + (n * 0x1000))
-#define NOC_QOS_PRIORITY_MASK 0xf
+#define NOC_QOS_PRIORITY_P1_MASK 0xc
+#define NOC_QOS_PRIORITY_P0_MASK 0x3
#define NOC_QOS_PRIORITY_P1_SHIFT 0x2
-#define NOC_QOS_PRIORITY_P0_SHIFT 0x3
#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK 0x3
@@ -173,6 +173,16 @@ static const struct clk_bulk_data bus_mm_clocks[] = {
{ .id = "iface" },
};
+static const struct clk_bulk_data bus_a2noc_clocks[] = {
+ { .id = "bus" },
+ { .id = "bus_a" },
+ { .id = "ipa" },
+ { .id = "ufs_axi" },
+ { .id = "aggre2_ufs_axi" },
+ { .id = "aggre2_usb3_axi" },
+ { .id = "cfg_noc_usb2_axi" },
+};
+
/**
* struct qcom_icc_provider - Qualcomm specific interconnect provider
* @provider: generic interconnect provider
@@ -307,7 +317,7 @@ DEFINE_QNODE(slv_bimc_cfg, SDM660_SLAVE_BIMC_CFG, 4, -1, 56, true, -1, 0, -1, 0)
DEFINE_QNODE(slv_prng, SDM660_SLAVE_PRNG, 4, -1, 44, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_spdm, SDM660_SLAVE_SPDM, 4, -1, 60, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_qdss_cfg, SDM660_SLAVE_QDSS_CFG, 4, -1, 63, true, -1, 0, -1, 0);
-DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_BLSP_1, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
+DEFINE_QNODE(slv_cnoc_mnoc_cfg, SDM660_SLAVE_CNOC_MNOC_CFG, 4, -1, 66, true, -1, 0, -1, SDM660_MASTER_CNOC_MNOC_CFG);
DEFINE_QNODE(slv_snoc_cfg, SDM660_SLAVE_SNOC_CFG, 4, -1, 70, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_qm_cfg, SDM660_SLAVE_QM_CFG, 4, -1, 212, true, -1, 0, -1, 0);
DEFINE_QNODE(slv_clk_ctl, SDM660_SLAVE_CLK_CTL, 4, -1, 47, true, -1, 0, -1, 0);
@@ -624,13 +634,12 @@ static int qcom_icc_noc_set_qos_priority(struct regmap *rmap,
/* Must be updated one at a time, P1 first, P0 last */
val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
rc = regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
- NOC_QOS_PRIORITY_MASK, val);
+ NOC_QOS_PRIORITY_P1_MASK, val);
if (rc)
return rc;
- val = qos->prio_level << NOC_QOS_PRIORITY_P0_SHIFT;
return regmap_update_bits(rmap, NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
- NOC_QOS_PRIORITY_MASK, val);
+ NOC_QOS_PRIORITY_P0_MASK, qos->prio_level);
}
static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
@@ -810,6 +819,10 @@ static int qnoc_probe(struct platform_device *pdev)
qp->bus_clks = devm_kmemdup(dev, bus_mm_clocks,
sizeof(bus_mm_clocks), GFP_KERNEL);
qp->num_clks = ARRAY_SIZE(bus_mm_clocks);
+ } else if (of_device_is_compatible(dev->of_node, "qcom,sdm660-a2noc")) {
+ qp->bus_clks = devm_kmemdup(dev, bus_a2noc_clocks,
+ sizeof(bus_a2noc_clocks), GFP_KERNEL);
+ qp->num_clks = ARRAY_SIZE(bus_a2noc_clocks);
} else {
if (of_device_is_compatible(dev->of_node, "qcom,sdm660-bimc"))
qp->is_bimc_node = true;
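
The sdm660 change splits the shared priority mask into separate P1 and P0 masks so that programming one field no longer clears the other. A small userspace sketch of that masked read-modify-write follows, with a plain helper standing in for regmap_update_bits() and made-up field values.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PRIORITY_P1_MASK  0xcu   /* bits [3:2] */
    #define PRIORITY_P0_MASK  0x3u   /* bits [1:0] */
    #define PRIORITY_P1_SHIFT 2

    /* Equivalent of regmap_update_bits(): change only the bits in mask. */
    static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
    {
        *reg = (*reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint32_t reg = 0;

        /* P1 first, P0 last, each write touching only its own field. */
        update_bits(&reg, PRIORITY_P1_MASK, 2u << PRIORITY_P1_SHIFT);
        update_bits(&reg, PRIORITY_P0_MASK, 1u);

        assert(reg == ((2u << PRIORITY_P1_SHIFT) | 1u));
        printf("reg = 0x%x\n", reg);
        return 0;
    }
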
diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c
index 559db9259e65..fdfa39ec2a4d 100644
--- a/drivers/iommu/apple-dart.c
+++ b/drivers/iommu/apple-dart.c
@@ -183,7 +183,6 @@ struct apple_dart_master_cfg {
static struct platform_driver apple_dart_driver;
static const struct iommu_ops apple_dart_iommu_ops;
-static const struct iommu_flush_ops apple_dart_tlb_ops;
static struct apple_dart_domain *to_dart_domain(struct iommu_domain *dom)
{
@@ -338,22 +337,6 @@ static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
-static void apple_dart_tlb_flush_all(void *cookie)
-{
- apple_dart_domain_flush_tlb(cookie);
-}
-
-static void apple_dart_tlb_flush_walk(unsigned long iova, size_t size,
- size_t granule, void *cookie)
-{
- apple_dart_domain_flush_tlb(cookie);
-}
-
-static const struct iommu_flush_ops apple_dart_tlb_ops = {
- .tlb_flush_all = apple_dart_tlb_flush_all,
- .tlb_flush_walk = apple_dart_tlb_flush_walk,
-};
-
static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
@@ -435,7 +418,6 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
.ias = 32,
.oas = 36,
.coherent_walk = 1,
- .tlb = &apple_dart_tlb_ops,
.iommu_dev = dart->dev,
};
@@ -661,16 +643,34 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
return -EINVAL;
}
+static DEFINE_MUTEX(apple_dart_groups_lock);
+
+static void apple_dart_release_group(void *iommu_data)
+{
+ int i, sid;
+ struct apple_dart_stream_map *stream_map;
+ struct apple_dart_master_cfg *group_master_cfg = iommu_data;
+
+ mutex_lock(&apple_dart_groups_lock);
+
+ for_each_stream_map(i, group_master_cfg, stream_map)
+ for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
+ stream_map->dart->sid2group[sid] = NULL;
+
+ kfree(iommu_data);
+ mutex_unlock(&apple_dart_groups_lock);
+}
+
static struct iommu_group *apple_dart_device_group(struct device *dev)
{
- static DEFINE_MUTEX(lock);
int i, sid;
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_stream_map *stream_map;
+ struct apple_dart_master_cfg *group_master_cfg;
struct iommu_group *group = NULL;
struct iommu_group *res = ERR_PTR(-EINVAL);
- mutex_lock(&lock);
+ mutex_lock(&apple_dart_groups_lock);
for_each_stream_map(i, cfg, stream_map) {
for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS) {
@@ -698,6 +698,20 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
#endif
group = generic_device_group(dev);
+ res = ERR_PTR(-ENOMEM);
+ if (!group)
+ goto out;
+
+ group_master_cfg = kzalloc(sizeof(*group_master_cfg), GFP_KERNEL);
+ if (!group_master_cfg) {
+ iommu_group_put(group);
+ goto out;
+ }
+
+ memcpy(group_master_cfg, cfg, sizeof(*group_master_cfg));
+ iommu_group_set_iommudata(group, group_master_cfg,
+ apple_dart_release_group);
+
for_each_stream_map(i, cfg, stream_map)
for_each_set_bit(sid, &stream_map->sidmap, DART_MAX_STREAMS)
stream_map->dart->sid2group[sid] = group;
@@ -705,7 +719,7 @@ static struct iommu_group *apple_dart_device_group(struct device *dev)
res = group;
out:
- mutex_unlock(&lock);
+ mutex_unlock(&apple_dart_groups_lock);
return res;
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 0ec5514c9980..b7708b93f3fa 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1942,18 +1942,18 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
- pr_err("[INTR-REMAP] Request device [0x%02x:0x%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
+ pr_err("[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else if (pasid == INVALID_IOASID)
- pr_err("[%s NO_PASID] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+ pr_err("[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
type ? "DMA Read" : "DMA Write",
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr,
fault_reason, reason);
else
- pr_err("[%s PASID 0x%x] Request device [0x%02x:0x%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
+ pr_err("[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n",
type ? "DMA Read" : "DMA Write", pasid,
source_id >> 8, PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr,
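
The dmar change only touches the format strings: %02x already prints hex, so the extra "0x" prefixes made the source ID read oddly next to the usual bus:slot.func notation. A tiny sketch of the corrected formatting, with stand-in values for the source ID:

    #include <stdio.h>

    int main(void)
    {
        unsigned int source_id = 0x00fa;             /* example: bus 0x00, devfn 0xfa */
        unsigned int bus  = source_id >> 8;
        unsigned int slot = (source_id & 0xff) >> 3; /* like PCI_SLOT() */
        unsigned int func = source_id & 0x7;         /* like PCI_FUNC() */

        /* Prints "[00:1f.2]" rather than "[0x00:0x1f.2]" */
        printf("Request device [%02x:%02x.%d]\n", bus, slot, func);
        return 0;
    }
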
diff --git a/drivers/ipack/devices/ipoctal.c b/drivers/ipack/devices/ipoctal.c
index c14e65a5d38f..c709861198e5 100644
--- a/drivers/ipack/devices/ipoctal.c
+++ b/drivers/ipack/devices/ipoctal.c
@@ -33,6 +33,7 @@ struct ipoctal_channel {
unsigned int pointer_read;
unsigned int pointer_write;
struct tty_port tty_port;
+ bool tty_registered;
union scc2698_channel __iomem *regs;
union scc2698_block __iomem *block_regs;
unsigned int board_id;
@@ -81,22 +82,34 @@ static int ipoctal_port_activate(struct tty_port *port, struct tty_struct *tty)
return 0;
}
-static int ipoctal_open(struct tty_struct *tty, struct file *file)
+static int ipoctal_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct ipoctal_channel *channel = dev_get_drvdata(tty->dev);
struct ipoctal *ipoctal = chan_to_ipoctal(channel, tty->index);
- int err;
-
- tty->driver_data = channel;
+ int res;
if (!ipack_get_carrier(ipoctal->dev))
return -EBUSY;
- err = tty_port_open(&channel->tty_port, tty, file);
- if (err)
- ipack_put_carrier(ipoctal->dev);
+ res = tty_standard_install(driver, tty);
+ if (res)
+ goto err_put_carrier;
+
+ tty->driver_data = channel;
+
+ return 0;
+
+err_put_carrier:
+ ipack_put_carrier(ipoctal->dev);
+
+ return res;
+}
+
+static int ipoctal_open(struct tty_struct *tty, struct file *file)
+{
+ struct ipoctal_channel *channel = tty->driver_data;
- return err;
+ return tty_port_open(&channel->tty_port, tty, file);
}
static void ipoctal_reset_stats(struct ipoctal_stats *stats)
@@ -264,7 +277,6 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
int res;
int i;
struct tty_driver *tty;
- char name[20];
struct ipoctal_channel *channel;
struct ipack_region *region;
void __iomem *addr;
@@ -355,8 +367,11 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
/* Fill struct tty_driver with ipoctal data */
tty->owner = THIS_MODULE;
tty->driver_name = KBUILD_MODNAME;
- sprintf(name, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
- tty->name = name;
+ tty->name = kasprintf(GFP_KERNEL, KBUILD_MODNAME ".%d.%d.", bus_nr, slot);
+ if (!tty->name) {
+ res = -ENOMEM;
+ goto err_put_driver;
+ }
tty->major = 0;
tty->minor_start = 0;
@@ -371,8 +386,7 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
res = tty_register_driver(tty);
if (res) {
dev_err(&ipoctal->dev->dev, "Can't register tty driver.\n");
- tty_driver_kref_put(tty);
- return res;
+ goto err_free_name;
}
/* Save struct tty_driver for use it when uninstalling the device */
@@ -383,7 +397,9 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
channel = &ipoctal->channel[i];
tty_port_init(&channel->tty_port);
- tty_port_alloc_xmit_buf(&channel->tty_port);
+ res = tty_port_alloc_xmit_buf(&channel->tty_port);
+ if (res)
+ continue;
channel->tty_port.ops = &ipoctal_tty_port_ops;
ipoctal_reset_stats(&channel->stats);
@@ -391,13 +407,15 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
spin_lock_init(&channel->lock);
channel->pointer_read = 0;
channel->pointer_write = 0;
- tty_dev = tty_port_register_device(&channel->tty_port, tty, i, NULL);
+ tty_dev = tty_port_register_device_attr(&channel->tty_port, tty,
+ i, NULL, channel, NULL);
if (IS_ERR(tty_dev)) {
dev_err(&ipoctal->dev->dev, "Failed to register tty device.\n");
+ tty_port_free_xmit_buf(&channel->tty_port);
tty_port_destroy(&channel->tty_port);
continue;
}
- dev_set_drvdata(tty_dev, channel);
+ channel->tty_registered = true;
}
/*
@@ -409,6 +427,13 @@ static int ipoctal_inst_slot(struct ipoctal *ipoctal, unsigned int bus_nr,
ipoctal_irq_handler, ipoctal);
return 0;
+
+err_free_name:
+ kfree(tty->name);
+err_put_driver:
+ tty_driver_kref_put(tty);
+
+ return res;
}
static inline int ipoctal_copy_write_buffer(struct ipoctal_channel *channel,
@@ -648,6 +673,7 @@ static void ipoctal_cleanup(struct tty_struct *tty)
static const struct tty_operations ipoctal_fops = {
.ioctl = NULL,
+ .install = ipoctal_install,
.open = ipoctal_open,
.close = ipoctal_close,
.write = ipoctal_write_tty,
@@ -690,12 +716,17 @@ static void __ipoctal_remove(struct ipoctal *ipoctal)
for (i = 0; i < NR_CHANNELS; i++) {
struct ipoctal_channel *channel = &ipoctal->channel[i];
+
+ if (!channel->tty_registered)
+ continue;
+
tty_unregister_device(ipoctal->tty_drv, i);
tty_port_free_xmit_buf(&channel->tty_port);
tty_port_destroy(&channel->tty_port);
}
tty_unregister_driver(ipoctal->tty_drv);
+ kfree(ipoctal->tty_drv->name);
tty_driver_kref_put(ipoctal->tty_drv);
kfree(ipoctal);
}
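
The ipoctal fix stops pointing tty->name at a stack buffer that dies when the probe function returns; the name is now allocated and freed on removal. A minimal userspace sketch of the same lifetime fix, assuming asprintf() (glibc/BSD) in place of kasprintf() and a made-up driver struct:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_driver {
        char *name;   /* must stay valid for the driver's whole lifetime */
    };

    static int driver_setup(struct fake_driver *drv, int bus, int slot)
    {
        /* Heap allocation outlives this function, unlike a local char name[20]. */
        if (asprintf(&drv->name, "ipoctal.%d.%d.", bus, slot) < 0)
            return -1;
        return 0;
    }

    static void driver_teardown(struct fake_driver *drv)
    {
        free(drv->name);          /* mirrors kfree(tty->name) on remove */
        drv->name = NULL;
    }

    int main(void)
    {
        struct fake_driver drv = { 0 };

        if (driver_setup(&drv, 0, 3))
            return 1;
        printf("%s\n", drv.name);
        driver_teardown(&drv);
        return 0;
    }
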
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 4d5924e9f766..aca7b595c4c7 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -409,6 +409,7 @@ config MESON_IRQ_GPIO
config GOLDFISH_PIC
bool "Goldfish programmable interrupt controller"
depends on MIPS && (GOLDFISH || COMPILE_TEST)
+ select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
Say yes here to enable Goldfish interrupt controller driver used
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 7557ab551295..53e0fb0562c1 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -359,16 +359,16 @@ static void armada_370_xp_ipi_send_mask(struct irq_data *d,
ARMADA_370_XP_SW_TRIG_INT_OFFS);
}
-static void armada_370_xp_ipi_eoi(struct irq_data *d)
+static void armada_370_xp_ipi_ack(struct irq_data *d)
{
writel(~BIT(d->hwirq), per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
}
static struct irq_chip ipi_irqchip = {
.name = "IPI",
+ .irq_ack = armada_370_xp_ipi_ack,
.irq_mask = armada_370_xp_ipi_mask,
.irq_unmask = armada_370_xp_ipi_unmask,
- .irq_eoi = armada_370_xp_ipi_eoi,
.ipi_send_mask = armada_370_xp_ipi_send_mask,
};
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 7f40dca8cda5..eb0882d15366 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4501,7 +4501,7 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
if (err) {
if (i > 0)
- its_vpe_irq_domain_free(domain, virq, i - 1);
+ its_vpe_irq_domain_free(domain, virq, i);
its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index d329ec3d64d8..5f22c9d65e57 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -107,6 +107,8 @@ static DEFINE_RAW_SPINLOCK(cpu_map_lock);
#endif
+static DEFINE_STATIC_KEY_FALSE(needs_rmw_access);
+
/*
* The GIC mapping of CPU interfaces does not necessarily match
* the logical CPU numbering. Let's use a mapping as returned
@@ -774,6 +776,25 @@ static int gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
+static void rmw_writeb(u8 bval, void __iomem *addr)
+{
+ static DEFINE_RAW_SPINLOCK(rmw_lock);
+ unsigned long offset = (unsigned long)addr & 3UL;
+ unsigned long shift = offset * 8;
+ unsigned long flags;
+ u32 val;
+
+ raw_spin_lock_irqsave(&rmw_lock, flags);
+
+ addr -= offset;
+ val = readl_relaxed(addr);
+ val &= ~GENMASK(shift + 7, shift);
+ val |= bval << shift;
+ writel_relaxed(val, addr);
+
+ raw_spin_unlock_irqrestore(&rmw_lock, flags);
+}
+
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
@@ -788,7 +809,10 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- writeb_relaxed(gic_cpu_map[cpu], reg);
+ if (static_branch_unlikely(&needs_rmw_access))
+ rmw_writeb(gic_cpu_map[cpu], reg);
+ else
+ writeb_relaxed(gic_cpu_map[cpu], reg);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
@@ -1375,6 +1399,30 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
return true;
}
+static bool gic_enable_rmw_access(void *data)
+{
+ /*
+ * The EMEV2 class of machines has a broken interconnect, and
+ * locks up on accesses that are less than 32bit. So far, only
+ * the affinity setting requires it.
+ */
+ if (of_machine_is_compatible("renesas,emev2")) {
+ static_branch_enable(&needs_rmw_access);
+ return true;
+ }
+
+ return false;
+}
+
+static const struct gic_quirk gic_quirks[] = {
+ {
+ .desc = "broken byte access",
+ .compatible = "arm,pl390",
+ .init = gic_enable_rmw_access,
+ },
+ { },
+};
+
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
{
if (!gic || !node)
@@ -1391,6 +1439,8 @@ static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
gic->percpu_offset = 0;
+ gic_enable_of_quirks(node, gic_quirks, gic);
+
return 0;
error:
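
The new rmw_writeb() emulates a byte store with a locked 32-bit read-modify-write for interconnects that cannot cope with sub-word accesses. Below is a standalone sketch of the shift-and-mask arithmetic on an ordinary buffer; the locking is omitted because there is no concurrency in the example, and the byte layout assumed is little-endian.

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Write one byte at byte offset 'addr' in 'words' using only 32-bit accesses. */
    static void rmw_writeb(uint32_t *words, size_t addr, uint8_t bval)
    {
        size_t offset = addr & 3;          /* byte position within the word */
        unsigned int shift = offset * 8;
        uint32_t val = words[addr / 4];

        val &= ~(0xffu << shift);          /* clear the target byte */
        val |= (uint32_t)bval << shift;    /* insert the new byte */
        words[addr / 4] = val;
    }

    int main(void)
    {
        uint32_t words[2] = { 0x11223344, 0x55667788 };
        uint8_t bytes[8];

        rmw_writeb(words, 6, 0xab);        /* byte 2 of the second word */

        memcpy(bytes, words, sizeof(bytes));
        assert(bytes[6] == 0xab);          /* holds on a little-endian host */
        return 0;
    }
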
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index f565317a3da3..12df2162108e 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -25,7 +25,7 @@
/* The maximum IRQ pin number of mbigen chip(start from 0) */
#define MAXIMUM_IRQ_PIN_NUM 1407
-/**
+/*
* In mbigen vector register
* bit[21:12]: event id value
* bit[11:0]: device id
@@ -39,14 +39,14 @@
/* offset of vector register in mbigen node */
#define REG_MBIGEN_VEC_OFFSET 0x200
-/**
+/*
* offset of clear register in mbigen node
* This register is used to clear the status
* of interrupt
*/
#define REG_MBIGEN_CLEAR_OFFSET 0xa000
-/**
+/*
* offset of interrupt type register
* This register is used to configure interrupt
* trigger type
diff --git a/drivers/irqchip/irq-renesas-rza1.c b/drivers/irqchip/irq-renesas-rza1.c
index b0d46ac42b89..72c06e883d1c 100644
--- a/drivers/irqchip/irq-renesas-rza1.c
+++ b/drivers/irqchip/irq-renesas-rza1.c
@@ -223,12 +223,12 @@ static int rza1_irqc_probe(struct platform_device *pdev)
goto out_put_node;
}
- priv->chip.name = "rza1-irqc",
- priv->chip.irq_mask = irq_chip_mask_parent,
- priv->chip.irq_unmask = irq_chip_unmask_parent,
- priv->chip.irq_eoi = rza1_irqc_eoi,
- priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy,
- priv->chip.irq_set_type = rza1_irqc_set_type,
+ priv->chip.name = "rza1-irqc";
+ priv->chip.irq_mask = irq_chip_mask_parent;
+ priv->chip.irq_unmask = irq_chip_unmask_parent;
+ priv->chip.irq_eoi = rza1_irqc_eoi;
+ priv->chip.irq_retrigger = irq_chip_retrigger_hierarchy;
+ priv->chip.irq_set_type = rza1_irqc_set_type;
priv->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
priv->irq_domain = irq_domain_add_hierarchy(parent, 0, IRQC_NUM_IRQ,
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index edf4ee6eff25..cf128b3471d7 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -275,8 +275,8 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
bus_nr = ida_simple_get(&mcb_ida, 0, 0, GFP_KERNEL);
if (bus_nr < 0) {
- rc = bus_nr;
- goto err_free;
+ kfree(bus);
+ return ERR_PTR(bus_nr);
}
bus->bus_nr = bus_nr;
@@ -291,12 +291,12 @@ struct mcb_bus *mcb_alloc_bus(struct device *carrier)
dev_set_name(&bus->dev, "mcb:%d", bus_nr);
rc = device_add(&bus->dev);
if (rc)
- goto err_free;
+ goto err_put;
return bus;
-err_free:
- put_device(carrier);
- kfree(bus);
+
+err_put:
+ put_device(&bus->dev);
return ERR_PTR(rc);
}
EXPORT_SYMBOL_NS_GPL(mcb_alloc_bus, MCB);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ae8fe54ea358..6c0c3d0d905a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5700,10 +5700,6 @@ static int md_alloc(dev_t dev, char *name)
disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
- /* As soon as we call add_disk(), another thread could get
- * through to md_open, so make sure it doesn't get too far
- */
- mutex_lock(&mddev->open_mutex);
add_disk(disk);
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
@@ -5718,7 +5714,6 @@ static int md_alloc(dev_t dev, char *name)
if (mddev->kobj.sd &&
sysfs_create_group(&mddev->kobj, &md_bitmap_group))
pr_debug("pointless warning\n");
- mutex_unlock(&mddev->open_mutex);
abort:
mutex_unlock(&disks_mutex);
if (!error && mddev->kobj.sd) {
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index d402e456f27d..7d0ab19c38bb 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1140,8 +1140,8 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
continue;
length = 0;
switch (c) {
- /* SOF0: baseline JPEG */
- case SOF0:
+ /* JPEG_MARKER_SOF0: baseline JPEG */
+ case JPEG_MARKER_SOF0:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
@@ -1172,7 +1172,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
notfound = 0;
break;
- case DQT:
+ case JPEG_MARKER_DQT:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
@@ -1185,7 +1185,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
skip(&jpeg_buffer, length);
break;
- case DHT:
+ case JPEG_MARKER_DHT:
if (get_word_be(&jpeg_buffer, &word))
break;
length = (long)word - 2;
@@ -1198,15 +1198,15 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
skip(&jpeg_buffer, length);
break;
- case SOS:
+ case JPEG_MARKER_SOS:
sos = jpeg_buffer.curr - 2; /* 0xffda */
break;
/* skip payload-less markers */
- case RST ... RST + 7:
- case SOI:
- case EOI:
- case TEM:
+ case JPEG_MARKER_RST ... JPEG_MARKER_RST + 7:
+ case JPEG_MARKER_SOI:
+ case JPEG_MARKER_EOI:
+ case JPEG_MARKER_TEM:
break;
/* skip uninteresting payload markers */
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.h b/drivers/media/platform/s5p-jpeg/jpeg-core.h
index a77d93c098ce..8473a019bb5f 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.h
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.h
@@ -37,15 +37,15 @@
#define EXYNOS3250_IRQ_TIMEOUT 0x10000000
/* a selection of JPEG markers */
-#define TEM 0x01
-#define SOF0 0xc0
-#define DHT 0xc4
-#define RST 0xd0
-#define SOI 0xd8
-#define EOI 0xd9
-#define SOS 0xda
-#define DQT 0xdb
-#define DHP 0xde
+#define JPEG_MARKER_TEM 0x01
+#define JPEG_MARKER_SOF0 0xc0
+#define JPEG_MARKER_DHT 0xc4
+#define JPEG_MARKER_RST 0xd0
+#define JPEG_MARKER_SOI 0xd8
+#define JPEG_MARKER_EOI 0xd9
+#define JPEG_MARKER_SOS 0xda
+#define JPEG_MARKER_DQT 0xdb
+#define JPEG_MARKER_DHP 0xde
/* Flags that indicate a format can be used for capture/output */
#define SJPEG_FMT_FLAG_ENC_CAPTURE (1 << 0)
@@ -187,11 +187,11 @@ struct s5p_jpeg_marker {
* @fmt: driver-specific format of this queue
* @w: image width
* @h: image height
- * @sos: SOS marker's position relative to the buffer beginning
- * @dht: DHT markers' positions relative to the buffer beginning
- * @dqt: DQT markers' positions relative to the buffer beginning
- * @sof: SOF0 marker's position relative to the buffer beginning
- * @sof_len: SOF0 marker's payload length (without length field itself)
+ * @sos: JPEG_MARKER_SOS's position relative to the buffer beginning
+ * @dht: JPEG_MARKER_DHT markers' positions relative to the buffer beginning
+ * @dqt: JPEG_MARKER_DQT markers' positions relative to the buffer beginning
+ * @sof: JPEG_MARKER_SOF0's position relative to the buffer beginning
+ * @sof_len: JPEG_MARKER_SOF0's payload length (without length field itself)
* @size: image buffer size in bytes
*/
struct s5p_jpeg_q_data {
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 3e729a17b35f..48d52baec1a1 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -24,6 +24,7 @@ static const u8 COMMAND_VERSION[] = { 'v' };
// End transmit and repeat reset command so we exit sump mode
static const u8 COMMAND_RESET[] = { 0xff, 0xff, 0, 0, 0, 0, 0 };
static const u8 COMMAND_SMODE_ENTER[] = { 's' };
+static const u8 COMMAND_SMODE_EXIT[] = { 0 };
static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
#define REPLY_XMITCOUNT 't'
@@ -309,12 +310,30 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
buf[i] = cpu_to_be16(v);
}
- buf[count] = cpu_to_be16(0xffff);
+ buf[count] = 0xffff;
irtoy->tx_buf = buf;
irtoy->tx_len = size;
irtoy->emitted = 0;
+ // There is an issue where if the unit is receiving IR while the
+ // first TXSTART command is sent, the device might end up hanging
+ // with its led on. It does not respond to any command when this
+ // happens. To work around this, re-enter sample mode.
+ err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
+ sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+ if (err) {
+ dev_err(irtoy->dev, "exit sample mode: %d\n", err);
+ return err;
+ }
+
+ err = irtoy_command(irtoy, COMMAND_SMODE_ENTER,
+ sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND);
+ if (err) {
+ dev_err(irtoy->dev, "enter sample mode: %d\n", err);
+ return err;
+ }
+
err = irtoy_command(irtoy, COMMAND_TXSTART, sizeof(COMMAND_TXSTART),
STATE_TX);
kfree(buf);
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 1b6076a89ca6..6669625ba4c8 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
struct device *tty_dev;
tty_port_init(&vk->tty[i].port);
- tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
- i, dev);
+ tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
+ tty_drv, i, dev, vk,
+ NULL);
if (IS_ERR(tty_dev)) {
err = PTR_ERR(tty_dev);
goto unwind;
}
- dev_set_drvdata(tty_dev, vk);
vk->tty[i].is_opened = false;
}
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 2e1befbd1ad9..693981891870 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
/* check for 64-bit DMA address supported (DAC) */
/* check for 32-bit DMA address supported (SAC) */
- if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
+ if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
dev_err(&pci_dev->dev,
"err: neither DMA32 nor DMA64 supported\n");
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 7b0516cf808b..91b57544f7c6 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -405,7 +405,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
bool next_entry_found = false;
- struct hl_cs *next;
+ struct hl_cs *next, *first_cs;
if (!cs_needs_timeout(cs))
return;
@@ -415,9 +415,16 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
/* We need to handle tdr only once for the complete staged submission.
* Hence, we choose the CS that reaches this function first which is
* the CS marked as 'staged_last'.
+ * In case a single staged CS was submitted which has both first and last
+ * indications, then "cs_find_first" below will return NULL, since we
+ * removed the CS node from the list before getting here.
+ * In such cases just continue with the CS to cancel its TDR work.
*/
- if (cs->staged_cs && cs->staged_last)
- cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (cs->staged_cs && cs->staged_last) {
+ first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (first_cs)
+ cs = first_cs;
+ }
spin_unlock(&hdev->cs_mirror_lock);
@@ -1288,6 +1295,12 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
if (rc)
goto free_cs_object;
+ /* If this is a staged submission we must return the staged sequence
+ * rather than the internal CS sequence
+ */
+ if (cs->staged_cs)
+ *cs_seq = cs->staged_sequence;
+
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
@@ -1988,6 +2001,15 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
goto free_cs_chunk_array;
}
+ if (!hdev->nic_ports_mask) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
+ dev_err(hdev->dev,
+ "Collective operations not supported when NIC ports are disabled");
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
collective_engine_id = chunk->collective_engine_id;
}
@@ -2026,9 +2048,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
spin_unlock(&ctx->sig_mgr.lock);
if (!handle_found) {
- dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+ /* treat as signal CS already finished */
+ dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
signal_seq);
- rc = -EINVAL;
+ rc = 0;
goto free_cs_chunk_array;
}
@@ -2613,7 +2636,8 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* completed after the poll function.
*/
if (!mcs_data.completion_bitmap) {
- dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+ dev_warn_ratelimited(hdev->dev,
+ "Multi-CS got completion on wait but no CS completed\n");
rc = -EFAULT;
}
}
@@ -2740,10 +2764,20 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
else
interrupt = &hdev->user_interrupt[interrupt_offset];
+ /* Add pending user interrupt to relevant list for the interrupt
+ * handler to monitor
+ */
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+ /* We check the completion value, as the interrupt could have been received
+ * before we added the node to the wait list
+ */
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
- goto free_fence;
+ goto remove_pending_user_interrupt;
}
if (completion_value >= target_value)
@@ -2752,14 +2786,7 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
*status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
- goto free_fence;
-
- /* Add pending user interrupt to relevant list for the interrupt
- * handler to monitor
- */
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
- list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ goto remove_pending_user_interrupt;
wait_again:
/* Wait for interrupt handler to signal completion */
@@ -2770,6 +2797,15 @@ wait_again:
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ /* reinit_completion must be called before we check for user
+ * completion value, otherwise, if interrupt is received after
+ * the comparison and before the next wait_for_completion,
+ * we will reach timeout and fail
+ */
+ reinit_completion(&pend->fence.completion);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
@@ -2780,11 +2816,7 @@ wait_again:
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
- reinit_completion(&pend->fence.completion);
timeout = completion_rc;
-
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
goto wait_again;
}
} else if (completion_rc == -ERESTARTSYS) {
@@ -2802,7 +2834,6 @@ remove_pending_user_interrupt:
list_del(&pend->wait_list_node);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
-free_fence:
kfree(pend);
hl_ctx_put(ctx);
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 76b7de8f1406..0743319b10c7 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -437,6 +437,7 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs_compl *cs_cmpl)
{
struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+ u32 offset = 0;
cs_cmpl->hw_sob = handle->hw_sob;
@@ -446,9 +447,13 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
* set offset 1 for example he mean to wait only for the first
* signal only, which will be pre_sob_val, and if he set offset 2
* then the value required is (pre_sob_val + 1) and so on...
+ * if user set wait offset to 0, then treat it as legacy wait cs,
+ * wait for the next signal.
*/
- cs_cmpl->sob_val = handle->pre_sob_val +
- (job->encaps_sig_wait_offset - 1);
+ if (job->encaps_sig_wait_offset)
+ offset = job->encaps_sig_wait_offset - 1;
+
+ cs_cmpl->sob_val = handle->pre_sob_val + offset;
}
static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 383865be3c2c..14da87b38e83 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -395,7 +395,7 @@ static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
{ .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
- { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+ { .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
{ .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
{ .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
{ .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
@@ -5802,6 +5802,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct packet_msg_prot *cq_pkt;
+ u64 msi_addr;
u32 tmp;
cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
@@ -5823,10 +5824,12 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1);
- if (!gaudi->multi_msi_mode)
- msi_vec = 0;
+ if (gaudi->multi_msi_mode)
+ msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
+ else
+ msi_addr = mmPCIE_CORE_MSI_REQ;
- cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
+ cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
}
static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index cb265c00cf73..25ac87cebd45 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -8,16 +8,21 @@
#include "gaudiP.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
-#define GAUDI_NUMBER_OF_RR_REGS 24
-#define GAUDI_NUMBER_OF_LBW_RANGES 12
+#define GAUDI_NUMBER_OF_LBW_RR_REGS 28
+#define GAUDI_NUMBER_OF_HBW_RR_REGS 24
+#define GAUDI_NUMBER_OF_LBW_RANGES 10
-static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_HIT_WPROT,
mmDMA_IF_W_S_DMA0_HIT_WPROT,
mmDMA_IF_W_S_DMA1_HIT_WPROT,
+ mmDMA_IF_E_S_SOB_HIT_WPROT,
mmDMA_IF_E_S_DMA0_HIT_WPROT,
mmDMA_IF_E_S_DMA1_HIT_WPROT,
+ mmDMA_IF_W_N_SOB_HIT_WPROT,
mmDMA_IF_W_N_DMA0_HIT_WPROT,
mmDMA_IF_W_N_DMA1_HIT_WPROT,
+ mmDMA_IF_E_N_SOB_HIT_WPROT,
mmDMA_IF_E_N_DMA0_HIT_WPROT,
mmDMA_IF_E_N_DMA1_HIT_WPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
@@ -38,13 +43,17 @@ static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
};
-static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_HIT_RPROT,
mmDMA_IF_W_S_DMA0_HIT_RPROT,
mmDMA_IF_W_S_DMA1_HIT_RPROT,
+ mmDMA_IF_E_S_SOB_HIT_RPROT,
mmDMA_IF_E_S_DMA0_HIT_RPROT,
mmDMA_IF_E_S_DMA1_HIT_RPROT,
+ mmDMA_IF_W_N_SOB_HIT_RPROT,
mmDMA_IF_W_N_DMA0_HIT_RPROT,
mmDMA_IF_W_N_DMA1_HIT_RPROT,
+ mmDMA_IF_E_N_SOB_HIT_RPROT,
mmDMA_IF_E_N_DMA0_HIT_RPROT,
mmDMA_IF_E_N_DMA1_HIT_RPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
@@ -65,13 +74,17 @@ static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
};
-static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MIN_WPROT_0,
mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_S_SOB_MIN_WPROT_0,
mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_W_N_SOB_MIN_WPROT_0,
mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_N_SOB_MIN_WPROT_0,
mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
@@ -92,13 +105,17 @@ static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
};
-static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MAX_WPROT_0,
mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_S_SOB_MAX_WPROT_0,
mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_W_N_SOB_MAX_WPROT_0,
mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_N_SOB_MAX_WPROT_0,
mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
@@ -119,13 +136,17 @@ static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
};
-static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MIN_RPROT_0,
mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_S_SOB_MIN_RPROT_0,
mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_W_N_SOB_MIN_RPROT_0,
mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_N_SOB_MIN_RPROT_0,
mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
@@ -146,13 +167,17 @@ static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
};
-static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MAX_RPROT_0,
mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_S_SOB_MAX_RPROT_0,
mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_W_N_SOB_MAX_RPROT_0,
mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_N_SOB_MAX_RPROT_0,
mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
@@ -173,7 +198,7 @@ static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
};
-static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
@@ -200,7 +225,7 @@ static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
};
-static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
@@ -227,7 +252,7 @@ static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
};
-static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
@@ -254,7 +279,7 @@ static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
};
-static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
@@ -281,7 +306,7 @@ static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
};
-static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
@@ -308,7 +333,7 @@ static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
};
-static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
@@ -335,7 +360,7 @@ static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
};
-static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
@@ -362,7 +387,7 @@ static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
};
-static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
@@ -389,7 +414,7 @@ static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
};
-static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
@@ -416,7 +441,7 @@ static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
};
-static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
@@ -12849,50 +12874,44 @@ static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
int i, j;
- lbw_rng_start[0] = (0xFBFE0000 & 0x3FFFFFF) - 1;
- lbw_rng_end[0] = (0xFBFFF000 & 0x3FFFFFF) + 1;
+ lbw_rng_start[0] = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */
+ lbw_rng_end[0] = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */
- lbw_rng_start[1] = (0xFC0E8000 & 0x3FFFFFF) - 1;
- lbw_rng_end[1] = (0xFC120000 & 0x3FFFFFF) + 1;
+ lbw_rng_start[1] = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */
+ lbw_rng_end[1] = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */
- lbw_rng_start[2] = (0xFC1E8000 & 0x3FFFFFF) - 1;
- lbw_rng_end[2] = (0xFC48FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[2] = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */
+ lbw_rng_end[2] = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */
- lbw_rng_start[3] = (0xFC600000 & 0x3FFFFFF) - 1;
- lbw_rng_end[3] = (0xFCC48FFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[3] = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */
+ lbw_rng_end[3] = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */
- lbw_rng_start[4] = (0xFCC4A000 & 0x3FFFFFF) - 1;
- lbw_rng_end[4] = (0xFCCDFFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[4] = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */
+ lbw_rng_end[4] = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */
- lbw_rng_start[5] = (0xFCCE4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[5] = (0xFCD1FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[5] = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */
+ lbw_rng_end[5] = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 0x00D60000 */
- lbw_rng_start[6] = (0xFCD24000 & 0x3FFFFFF) - 1;
- lbw_rng_end[6] = (0xFCD5FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[6] = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */
+ lbw_rng_end[6] = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */
- lbw_rng_start[7] = (0xFCD64000 & 0x3FFFFFF) - 1;
- lbw_rng_end[7] = (0xFCD9FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[7] = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */
+ lbw_rng_end[7] = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */
- lbw_rng_start[8] = (0xFCDA4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[8] = (0xFCDDFFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[8] = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */
+ lbw_rng_end[8] = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */
- lbw_rng_start[9] = (0xFCDE4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[9] = (0xFCE05FFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[9] = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */
+ lbw_rng_end[9] = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */
- lbw_rng_start[10] = (0xFEC43000 & 0x3FFFFFF) - 1;
- lbw_rng_end[10] = (0xFEC43FFF & 0x3FFFFFF) + 1;
-
- lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1;
- lbw_rng_end[11] = (0xFE484FFF & 0x3FFFFFF) + 1;
-
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) {
WREG32(gaudi_rr_lbw_hit_aw_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
WREG32(gaudi_rr_lbw_hit_ar_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
}
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++)
+ for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++)
for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
lbw_rng_start[j]);
@@ -12939,12 +12958,12 @@ static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
* 6th range is the host
*/
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
}
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index ffdfbd9b3220..1a6576666794 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -308,6 +308,8 @@
#define mmPCIE_AUX_FLR_CTRL 0xC07394
#define mmPCIE_AUX_DBI 0xC07490
+#define mmPCIE_CORE_MSI_REQ 0xC04100
+
#define mmPSOC_PCI_PLL_NR 0xC72100
#define mmSRAM_W_PLL_NR 0x4C8100
#define mmPSOC_HBM_PLL_NR 0xC74100
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 6578cc64ae9e..380f9aa56eb2 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1802,10 +1802,15 @@ static enum hrtimer_restart dw_mci_fault_timer(struct hrtimer *t)
spin_lock_irqsave(&host->irq_lock, flags);
- if (!host->data_status)
+ /*
+ * Only inject an error if we haven't already got an error or data over
+ * interrupt.
+ */
+ if (!host->data_status) {
host->data_status = SDMMC_INT_DCRC;
- set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ set_bit(EVENT_DATA_ERROR, &host->pending_events);
+ tasklet_schedule(&host->tasklet);
+ }
spin_unlock_irqrestore(&host->irq_lock, flags);
@@ -2721,12 +2726,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
+ spin_lock(&host->irq_lock);
+
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
host->data_status = pending;
smp_wmb(); /* drain writebuffer */
set_bit(EVENT_DATA_ERROR, &host->pending_events);
tasklet_schedule(&host->tasklet);
+
+ spin_unlock(&host->irq_lock);
}
if (pending & SDMMC_INT_DATA_OVER) {
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 6fc4cf3c9dce..a4407f391f66 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -561,6 +561,8 @@ static void renesas_sdhi_reset(struct tmio_mmc_host *host)
/* Unknown why but without polling reset status, it will hang */
read_poll_timeout(reset_control_status, ret, ret == 0, 1, 100,
false, priv->rstc);
+ /* At least SDHI_VER_GEN2_SDR50 needs manual release of reset */
+ sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
priv->needs_adjust_hs400 = false;
renesas_sdhi_set_clock(host, host->clk_cache);
} else if (priv->scc_ctl) {
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a533a90e3904..a7aeb3c132c9 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -351,9 +351,25 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
static void b53_mdio_remove(struct mdio_device *mdiodev)
{
struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
- struct dsa_switch *ds = dev->ds;
- dsa_unregister_switch(ds);
+ if (!dev)
+ return;
+
+ b53_switch_remove(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void b53_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct b53_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id b53_of_match[] = {
@@ -373,6 +389,7 @@ MODULE_DEVICE_TABLE(of, b53_of_match);
static struct mdio_driver b53_mdio_driver = {
.probe = b53_mdio_probe,
.remove = b53_mdio_remove,
+ .shutdown = b53_mdio_shutdown,
.mdiodrv.driver = {
.name = "bcm53xx",
.of_match_table = b53_of_match,
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index 82680e083cc2..ae4c79d39bc0 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -316,9 +316,21 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void b53_mmap_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id b53_mmap_of_table[] = {
{ .compatible = "brcm,bcm3384-switch" },
{ .compatible = "brcm,bcm6328-switch" },
@@ -331,6 +343,7 @@ MODULE_DEVICE_TABLE(of, b53_mmap_of_table);
static struct platform_driver b53_mmap_driver = {
.probe = b53_mmap_probe,
.remove = b53_mmap_remove,
+ .shutdown = b53_mmap_shutdown,
.driver = {
.name = "b53-switch",
.of_match_table = b53_mmap_of_table,
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 5d068acf7cf8..959a52d41f0a 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -228,6 +228,11 @@ static inline void b53_switch_remove(struct b53_device *dev)
dsa_unregister_switch(dev->ds);
}
+static inline void b53_switch_shutdown(struct b53_device *dev)
+{
+ dsa_switch_shutdown(dev->ds);
+}
+
#define b53_build_op(type_op_size, val_type) \
static inline int b53_##type_op_size(struct b53_device *dev, u8 page, \
u8 reg, val_type val) \
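
For reference, the DSA driver changes in this series all follow one pattern: each bus driver gains a .shutdown hook that calls dsa_switch_shutdown() on its struct dsa_switch and then clears the driver data, so a ->remove invoked afterwards by the bus core finds NULL and returns early. A minimal sketch of that pattern using placeholder names (foo_priv, foo_shutdown, foo_remove are not taken from any driver in this series; assumes <net/dsa.h> and <linux/platform_device.h>):

static void foo_shutdown(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	if (!priv)
		return;

	/* Tear down the DSA tree without unregistering the switch */
	dsa_switch_shutdown(priv->ds);

	/* Make a later ->remove a no-op */
	platform_set_drvdata(pdev, NULL);
}

static int foo_remove(struct platform_device *pdev)
{
	struct foo_priv *priv = platform_get_drvdata(pdev);

	if (!priv)	/* already shut down */
		return 0;

	dsa_unregister_switch(priv->ds);
	platform_set_drvdata(pdev, NULL);

	return 0;
}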
diff --git a/drivers/net/dsa/b53/b53_spi.c b/drivers/net/dsa/b53/b53_spi.c
index ecb9f7f6b335..01e37b75471e 100644
--- a/drivers/net/dsa/b53/b53_spi.c
+++ b/drivers/net/dsa/b53/b53_spi.c
@@ -321,9 +321,21 @@ static int b53_spi_remove(struct spi_device *spi)
if (dev)
b53_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
+static void b53_spi_shutdown(struct spi_device *spi)
+{
+ struct b53_device *dev = spi_get_drvdata(spi);
+
+ if (dev)
+ b53_switch_shutdown(dev);
+
+ spi_set_drvdata(spi, NULL);
+}
+
static const struct of_device_id b53_spi_of_match[] = {
{ .compatible = "brcm,bcm5325" },
{ .compatible = "brcm,bcm5365" },
@@ -344,6 +356,7 @@ static struct spi_driver b53_spi_driver = {
},
.probe = b53_spi_probe,
.remove = b53_spi_remove,
+ .shutdown = b53_spi_shutdown,
};
module_spi_driver(b53_spi_driver);
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index 3f4249de70c5..4591bb1c05d2 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -629,17 +629,34 @@ static int b53_srab_probe(struct platform_device *pdev)
static int b53_srab_remove(struct platform_device *pdev)
{
struct b53_device *dev = platform_get_drvdata(pdev);
- struct b53_srab_priv *priv = dev->priv;
- b53_srab_intr_set(priv, false);
+ if (!dev)
+ return 0;
+
+ b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void b53_srab_shutdown(struct platform_device *pdev)
+{
+ struct b53_device *dev = platform_get_drvdata(pdev);
+
+ if (!dev)
+ return;
+
+ b53_switch_shutdown(dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static struct platform_driver b53_srab_driver = {
.probe = b53_srab_probe,
.remove = b53_srab_remove,
+ .shutdown = b53_srab_shutdown,
.driver = {
.name = "b53-srab-switch",
.of_match_table = b53_srab_of_match,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 6ce9ec1283e0..7578a5c38df5 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -68,7 +68,7 @@ static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int port, count = 0;
- for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_cpu_port(ds, port))
continue;
if (priv->port_sts[port].enabled)
@@ -1512,6 +1512,9 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return 0;
+
priv->wol_ports_mask = 0;
/* Disable interrupts */
bcm_sf2_intr_disable(priv);
@@ -1523,6 +1526,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
@@ -1530,6 +1535,9 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
+ if (!priv)
+ return;
+
/* For a kernel about to be kexec'd we want to keep the GPHY on for a
* successful MDIO bus scan to occur. If we did turn off the GPHY
* before (e.g: port_disable), this will also power it back on.
@@ -1538,6 +1546,10 @@ static void bcm_sf2_sw_shutdown(struct platform_device *pdev)
*/
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
+
+ dsa_switch_shutdown(priv->dev->ds);
+
+ platform_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index bfdf3324aac3..e638e3eea911 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -340,10 +340,29 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct dsa_loop_priv *ps = ds->priv;
+ struct dsa_loop_priv *ps;
+
+ if (!ds)
+ return;
+
+ ps = ds->priv;
dsa_unregister_switch(ds);
dev_put(ps->netdev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver dsa_loop_drv = {
@@ -352,6 +371,7 @@ static struct mdio_driver dsa_loop_drv = {
},
.probe = dsa_loop_drv_probe,
.remove = dsa_loop_drv_remove,
+ .shutdown = dsa_loop_drv_shutdown,
};
#define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 542cfc4ccb08..354655f9ed00 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1916,6 +1916,9 @@ static int hellcreek_remove(struct platform_device *pdev)
{
struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+ if (!hellcreek)
+ return 0;
+
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
@@ -1924,6 +1927,18 @@ static int hellcreek_remove(struct platform_device *pdev)
return 0;
}
+static void hellcreek_shutdown(struct platform_device *pdev)
+{
+ struct hellcreek *hellcreek = platform_get_drvdata(pdev);
+
+ if (!hellcreek)
+ return;
+
+ dsa_switch_shutdown(hellcreek->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct hellcreek_platform_data de1soc_r1_pdata = {
.name = "r4c30",
.num_ports = 4,
@@ -1946,6 +1961,7 @@ MODULE_DEVICE_TABLE(of, hellcreek_of_match);
static struct platform_driver hellcreek_driver = {
.probe = hellcreek_probe,
.remove = hellcreek_remove,
+ .shutdown = hellcreek_shutdown,
.driver = {
.name = "hellcreek",
.of_match_table = hellcreek_of_match,
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d7ce281570b5..89f920289ae2 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1379,6 +1379,12 @@ int lan9303_remove(struct lan9303 *chip)
}
EXPORT_SYMBOL(lan9303_remove);
+void lan9303_shutdown(struct lan9303 *chip)
+{
+ dsa_switch_shutdown(chip->ds);
+}
+EXPORT_SYMBOL(lan9303_shutdown);
+
MODULE_AUTHOR("Juergen Borleis <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Core driver for SMSC/Microchip LAN9303 three port ethernet switch");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/lan9303.h b/drivers/net/dsa/lan9303.h
index 11f590b64701..c7f73efa50f0 100644
--- a/drivers/net/dsa/lan9303.h
+++ b/drivers/net/dsa/lan9303.h
@@ -10,3 +10,4 @@ extern const struct lan9303_phy_ops lan9303_indirect_phy_ops;
int lan9303_probe(struct lan9303 *chip, struct device_node *np);
int lan9303_remove(struct lan9303 *chip);
+void lan9303_shutdown(struct lan9303 *chip);
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 9bffaef65a04..8ca4713310fa 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -67,13 +67,28 @@ static int lan9303_i2c_probe(struct i2c_client *client,
static int lan9303_i2c_remove(struct i2c_client *client)
{
- struct lan9303_i2c *sw_dev;
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
- sw_dev = i2c_get_clientdata(client);
if (!sw_dev)
- return -ENODEV;
+ return 0;
+
+ lan9303_remove(&sw_dev->chip);
+
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static void lan9303_i2c_shutdown(struct i2c_client *client)
+{
+ struct lan9303_i2c *sw_dev = i2c_get_clientdata(client);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
- return lan9303_remove(&sw_dev->chip);
+ i2c_set_clientdata(client, NULL);
}
/*-------------------------------------------------------------------------*/
@@ -97,6 +112,7 @@ static struct i2c_driver lan9303_i2c_driver = {
},
.probe = lan9303_i2c_probe,
.remove = lan9303_i2c_remove,
+ .shutdown = lan9303_i2c_shutdown,
.id_table = lan9303_i2c_id,
};
module_i2c_driver(lan9303_i2c_driver);
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index 9cbe80460b53..bbb7032409ba 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,6 +138,20 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct lan9303_mdio *sw_dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (!sw_dev)
+ return;
+
+ lan9303_shutdown(&sw_dev->chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
/*-------------------------------------------------------------------------*/
@@ -155,6 +169,7 @@ static struct mdio_driver lan9303_mdio_driver = {
},
.probe = lan9303_mdio_probe,
.remove = lan9303_mdio_remove,
+ .shutdown = lan9303_mdio_shutdown,
};
mdio_module_driver(lan9303_mdio_driver);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 267324889dd6..3ff4b7e177f3 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -2184,6 +2184,9 @@ static int gswip_remove(struct platform_device *pdev)
struct gswip_priv *priv = platform_get_drvdata(pdev);
int i;
+ if (!priv)
+ return 0;
+
/* disable the switch */
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
@@ -2197,9 +2200,23 @@ static int gswip_remove(struct platform_device *pdev)
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void gswip_shutdown(struct platform_device *pdev)
+{
+ struct gswip_priv *priv = platform_get_drvdata(pdev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct gswip_hw_info gswip_xrx200 = {
.max_ports = 7,
.cpu_port = 6,
@@ -2223,6 +2240,7 @@ MODULE_DEVICE_TABLE(of, gswip_of_match);
static struct platform_driver gswip_driver = {
.probe = gswip_probe,
.remove = gswip_remove,
+ .shutdown = gswip_shutdown,
.driver = {
.name = "gswip",
.of_match_table = gswip_of_match,
diff --git a/drivers/net/dsa/microchip/ksz8795_spi.c b/drivers/net/dsa/microchip/ksz8795_spi.c
index ea7550d1b634..866767b70d65 100644
--- a/drivers/net/dsa/microchip/ksz8795_spi.c
+++ b/drivers/net/dsa/microchip/ksz8795_spi.c
@@ -94,6 +94,8 @@ static int ksz8795_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
@@ -101,8 +103,15 @@ static void ksz8795_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
- if (dev && dev->dev_ops->shutdown)
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->shutdown)
dev->dev_ops->shutdown(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct of_device_id ksz8795_dt_ids[] = {
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 11293485138c..5883fa7edda2 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -191,6 +191,18 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
+{
+ struct ksz_device *dev = dev_get_drvdata(&mdiodev->dev);
+
+ if (dev)
+ dsa_switch_shutdown(dev->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ksz8863_dt_ids[] = {
@@ -203,6 +215,7 @@ MODULE_DEVICE_TABLE(of, ksz8863_dt_ids);
static struct mdio_driver ksz8863_driver = {
.probe = ksz8863_smi_probe,
.remove = ksz8863_smi_remove,
+ .shutdown = ksz8863_smi_shutdown,
.mdiodrv.driver = {
.name = "ksz8863-switch",
.of_match_table = ksz8863_dt_ids,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 4e053a25d077..f3afb8b8c4cc 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -56,7 +56,10 @@ static int ksz9477_i2c_remove(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
- ksz_switch_remove(dev);
+ if (dev)
+ ksz_switch_remove(dev);
+
+ i2c_set_clientdata(i2c, NULL);
return 0;
}
@@ -65,8 +68,15 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
{
struct ksz_device *dev = i2c_get_clientdata(i2c);
- if (dev && dev->dev_ops->shutdown)
+ if (!dev)
+ return;
+
+ if (dev->dev_ops->shutdown)
dev->dev_ops->shutdown(dev);
+
+ dsa_switch_shutdown(dev->ds);
+
+ i2c_set_clientdata(i2c, NULL);
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c
index 15bc11b3cda4..e3cb0e6c9f6f 100644
--- a/drivers/net/dsa/microchip/ksz9477_spi.c
+++ b/drivers/net/dsa/microchip/ksz9477_spi.c
@@ -72,6 +72,8 @@ static int ksz9477_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
+ spi_set_drvdata(spi, NULL);
+
return 0;
}
@@ -79,8 +81,10 @@ static void ksz9477_spi_shutdown(struct spi_device *spi)
{
struct ksz_device *dev = spi_get_drvdata(spi);
- if (dev && dev->dev_ops->shutdown)
- dev->dev_ops->shutdown(dev);
+ if (dev)
+ dsa_switch_shutdown(dev->ds);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct of_device_id ksz9477_dt_ids[] = {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index d0cba2d1cd68..094737e5084a 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -3286,6 +3286,9 @@ mt7530_remove(struct mdio_device *mdiodev)
struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
int ret = 0;
+ if (!priv)
+ return;
+
ret = regulator_disable(priv->core_pwr);
if (ret < 0)
dev_err(priv->dev,
@@ -3301,11 +3304,26 @@ mt7530_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mt7530_shutdown(struct mdio_device *mdiodev)
+{
+ struct mt7530_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static struct mdio_driver mt7530_mdio_driver = {
.probe = mt7530_probe,
.remove = mt7530_remove,
+ .shutdown = mt7530_shutdown,
.mdiodrv.driver = {
.name = "mt7530",
.of_match_table = mt7530_of_match,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 24b8219fd607..a4c6eb9a52d0 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -290,7 +290,24 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ if (!ds)
+ return;
+
dsa_unregister_switch(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6060_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id mv88e6060_of_match[] = {
@@ -303,6 +320,7 @@ static const struct of_device_id mv88e6060_of_match[] = {
static struct mdio_driver mv88e6060_driver = {
.probe = mv88e6060_probe,
.remove = mv88e6060_remove,
+ .shutdown = mv88e6060_shutdown,
.mdiodrv.driver = {
.name = "mv88e6060",
.of_match_table = mv88e6060_of_match,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c45ca2473743..03744d1c43fc 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2834,8 +2834,8 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (err)
return err;
- /* Port Control 2: don't force a good FCS, set the maximum frame size to
- * 10240 bytes, disable 802.1q tags checking, don't discard tagged or
+ /* Port Control 2: don't force a good FCS, set the MTU size to
+ * 10222 bytes, disable 802.1q tags checking, don't discard tagged or
* untagged frames on this port, do a destination address lookup on all
* received packets as usual, disable ARP mirroring and don't send a
* copy of all transmitted/received frames on this port to the CPU.
@@ -2854,7 +2854,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
return err;
if (chip->info->ops->port_set_jumbo_size) {
- err = chip->info->ops->port_set_jumbo_size(chip, port, 10240);
+ err = chip->info->ops->port_set_jumbo_size(chip, port, 10218);
if (err)
return err;
}
@@ -2944,10 +2944,10 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port)
struct mv88e6xxx_chip *chip = ds->priv;
if (chip->info->ops->port_set_jumbo_size)
- return 10240;
+ return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
else if (chip->info->ops->set_max_frame_size)
- return 1632;
- return 1522;
+ return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
+ return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN;
}
static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
@@ -2955,6 +2955,9 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
struct mv88e6xxx_chip *chip = ds->priv;
int ret = 0;
+ if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
+ new_mtu += EDSA_HLEN;
+
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_jumbo_size)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
@@ -3071,7 +3074,7 @@ static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
- mv88e6xxx_teardown_devlink_regions(ds);
+ mv88e6xxx_teardown_devlink_regions_global(ds);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3215,7 +3218,7 @@ unlock:
if (err)
goto out_resources;
- err = mv88e6xxx_setup_devlink_regions(ds);
+ err = mv88e6xxx_setup_devlink_regions_global(ds);
if (err)
goto out_params;
@@ -3229,6 +3232,16 @@ out_resources:
return err;
}
+static int mv88e6xxx_port_setup(struct dsa_switch *ds, int port)
+{
+ return mv88e6xxx_setup_devlink_regions_port(ds, port);
+}
+
+static void mv88e6xxx_port_teardown(struct dsa_switch *ds, int port)
+{
+ mv88e6xxx_teardown_devlink_regions_port(ds, port);
+}
+
/* prod_id for switch families which do not have a PHY model number */
static const u16 family_prod_id_table[] = {
[MV88E6XXX_FAMILY_6341] = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
@@ -3715,7 +3728,6 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
.port_set_ether_type = mv88e6351_port_set_ether_type,
- .port_set_jumbo_size = mv88e6165_port_set_jumbo_size,
.port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
.port_pause_limit = mv88e6097_port_pause_limit,
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit,
@@ -3740,6 +3752,7 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
.avb_ops = &mv88e6165_avb_ops,
.ptp_ops = &mv88e6165_ptp_ops,
.phylink_validate = mv88e6185_phylink_validate,
+ .set_max_frame_size = mv88e6185_g1_set_max_frame_size,
};
static const struct mv88e6xxx_ops mv88e6165_ops = {
@@ -6116,6 +6129,8 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
.setup = mv88e6xxx_setup,
.teardown = mv88e6xxx_teardown,
+ .port_setup = mv88e6xxx_port_setup,
+ .port_teardown = mv88e6xxx_port_teardown,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_serdes_pcs_get_state,
.phylink_mac_config = mv88e6xxx_mac_config,
@@ -6389,7 +6404,12 @@ out:
static void mv88e6xxx_remove(struct mdio_device *mdiodev)
{
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_chip *chip;
+
+ if (!ds)
+ return;
+
+ chip = ds->priv;
if (chip->info->ptp_support) {
mv88e6xxx_hwtstamp_free(chip);
@@ -6410,6 +6430,20 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+
+ if (!ds)
+ return;
+
+ dsa_switch_shutdown(ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
@@ -6433,6 +6467,7 @@ MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
static struct mdio_driver mv88e6xxx_driver = {
.probe = mv88e6xxx_probe,
.remove = mv88e6xxx_remove,
+ .shutdown = mv88e6xxx_shutdown,
.mdiodrv.driver = {
.name = "mv88e6085",
.of_match_table = mv88e6xxx_of_match,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 675b1f3e43b7..59f316cc8583 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -18,6 +18,7 @@
#include <linux/timecounter.h>
#include <net/dsa.h>
+#define EDSA_HLEN 8
#define MV88E6XXX_N_FID 4096
/* PVT limits for 4-bit port and 5-bit switch */
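
For reference, the MTU changes in chip.c rely on standard kernel constants: ETH_HLEN = 14, VLAN_HLEN = 4 (so VLAN_ETH_HLEN = 18), ETH_FCS_LEN = 4, plus the new EDSA_HLEN = 8 above. A worked check of the advertised maximums, assuming those values:

/* Illustrative arithmetic only: advertised max MTU = hardware max frame
 * size minus VLAN Ethernet header, EDSA tag and FCS.
 */
10240 - 18 - 8 - 4 = 10210   /* port_set_jumbo_size capable */
 1632 - 18 - 8 - 4 =  1602   /* set_max_frame_size case     */
 1522 - 18 - 8 - 4 =  1492   /* default                     */

The setters then add the L2 overhead back when programming the hardware: mv88e6165_port_set_jumbo_size() adds VLAN_ETH_HLEN + ETH_FCS_LEN (22 bytes) and mv88e6185_g1_set_max_frame_size() adds ETH_HLEN + ETH_FCS_LEN (18 bytes), as seen further down in port.c and global1.c.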
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
index 0c0f5ea6680c..381068395c63 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.c
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -647,26 +647,25 @@ static struct mv88e6xxx_region mv88e6xxx_regions[] = {
},
};
-static void
-mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
dsa_devlink_region_destroy(chip->regions[i]);
}
-static void
-mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
- int port)
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
dsa_devlink_region_destroy(chip->ports[port].region);
}
-static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip,
- int port)
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port)
{
+ struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region *region;
region = dsa_devlink_port_region_create(ds,
@@ -681,40 +680,10 @@ static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
return 0;
}
-static void
-mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
-{
- int port;
-
- for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
- mv88e6xxx_teardown_devlink_regions_port(chip, port);
-}
-
-static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip)
-{
- int port;
- int err;
-
- for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
- err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
- if (err)
- goto out;
- }
-
- return 0;
-
-out:
- while (port-- > 0)
- mv88e6xxx_teardown_devlink_regions_port(chip, port);
-
- return err;
-}
-
-static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
- struct mv88e6xxx_chip *chip)
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds)
{
bool (*cond)(struct mv88e6xxx_chip *chip);
+ struct mv88e6xxx_chip *chip = ds->priv;
struct devlink_region_ops *ops;
struct devlink_region *region;
u64 size;
@@ -753,30 +722,6 @@ out:
return PTR_ERR(region);
}
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
- if (err)
- return err;
-
- err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
- if (err)
- mv88e6xxx_teardown_devlink_regions_global(chip);
-
- return err;
-}
-
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- mv88e6xxx_teardown_devlink_regions_ports(chip);
- mv88e6xxx_teardown_devlink_regions_global(chip);
-}
-
int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
index 3d72db3dcf95..65ce6a6858b9 100644
--- a/drivers/net/dsa/mv88e6xxx/devlink.h
+++ b/drivers/net/dsa/mv88e6xxx/devlink.h
@@ -12,8 +12,10 @@ int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
-int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
-void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions_global(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds, int port);
+void mv88e6xxx_teardown_devlink_regions_port(struct dsa_switch *ds, int port);
int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
struct devlink_info_req *req,
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 815b0f681d69..5848112036b0 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -232,6 +232,8 @@ int mv88e6185_g1_set_max_frame_size(struct mv88e6xxx_chip *chip, int mtu)
u16 val;
int err;
+ mtu += ETH_HLEN + ETH_FCS_LEN;
+
err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index f77e2ee64a60..451028c57af8 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -1277,6 +1277,8 @@ int mv88e6165_port_set_jumbo_size(struct mv88e6xxx_chip *chip, int port,
u16 reg;
int err;
+ size += VLAN_ETH_HLEN + ETH_FCS_LEN;
+
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_CTL2, &reg);
if (err)
return err;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 3656e67af789..a3a9636430d6 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019-2021 NXP Semiconductors
+/* Copyright 2019-2021 NXP
*
* This is an umbrella module for all network switches that are
* register-compatible with Ocelot and that perform I/O to their host CPU
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 5854bab43327..54024b6f9498 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
*/
#ifndef _MSCC_FELIX_H
#define _MSCC_FELIX_H
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index f966a253d1c7..11b42fd812e4 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Copyright 2017 Microsemi Corporation
- * Copyright 2018-2019 NXP Semiconductors
+ * Copyright 2018-2019 NXP
*/
#include <linux/fsl/enetc_mdio.h>
#include <soc/mscc/ocelot_qsys.h>
@@ -1472,9 +1472,10 @@ err_pci_enable:
static void felix_pci_remove(struct pci_dev *pdev)
{
- struct felix *felix;
+ struct felix *felix = pci_get_drvdata(pdev);
- felix = pci_get_drvdata(pdev);
+ if (!felix)
+ return;
dsa_unregister_switch(felix->ds);
@@ -1482,6 +1483,20 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
+
+ pci_set_drvdata(pdev, NULL);
+}
+
+static void felix_pci_shutdown(struct pci_dev *pdev)
+{
+ struct felix *felix = pci_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ pci_set_drvdata(pdev, NULL);
}
static struct pci_device_id felix_ids[] = {
@@ -1498,6 +1513,7 @@ static struct pci_driver felix_vsc9959_pci_driver = {
.id_table = felix_ids,
.probe = felix_pci_probe,
.remove = felix_pci_remove,
+ .shutdown = felix_pci_shutdown,
};
module_pci_driver(felix_vsc9959_pci_driver);
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index deae923c8b7a..de1d34a1f1e4 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1245,18 +1245,33 @@ err_alloc_felix:
static int seville_remove(struct platform_device *pdev)
{
- struct felix *felix;
+ struct felix *felix = platform_get_drvdata(pdev);
- felix = platform_get_drvdata(pdev);
+ if (!felix)
+ return 0;
dsa_unregister_switch(felix->ds);
kfree(felix->ds);
kfree(felix);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void seville_shutdown(struct platform_device *pdev)
+{
+ struct felix *felix = platform_get_drvdata(pdev);
+
+ if (!felix)
+ return;
+
+ dsa_switch_shutdown(felix->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id seville_of_match[] = {
{ .compatible = "mscc,vsc9953-switch" },
{ },
@@ -1266,6 +1281,7 @@ MODULE_DEVICE_TABLE(of, seville_of_match);
static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
.remove = seville_remove,
+ .shutdown = seville_shutdown,
.driver = {
.name = "mscc_seville",
.of_match_table = of_match_ptr(seville_of_match),
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 563d8a279030..a6bfb6abc51a 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1083,6 +1083,9 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
unsigned int i;
+ if (!priv)
+ return;
+
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
struct ar9331_sw_port *port = &priv->port[i];
@@ -1094,6 +1097,20 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct ar9331_sw_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(&priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id ar9331_sw_of_match[] = {
@@ -1104,6 +1121,7 @@ static const struct of_device_id ar9331_sw_of_match[] = {
static struct mdio_driver ar9331_sw_mdio_driver = {
.probe = ar9331_sw_probe,
.remove = ar9331_sw_remove,
+ .shutdown = ar9331_sw_shutdown,
.mdiodrv.driver = {
.name = AR9331_SW_NAME,
.of_match_table = ar9331_sw_of_match,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index bda5a9bf4f52..a984f06f6f04 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1880,10 +1880,27 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
int i;
+ if (!priv)
+ return;
+
for (i = 0; i < QCA8K_NUM_PORTS; i++)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
+{
+ struct qca8k_priv *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
#ifdef CONFIG_PM_SLEEP
@@ -1940,6 +1957,7 @@ static const struct of_device_id qca8k_of_match[] = {
static struct mdio_driver qca8kmdio_driver = {
.probe = qca8k_sw_probe,
.remove = qca8k_sw_remove,
+ .shutdown = qca8k_sw_shutdown,
.mdiodrv.driver = {
.name = "qca8k",
.of_match_table = qca8k_of_match,
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index 8e49d4f85d48..2fcfd917b876 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -368,7 +368,7 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi)
smi->slave_mii_bus->parent = smi->dev;
smi->ds->slave_mii_bus = smi->slave_mii_bus;
- ret = of_mdiobus_register(smi->slave_mii_bus, mdio_np);
+ ret = devm_of_mdiobus_register(smi->dev, smi->slave_mii_bus, mdio_np);
if (ret) {
dev_err(smi->dev, "unable to register MDIO bus %s\n",
smi->slave_mii_bus->id);
@@ -464,16 +464,33 @@ static int realtek_smi_probe(struct platform_device *pdev)
static int realtek_smi_remove(struct platform_device *pdev)
{
- struct realtek_smi *smi = dev_get_drvdata(&pdev->dev);
+ struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+ if (!smi)
+ return 0;
dsa_unregister_switch(smi->ds);
if (smi->slave_mii_bus)
of_node_put(smi->slave_mii_bus->dev.of_node);
gpiod_set_value(smi->reset, 1);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
+static void realtek_smi_shutdown(struct platform_device *pdev)
+{
+ struct realtek_smi *smi = platform_get_drvdata(pdev);
+
+ if (!smi)
+ return;
+
+ dsa_switch_shutdown(smi->ds);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
static const struct of_device_id realtek_smi_of_match[] = {
{
.compatible = "realtek,rtl8366rb",
@@ -495,6 +512,7 @@ static struct platform_driver realtek_smi_driver = {
},
.probe = realtek_smi_probe,
.remove = realtek_smi_remove,
+ .shutdown = realtek_smi_shutdown,
};
module_platform_driver(realtek_smi_driver);
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 387a1f2f161c..5bbf1707f2af 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include <linux/packing.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
index 05c7f4ca3b1a..0569ff066634 100644
--- a/drivers/net/dsa/sja1105/sja1105_devlink.c
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
- * Copyright 2020 NXP Semiconductors
+ * Copyright 2020 NXP
*/
#include "sja1105.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 6c10ffa968ce..72b9b39b0989 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#include "sja1105.h"
#include "sja1105_vl.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 2f8cc6686c38..7c0db80eff00 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3335,13 +3335,29 @@ static int sja1105_probe(struct spi_device *spi)
static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
- struct dsa_switch *ds = priv->ds;
- dsa_unregister_switch(ds);
+ if (!priv)
+ return 0;
+
+ dsa_unregister_switch(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
return 0;
}
+static void sja1105_shutdown(struct spi_device *spi)
+{
+ struct sja1105_private *priv = spi_get_drvdata(spi);
+
+ if (!priv)
+ return;
+
+ dsa_switch_shutdown(priv->ds);
+
+ spi_set_drvdata(spi, NULL);
+}
+
static const struct of_device_id sja1105_dt_ids[] = {
{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
@@ -3365,6 +3381,7 @@ static struct spi_driver sja1105_driver = {
},
.probe = sja1105_probe,
.remove = sja1105_remove,
+ .shutdown = sja1105_shutdown,
};
module_spi_driver(sja1105_driver);
diff --git a/drivers/net/dsa/sja1105/sja1105_mdio.c b/drivers/net/dsa/sja1105/sja1105_mdio.c
index 705d3900e43a..215dd17ca790 100644
--- a/drivers/net/dsa/sja1105/sja1105_mdio.c
+++ b/drivers/net/dsa/sja1105/sja1105_mdio.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021, NXP Semiconductors
+/* Copyright 2021 NXP
*/
#include <linux/pcs/pcs-xpcs.h>
#include <linux/of_mdio.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index d60a530d0272..d3c9ad6d39d4 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.c b/drivers/net/dsa/sja1105/sja1105_static_config.c
index 7a422ef4deb6..baba204ad62f 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include "sja1105_static_config.h"
diff --git a/drivers/net/dsa/sja1105/sja1105_static_config.h b/drivers/net/dsa/sja1105/sja1105_static_config.h
index bce0f5c03d0b..6a372d5f22ae 100644
--- a/drivers/net/dsa/sja1105/sja1105_static_config.h
+++ b/drivers/net/dsa/sja1105/sja1105_static_config.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _SJA1105_STATIC_CONFIG_H
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index ec7b65daec20..6802f4057cc0 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#include <net/tc_act/tc_gate.h>
#include <linux/dsa/8021q.h>
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.h b/drivers/net/dsa/sja1105/sja1105_vl.h
index 173d78963fed..51fba0dce91a 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.h
+++ b/drivers/net/dsa/sja1105/sja1105_vl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright 2020, NXP Semiconductors
+/* Copyright 2020 NXP
*/
#ifndef _SJA1105_VL_H
#define _SJA1105_VL_H
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index 19ce4aa0973b..a4b1447ff055 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -1225,6 +1225,12 @@ int vsc73xx_remove(struct vsc73xx *vsc)
}
EXPORT_SYMBOL(vsc73xx_remove);
+void vsc73xx_shutdown(struct vsc73xx *vsc)
+{
+ dsa_switch_shutdown(vsc->ds);
+}
+EXPORT_SYMBOL(vsc73xx_shutdown);
+
MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
MODULE_DESCRIPTION("Vitesse VSC7385/7388/7395/7398 driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index 2a57f337b2a2..fe4b154a0a57 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -116,7 +116,26 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
{
struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
- return vsc73xx_remove(&vsc_platform->vsc);
+ if (!vsc_platform)
+ return 0;
+
+ vsc73xx_remove(&vsc_platform->vsc);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static void vsc73xx_platform_shutdown(struct platform_device *pdev)
+{
+ struct vsc73xx_platform *vsc_platform = platform_get_drvdata(pdev);
+
+ if (!vsc_platform)
+ return;
+
+ vsc73xx_shutdown(&vsc_platform->vsc);
+
+ platform_set_drvdata(pdev, NULL);
}
static const struct vsc73xx_ops vsc73xx_platform_ops = {
@@ -144,6 +163,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct platform_driver vsc73xx_platform_driver = {
.probe = vsc73xx_platform_probe,
.remove = vsc73xx_platform_remove,
+ .shutdown = vsc73xx_platform_shutdown,
.driver = {
.name = "vsc73xx-platform",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 81eca4a5781d..645398901e05 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -163,7 +163,26 @@ static int vsc73xx_spi_remove(struct spi_device *spi)
{
struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
- return vsc73xx_remove(&vsc_spi->vsc);
+ if (!vsc_spi)
+ return 0;
+
+ vsc73xx_remove(&vsc_spi->vsc);
+
+ spi_set_drvdata(spi, NULL);
+
+ return 0;
+}
+
+static void vsc73xx_spi_shutdown(struct spi_device *spi)
+{
+ struct vsc73xx_spi *vsc_spi = spi_get_drvdata(spi);
+
+ if (!vsc_spi)
+ return;
+
+ vsc73xx_shutdown(&vsc_spi->vsc);
+
+ spi_set_drvdata(spi, NULL);
}
static const struct vsc73xx_ops vsc73xx_spi_ops = {
@@ -191,6 +210,7 @@ MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
static struct spi_driver vsc73xx_spi_driver = {
.probe = vsc73xx_spi_probe,
.remove = vsc73xx_spi_remove,
+ .shutdown = vsc73xx_spi_shutdown,
.driver = {
.name = "vsc73xx-spi",
.of_match_table = vsc73xx_of_match,
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 7478f8d4e0a9..30b951504e65 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -27,3 +27,4 @@ struct vsc73xx_ops {
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
int vsc73xx_remove(struct vsc73xx *vsc);
+void vsc73xx_shutdown(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 130abb0f1438..469420941054 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -822,6 +822,12 @@ void xrs700x_switch_remove(struct xrs700x *priv)
}
EXPORT_SYMBOL(xrs700x_switch_remove);
+void xrs700x_switch_shutdown(struct xrs700x *priv)
+{
+ dsa_switch_shutdown(priv->ds);
+}
+EXPORT_SYMBOL(xrs700x_switch_shutdown);
+
MODULE_AUTHOR("George McCollister <george.mccollister@gmail.com>");
MODULE_DESCRIPTION("Arrow SpeedChips XRS700x DSA driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/xrs700x/xrs700x.h b/drivers/net/dsa/xrs700x/xrs700x.h
index ff62cf61b091..4d58257471d2 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.h
+++ b/drivers/net/dsa/xrs700x/xrs700x.h
@@ -40,3 +40,4 @@ struct xrs700x {
struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv);
int xrs700x_switch_register(struct xrs700x *priv);
void xrs700x_switch_remove(struct xrs700x *priv);
+void xrs700x_switch_shutdown(struct xrs700x *priv);
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 489d9385b4f0..6deae388a0d6 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -109,11 +109,28 @@ static int xrs700x_i2c_remove(struct i2c_client *i2c)
{
struct xrs700x *priv = i2c_get_clientdata(i2c);
+ if (!priv)
+ return 0;
+
xrs700x_switch_remove(priv);
+ i2c_set_clientdata(i2c, NULL);
+
return 0;
}
+static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
+{
+ struct xrs700x *priv = i2c_get_clientdata(i2c);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ i2c_set_clientdata(i2c, NULL);
+}
+
static const struct i2c_device_id xrs700x_i2c_id[] = {
{ "xrs700x-switch", 0 },
{},
@@ -137,6 +154,7 @@ static struct i2c_driver xrs700x_i2c_driver = {
},
.probe = xrs700x_i2c_probe,
.remove = xrs700x_i2c_remove,
+ .shutdown = xrs700x_i2c_shutdown,
.id_table = xrs700x_i2c_id,
};
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 44f58bee04a4..d01cf1073d49 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -136,7 +136,24 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
{
struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+ if (!priv)
+ return;
+
xrs700x_switch_remove(priv);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
+}
+
+static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
+{
+ struct xrs700x *priv = dev_get_drvdata(&mdiodev->dev);
+
+ if (!priv)
+ return;
+
+ xrs700x_switch_shutdown(priv);
+
+ dev_set_drvdata(&mdiodev->dev, NULL);
}
static const struct of_device_id __maybe_unused xrs700x_mdio_dt_ids[] = {
@@ -155,6 +172,7 @@ static struct mdio_driver xrs700x_mdio_driver = {
},
.probe = xrs700x_mdio_probe,
.remove = xrs700x_mdio_remove,
+ .shutdown = xrs700x_mdio_shutdown,
};
mdio_module_driver(xrs700x_mdio_driver);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index dee9ff74d6d6..d4b1976ee69b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -413,13 +413,13 @@ static int atl_resume_common(struct device *dev, bool deep)
if (deep) {
/* Reinitialize Nic/Vecs objects */
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
+ }
+ if (netif_running(nic->ndev)) {
ret = aq_nic_init(nic);
if (ret)
goto err_exit;
- }
- if (netif_running(nic->ndev)) {
ret = aq_nic_start(nic);
if (ret)
goto err_exit;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 85fa0ab7201c..9513cfb5ba58 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -129,6 +129,8 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+ if (err == -EPROBE_DEFER)
+ return err;
/* If no MAC address assigned via device tree, check SPROM */
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 4ab5bf64d353..df8ff839cc62 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -192,6 +192,9 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dma_dev = &pdev->dev;
ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
if (ret)
dev_warn(&pdev->dev,
"MAC address not present in device tree\n");
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 037767b370d5..62f84cc91e4d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -391,7 +391,7 @@ static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
* netif_tx_queue_stopped().
*/
smp_mb();
- if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
+ if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
netif_tx_wake_queue(txq);
return false;
}
@@ -764,7 +764,7 @@ next_tx_int:
smp_mb();
if (unlikely(netif_tx_queue_stopped(txq)) &&
- bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
+ bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
netif_tx_wake_queue(txq);
}
@@ -2416,7 +2416,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
+ if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
rx_pkts = budget;
raw_cons = NEXT_RAW_CMP(raw_cons);
if (budget)
@@ -3640,7 +3640,7 @@ static int bnxt_init_tx_rings(struct bnxt *bp)
u16 i;
bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
- MAX_SKB_FRAGS + 1);
+ BNXT_MIN_TX_DESC_CNT);
for (i = 0; i < bp->tx_nr_rings; i++) {
struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index ec046e7a2484..19fe6478e9b4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -629,6 +629,11 @@ struct nqe_cn {
#define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1)
#define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1)
+/* Minimum TX BDs for a TX packet with MAX_SKB_FRAGS + 1. We need one extra
+ * BD because the first TX BD is always a long BD.
+ */
+#define BNXT_MIN_TX_DESC_CNT (MAX_SKB_FRAGS + 2)
+
#define RX_RING(x) (((x) & ~(RX_DESC_CNT - 1)) >> (BNXT_PAGE_SHIFT - 4))
#define RX_IDX(x) ((x) & (RX_DESC_CNT - 1))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index b056e3c29bbd..7260910e75fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -798,7 +798,7 @@ static int bnxt_set_ringparam(struct net_device *dev,
if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
(ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
- (ering->tx_pending <= MAX_SKB_FRAGS))
+ (ering->tx_pending < BNXT_MIN_TX_DESC_CNT))
return -EINVAL;
if (netif_running(dev))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 3ca93adb9662..042327b9981f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -419,7 +419,7 @@ static void enetc_rx_dim_work(struct work_struct *w)
static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
v->comp_cnt++;
@@ -1879,7 +1879,6 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
- cpumask_t cpu_mask;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
@@ -1908,9 +1907,7 @@ static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
}
- cpumask_clear(&cpu_mask);
- cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
- irq_set_affinity_hint(irq, &cpu_mask);
+ irq_set_affinity_hint(irq, get_cpu_mask(i % num_online_cpus()));
}
return 0;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
index ee1468e3eaa3..91f02c505028 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
*
* The Integrated Endpoint Register Block (IERB) is configured by pre-boot
* software and is supposed to be to ENETC what a NVRAM is to a 'real' PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
index b3b774e0998a..c2ce47c4be9f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ierb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
-/* Copyright 2021 NXP Semiconductors */
+/* Copyright 2021 NXP */
#include <linux/pci.h>
#include <linux/platform_device.h>
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 60d94e0a07d6..4c977dfc44f0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -541,8 +541,7 @@ static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
if (phy_interface_mode_is_rgmii(phy_mode)) {
val = enetc_port_rd(hw, ENETC_PM0_IF_MODE);
- val &= ~ENETC_PM0_IFM_EN_AUTO;
- val &= ENETC_PM0_IFM_IFMODE_MASK;
+ val &= ~(ENETC_PM0_IFM_EN_AUTO | ENETC_PM0_IFM_IFMODE_MASK);
val |= ENETC_PM0_IFM_IFMODE_GMII | ENETC_PM0_IFM_RG;
enetc_port_wr(hw, ENETC_PM0_IF_MODE, val);
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 80bd5c629fa0..ec87b370bba1 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -4176,5 +4176,4 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
-MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 546a60530384..8ba21d6dc220 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -752,7 +752,6 @@ struct hnae3_tc_info {
u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
u16 tqp_count[HNAE3_MAX_TC];
u16 tqp_offset[HNAE3_MAX_TC];
- unsigned long tc_en; /* bitmap of TC enabled */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index adc54a726661..468b8f07bf47 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -623,13 +623,9 @@ static int hns3_nic_set_real_num_queue(struct net_device *netdev)
return ret;
}
- for (i = 0; i < HNAE3_MAX_TC; i++) {
- if (!test_bit(i, &tc_info->tc_en))
- continue;
-
+ for (i = 0; i < tc_info->num_tc; i++)
netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i],
tc_info->tqp_offset[i]);
- }
}
ret = netif_set_real_num_tx_queues(netdev, queue_size);
@@ -779,6 +775,11 @@ static int hns3_nic_net_open(struct net_device *netdev)
if (hns3_nic_resetting(netdev))
return -EBUSY;
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netdev_warn(netdev, "net open repeatedly!\n");
+ return 0;
+ }
+
netif_carrier_off(netdev);
ret = hns3_nic_set_real_num_queue(netdev);
@@ -4865,12 +4866,9 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
struct hnae3_tc_info *tc_info = &kinfo->tc_info;
int i;
- for (i = 0; i < HNAE3_MAX_TC; i++) {
+ for (i = 0; i < tc_info->num_tc; i++) {
int j;
- if (!test_bit(i, &tc_info->tc_en))
- continue;
-
for (j = 0; j < tc_info->tqp_count[i]; j++) {
struct hnae3_queue *q;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 7ea511d59e91..5ebd96f6833d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -334,7 +334,8 @@ static void hns3_selftest_prepare(struct net_device *ndev,
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Disable the vlan filter for selftest does not support it */
- if (h->ae_algo->ops->enable_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
@@ -359,7 +360,8 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
h->ae_algo->ops->halt_autoneg(h, false);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (h->ae_algo->ops->enable_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
h->ae_algo->ops->enable_vlan_filter(h, true);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index ac9b69513332..9c2eeaa82294 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -467,7 +467,7 @@ err_csq:
return ret;
}
-static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
{
struct hclge_firmware_compat_cmd *req;
struct hclge_desc desc;
@@ -475,13 +475,16 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_IMP_COMPAT_CFG, false);
- req = (struct hclge_firmware_compat_cmd *)desc.data;
+ if (en) {
+ req = (struct hclge_firmware_compat_cmd *)desc.data;
- hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
- hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
- if (hnae3_dev_phy_imp_supported(hdev))
- hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
- req->compat = cpu_to_le32(compat);
+ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ if (hnae3_dev_phy_imp_supported(hdev))
+ hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
+
+ req->compat = cpu_to_le32(compat);
+ }
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
@@ -538,7 +541,7 @@ int hclge_cmd_init(struct hclge_dev *hdev)
/* ask the firmware to enable some features, driver can work without
* it.
*/
- ret = hclge_firmware_compat_config(hdev);
+ ret = hclge_firmware_compat_config(hdev, true);
if (ret)
dev_warn(&hdev->pdev->dev,
"Firmware compatible features not enabled(%d).\n",
@@ -568,6 +571,8 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
void hclge_cmd_uninit(struct hclge_dev *hdev)
{
+ hclge_firmware_compat_config(hdev, false);
+
set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
/* wait to ensure that the firmware completes the possible left
* over commands.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 4a619e5d3f35..307c9e830510 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -247,6 +247,10 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
}
hclge_tm_schd_info_update(hdev, num_tc);
+ if (num_tc > 1)
+ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -306,8 +310,7 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
u8 i, j, pfc_map, *prio_tc;
int ret;
- if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
if (pfc->pfc_en == hdev->tm_info.pfc_en)
@@ -441,8 +444,6 @@ static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
struct tc_mqprio_qopt_offload *mqprio_qopt)
{
- int i;
-
memset(tc_info, 0, sizeof(*tc_info));
tc_info->num_tc = mqprio_qopt->qopt.num_tc;
memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
@@ -451,9 +452,6 @@ static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
sizeof_field(struct hnae3_tc_info, tqp_count));
memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
sizeof_field(struct hnae3_tc_info, tqp_offset));
-
- for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
- set_bit(tc_info->prio_tc[i], &tc_info->tc_en);
}
static int hclge_config_tc(struct hclge_dev *hdev,
@@ -519,12 +517,17 @@ static int hclge_setup_tc(struct hnae3_handle *h,
return hclge_notify_init_up(hdev);
err_out:
- /* roll-back */
- memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
- if (hclge_config_tc(hdev, &kinfo->tc_info))
- dev_err(&hdev->pdev->dev,
- "failed to roll back tc configuration\n");
-
+ if (!tc) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to destroy mqprio, will active after reset, ret = %d\n",
+ ret);
+ } else {
+ /* roll-back */
+ memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
+ if (hclge_config_tc(hdev, &kinfo->tc_info))
+ dev_err(&hdev->pdev->dev,
+ "failed to roll back tc configuration\n");
+ }
hclge_notify_init_up(hdev);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 87d96f82c318..32f62cd2dd99 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -719,9 +719,9 @@ static void hclge_dbg_fill_shaper_content(struct hclge_tm_shaper_para *para,
sprintf(result[(*index)++], "%6u", para->rate);
}
-static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+static int __hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *data_str,
+ char *buf, int len)
{
- char data_str[ARRAY_SIZE(tm_pg_items)][HCLGE_DBG_DATA_STR_LEN];
struct hclge_tm_shaper_para c_shaper_para, p_shaper_para;
char *result[ARRAY_SIZE(tm_pg_items)], *sch_mode_str;
u8 pg_id, sch_mode, weight, pri_bit_map, i, j;
@@ -729,8 +729,10 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
int pos = 0;
int ret;
- for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++)
- result[i] = &data_str[i][0];
+ for (i = 0; i < ARRAY_SIZE(tm_pg_items); i++) {
+ result[i] = data_str;
+ data_str += HCLGE_DBG_DATA_STR_LEN;
+ }
hclge_dbg_fill_content(content, sizeof(content), tm_pg_items,
NULL, ARRAY_SIZE(tm_pg_items));
@@ -781,6 +783,24 @@ static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
return 0;
}
+static int hclge_dbg_dump_tm_pg(struct hclge_dev *hdev, char *buf, int len)
+{
+ char *data_str;
+ int ret;
+
+ data_str = kcalloc(ARRAY_SIZE(tm_pg_items),
+ HCLGE_DBG_DATA_STR_LEN, GFP_KERNEL);
+
+ if (!data_str)
+ return -ENOMEM;
+
+ ret = __hclge_dbg_dump_tm_pg(hdev, data_str, buf, len);
+
+ kfree(data_str);
+
+ return ret;
+}
+
static int hclge_dbg_dump_tm_port(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tm_shaper_para shaper_para;
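
The split above replaces a roughly ARRAY_SIZE(tm_pg_items) * HCLGE_DBG_DATA_STR_LEN byte array on the stack with one kcalloc() allocation that the worker carves into per-item rows. A small user-space sketch of the same row-pointer setup, assuming hypothetical sizes in place of the real tm_pg_items table and HCLGE_DBG_DATA_STR_LEN:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define N_ITEMS      10   /* stands in for ARRAY_SIZE(tm_pg_items) */
#define DATA_STR_LEN 32   /* stands in for HCLGE_DBG_DATA_STR_LEN */

/* The worker receives one flat buffer and carves it into per-item rows,
 * mirroring how __hclge_dbg_dump_tm_pg() builds its result[] table. */
static void fill_rows(char *data_str)
{
    char *result[N_ITEMS];
    int i;

    for (i = 0; i < N_ITEMS; i++) {
        result[i] = data_str;
        data_str += DATA_STR_LEN;
    }

    for (i = 0; i < N_ITEMS; i++)
        snprintf(result[i], DATA_STR_LEN, "item %d", i);

    for (i = 0; i < N_ITEMS; i++)
        printf("%s\n", result[i]);
}

int main(void)
{
    /* One heap allocation instead of an N_ITEMS x DATA_STR_LEN array on the stack. */
    char *data_str = calloc(N_ITEMS, DATA_STR_LEN);

    if (!data_str)
        return 1;
    fill_rows(data_str);
    free(data_str);
    return 0;
}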
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 718c16d686fa..bb9b026ae88e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -2445,12 +2445,12 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
return;
}
- dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vport(%u), queue_id(%u)\n",
vf_id, q_id);
if (vf_id) {
if (vf_id >= hdev->num_alloc_vport) {
- dev_err(dev, "invalid vf id(%u)\n", vf_id);
+ dev_err(dev, "invalid vport(%u)\n", vf_id);
return;
}
@@ -2463,8 +2463,8 @@ static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
if (ret)
- dev_err(dev, "inform reset to vf(%u) failed %d!\n",
- hdev->vport->vport_id, ret);
+ dev_err(dev, "inform reset to vport(%u) failed %d!\n",
+ vf_id, ret);
} else {
set_bit(HNAE3_FUNC_RESET, reset_requests);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f1e46ba799f9..f5b8d1fee0f1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3661,7 +3661,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret) {
dev_err(&hdev->pdev->dev,
"set vf(%u) rst failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
return ret;
}
@@ -3676,7 +3677,8 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
if (ret)
dev_warn(&hdev->pdev->dev,
"inform reset to vf(%u) failed %d!\n",
- vport->vport_id, ret);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM,
+ ret);
}
return 0;
@@ -4741,6 +4743,24 @@ static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
return 0;
}
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = vport->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
@@ -4750,30 +4770,27 @@ static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
u8 hash_algo;
int ret, i;
+ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+ return ret;
+ }
+
/* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- hash_algo = vport->rss_algo;
- break;
- default:
- return -EINVAL;
- }
-
ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
if (ret)
return ret;
/* Update the shadow RSS key with user specified qids */
memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
- vport->rss_algo = hash_algo;
+ } else {
+ ret = hclge_set_rss_algo_key(hdev, hash_algo,
+ vport->rss_hash_key);
+ if (ret)
+ return ret;
}
+ vport->rss_algo = hash_algo;
/* Update the shadow RSS table with user specified qids */
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
@@ -6627,10 +6644,13 @@ static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
u16 tqps;
+ /* To keep consistent with the user's configuration, subtract 1 when
+ * printing 'vf', because the vf id from ethtool is offset by 1.
+ */
if (vf > hdev->num_req_vfs) {
dev_err(&hdev->pdev->dev,
- "Error: vf id (%u) > max vf num (%u)\n",
- vf, hdev->num_req_vfs);
+ "Error: vf id (%u) should be less than %u\n",
+ vf - 1, hdev->num_req_vfs);
return -EINVAL;
}
@@ -8688,15 +8708,8 @@ int hclge_add_uc_addr_common(struct hclge_vport *vport,
}
/* check if we just hit the duplicate */
- if (!ret) {
- dev_warn(&hdev->pdev->dev, "VF %u mac(%pM) exists\n",
- vport->vport_id, addr);
- return 0;
- }
-
- dev_err(&hdev->pdev->dev,
- "PF failed to add unicast entry(%pM) in the MAC table\n",
- addr);
+ if (!ret)
+ return -EEXIST;
return ret;
}
@@ -8848,7 +8861,13 @@ static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
} else {
set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
&vport->state);
- break;
+
+ /* If one unicast mac address already exists in hardware,
+ * we need to check whether other unicast mac addresses
+ * are new addresses that can be added.
+ */
+ if (ret != -EEXIST)
+ break;
}
}
}
@@ -9797,6 +9816,9 @@ static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
if (is_kill && !vlan_id)
return 0;
+ if (vlan_id >= VLAN_N_VID)
+ return -EINVAL;
+
ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -10703,7 +10725,8 @@ static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
return 0;
}
-static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
+ u8 *reset_status)
{
struct hclge_reset_tqp_queue_cmd *req;
struct hclge_desc desc;
@@ -10721,7 +10744,9 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
return ret;
}
- return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+ *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+
+ return 0;
}
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
@@ -10740,7 +10765,7 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
u16 reset_try_times = 0;
- int reset_status;
+ u8 reset_status;
u16 queue_gid;
int ret;
u16 i;
@@ -10756,7 +10781,11 @@ static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
}
while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
- reset_status = hclge_get_reset_status(hdev, queue_gid);
+ ret = hclge_get_reset_status(hdev, queue_gid,
+ &reset_status);
+ if (ret)
+ return ret;
+
if (reset_status)
break;
@@ -11449,11 +11478,11 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev)
struct hclge_vport *vport = &hdev->vport[i];
int ret;
- /* Send cmd to clear VF's FUNC_RST_ING */
+ /* Send cmd to clear vport's FUNC_RST_ING */
ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
if (ret)
dev_warn(&hdev->pdev->dev,
- "clear vf(%u) rst failed %d!\n",
+ "clear vport(%u) rst failed %d!\n",
vport->vport_id, ret);
}
}
@@ -12767,8 +12796,12 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
continue;
if (vport->vf_info.trusted) {
- uc_en = vport->vf_info.request_uc_en > 0;
- mc_en = vport->vf_info.request_mc_en > 0;
+ uc_en = vport->vf_info.request_uc_en > 0 ||
+ vport->overflow_promisc_flags &
+ HNAE3_OVERFLOW_UPE;
+ mc_en = vport->vf_info.request_mc_en > 0 ||
+ vport->overflow_promisc_flags &
+ HNAE3_OVERFLOW_MPE;
}
bc_en = vport->vf_info.request_bc_en > 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 2ce5302c5956..65d78ee4d65a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -566,7 +566,7 @@ static int hclge_reset_vf(struct hclge_vport *vport)
struct hclge_dev *hdev = vport->back;
dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
- vport->vport_id);
+ vport->vport_id - HCLGE_VF_VPORT_START_NUM);
return hclge_func_reset_cmd(hdev, vport->vport_id);
}
@@ -590,9 +590,17 @@ static void hclge_get_queue_id_in_pf(struct hclge_vport *vport,
struct hclge_mbx_vf_to_pf_cmd *mbx_req,
struct hclge_respond_to_vf_msg *resp_msg)
{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
u16 queue_id, qid_in_pf;
memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+ queue_id, mbx_req->mbx_src_vfid);
+ return;
+ }
+
qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
resp_msg->len = sizeof(qid_in_pf);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 78d5bf1ea561..f314dbd3ce11 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -581,7 +581,7 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
dev_err(&hdev->pdev->dev,
- "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ "vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
vport->vport_id, shap_cfg_cmd->qs_id,
max_tx_rate, ret);
return ret;
@@ -687,12 +687,10 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
for (i = 0; i < HNAE3_MAX_TC; i++) {
if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
- set_bit(i, &kinfo->tc_info.tc_en);
kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
} else {
/* Set to default queue if TC is disabled */
- clear_bit(i, &kinfo->tc_info.tc_en);
kinfo->tc_info.tqp_offset[i] = 0;
kinfo->tc_info.tqp_count[i] = 1;
}
@@ -729,14 +727,6 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
hdev->tm_info.prio_tc[i] =
(i >= hdev->tm_info.num_tc) ? 0 : i;
-
- /* DCB is enabled if we have more than 1 TC or pfc_en is
- * non-zero.
- */
- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
@@ -767,10 +757,10 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
dev_warn(&hdev->pdev->dev,
- "DCB is disable, but last mode is FC_PFC\n");
+ "Only 1 tc used, but last mode is FC_PFC\n");
hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
@@ -796,7 +786,7 @@ static void hclge_update_fc_mode(struct hclge_dev *hdev)
}
}
-static void hclge_pfc_info_init(struct hclge_dev *hdev)
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
hclge_update_fc_mode(hdev);
@@ -812,7 +802,7 @@ static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
hclge_tm_vport_info_update(hdev);
- hclge_pfc_info_init(hdev);
+ hclge_tm_pfc_info_update(hdev);
}
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
@@ -1558,19 +1548,6 @@ void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
hclge_tm_schd_info_init(hdev);
}
-void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
-{
- /* DCB is enabled if we have more than 1 TC or pfc_en is
- * non-zero.
- */
- if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
- hclge_pfc_info_init(hdev);
-}
-
int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
int ret;
@@ -1616,7 +1593,7 @@ int hclge_tm_vport_map_update(struct hclge_dev *hdev)
if (ret)
return ret;
- if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
return 0;
return hclge_tm_bp_setup(hdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index a69e892277b3..5fdac8685f95 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -816,40 +816,56 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
return 0;
}
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = hdev->rss_cfg.hash_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 hash_algo;
int ret, i;
if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
/* Set the RSS Hash Key if specified by the user */
if (key) {
- switch (hfunc) {
- case ETH_RSS_HASH_TOP:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
- break;
- case ETH_RSS_HASH_XOR:
- rss_cfg->hash_algo =
- HCLGEVF_RSS_HASH_ALGO_SIMPLE;
- break;
- case ETH_RSS_HASH_NO_CHANGE:
- break;
- default:
- return -EINVAL;
- }
-
- ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
- key);
- if (ret)
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "invalid hfunc type %u\n", hfunc);
return ret;
+ }
/* Update the shadow RSS key with user specified qids */
memcpy(rss_cfg->rss_hash_key, key,
HCLGEVF_RSS_KEY_SIZE);
+ } else {
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
}
+ rss_cfg->hash_algo = hash_algo;
}
/* update the shadow RSS table with user specified qids */
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 3e54017a2a5b..07fdab58001d 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -354,7 +354,7 @@ static int hns_mdio_reset(struct mii_bus *bus)
if (dev_of_node(bus->parent)) {
if (!mdio_dev->subctrl_vbase) {
- dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+ dev_err(&bus->dev, "mdio sys ctl reg has not mapped\n");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a4579b340120..6aa6ff89a765 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4708,14 +4708,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
return 0;
}
- if (adapter->failover_pending) {
- adapter->init_done_rc = -EAGAIN;
- netdev_dbg(netdev, "Failover pending, ignoring login response\n");
- complete(&adapter->init_done);
- /* login response buffer will be released on reset */
- return 0;
- }
-
netdev->mtu = adapter->req_mtu - ETH_HLEN;
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index b0b6f90deb7d..ed8ea63bb172 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -335,6 +335,7 @@ config IGC
tristate "Intel(R) Ethernet Controller I225-LM/I225-V support"
default n
depends on PCI
+ depends on PTP_1588_CLOCK_OPTIONAL
help
This driver supports Intel(R) Ethernet Controller I225-LM/I225-V
family of adapters.
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 373eb027b925..09ae1939e6db 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2437,11 +2437,15 @@ static void e100_get_drvinfo(struct net_device *netdev,
sizeof(info->bus_info));
}
-#define E100_PHY_REGS 0x1C
+#define E100_PHY_REGS 0x1D
static int e100_get_regs_len(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
- return 1 + E100_PHY_REGS + sizeof(nic->mem->dump_buf);
+
+ /* We know the number of registers, and the size of the dump buffer.
+ * Calculate the total size in bytes.
+ */
+ return (1 + E100_PHY_REGS) * sizeof(u32) + sizeof(nic->mem->dump_buf);
}
static void e100_get_regs(struct net_device *netdev,
@@ -2455,14 +2459,18 @@ static void e100_get_regs(struct net_device *netdev,
buff[0] = ioread8(&nic->csr->scb.cmd_hi) << 24 |
ioread8(&nic->csr->scb.cmd_lo) << 16 |
ioread16(&nic->csr->scb.status);
- for (i = E100_PHY_REGS; i >= 0; i--)
- buff[1 + E100_PHY_REGS - i] =
- mdio_read(netdev, nic->mii.phy_id, i);
+ for (i = 0; i < E100_PHY_REGS; i++)
+ /* Note that we read the registers in reverse order. This
+ * ordering is the ABI apparently used by ethtool and other
+ * applications.
+ */
+ buff[1 + i] = mdio_read(netdev, nic->mii.phy_id,
+ E100_PHY_REGS - 1 - i);
memset(nic->mem->dump_buf, 0, sizeof(nic->mem->dump_buf));
e100_exec_cb(nic, NULL, e100_dump);
msleep(10);
- memcpy(&buff[2 + E100_PHY_REGS], nic->mem->dump_buf,
- sizeof(nic->mem->dump_buf));
+ memcpy(&buff[1 + E100_PHY_REGS], nic->mem->dump_buf,
+ sizeof(nic->mem->dump_buf));
}
static void e100_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
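
The e100 hunks size the register dump in whole u32 slots and read the PHY registers highest-numbered first into buff[1..E100_PHY_REGS], with the dump buffer copied immediately after the last register. A sketch of the resulting layout arithmetic; the dump-buffer size and the values returned by the read stub are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define E100_PHY_REGS 0x1D            /* number of PHY registers dumped, per the patch */
#define DUMP_BUF_SIZE 596             /* hypothetical sizeof(nic->mem->dump_buf) */

/* Hypothetical register read: returns a recognisable value per register number. */
static uint16_t mdio_read_stub(int reg)
{
    return (uint16_t)(0x1000 + reg);
}

int main(void)
{
    uint32_t buff[1 + E100_PHY_REGS];
    int i;

    buff[0] = 0xdeadbeef;             /* slot 0 holds the SCB status/command word */

    /* Registers are stored highest-numbered first, matching the ethtool ABI
     * the driver comment describes. */
    for (i = 0; i < E100_PHY_REGS; i++)
        buff[1 + i] = mdio_read_stub(E100_PHY_REGS - 1 - i);

    printf("regs len = %zu bytes\n",
           (1 + E100_PHY_REGS) * sizeof(uint32_t) + DUMP_BUF_SIZE);
    printf("buff[1]  = 0x%x (register %d)\n", (unsigned)buff[1], E100_PHY_REGS - 1);
    return 0;
}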
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index fc26e4ddeb0d..beda8e0ef7d4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3208,7 +3208,7 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
max_combined = ixgbe_max_rss_indices(adapter);
}
- return max_combined;
+ return min_t(int, max_combined, num_online_cpus());
}
static void ixgbe_get_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 24e06ba6f5e9..13c4782b920a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10112,6 +10112,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct bpf_prog *old_prog;
bool need_reset;
+ int num_queues;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
return -EINVAL;
@@ -10161,11 +10162,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
/* Kick start the NAPI context if there is an AF_XDP socket open
* on that queue id. This is so that receiving will start.
*/
- if (need_reset && prog)
- for (i = 0; i < adapter->num_rx_queues; i++)
+ if (need_reset && prog) {
+ num_queues = min_t(int, adapter->num_rx_queues,
+ adapter->num_xdp_queues);
+ for (i = 0; i < num_queues; i++)
if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index b5f68f66d42a..7bb1f20002b5 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -186,6 +186,9 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
int hash;
int i;
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+ return -EEXIST;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
struct flow_match_meta match;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index a2f61a87cef8..8af7f2827322 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -372,6 +372,9 @@ mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
int nhoff = skb_network_offset(skb);
int ret = 0;
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
if (skb->protocol != htons(ETH_P_IP))
return -EPROTONOSUPPORT;
@@ -1269,7 +1272,6 @@ static void mlx4_en_do_set_rx_mode(struct work_struct *work)
if (!netif_carrier_ok(dev)) {
if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
if (priv->port_state.link_state) {
- priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
netif_carrier_on(dev);
en_dbg(LINK, priv, "Link Up\n");
}
@@ -1557,26 +1559,36 @@ static void mlx4_en_service_task(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
-static void mlx4_en_linkstate(struct work_struct *work)
+static void mlx4_en_linkstate(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_port_state *port_state = &priv->port_state;
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ bool up;
+
+ if (mlx4_en_QUERY_PORT(mdev, priv->port))
+ port_state->link_state = MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN;
+
+ up = port_state->link_state == MLX4_PORT_STATE_DEV_EVENT_PORT_UP;
+ if (up == netif_carrier_ok(dev))
+ netif_carrier_event(dev);
+ if (!up) {
+ en_info(priv, "Link Down\n");
+ netif_carrier_off(dev);
+ } else {
+ en_info(priv, "Link Up\n");
+ netif_carrier_on(dev);
+ }
+}
+
+static void mlx4_en_linkstate_work(struct work_struct *work)
{
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
linkstate_task);
struct mlx4_en_dev *mdev = priv->mdev;
- int linkstate = priv->link_state;
mutex_lock(&mdev->state_lock);
- /* If observable port state changed set carrier state and
- * report to system log */
- if (priv->last_link_state != linkstate) {
- if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
- en_info(priv, "Link Down\n");
- netif_carrier_off(priv->dev);
- } else {
- en_info(priv, "Link Up\n");
- netif_carrier_on(priv->dev);
- }
- }
- priv->last_link_state = linkstate;
+ mlx4_en_linkstate(priv);
mutex_unlock(&mdev->state_lock);
}
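
mlx4_en_linkstate() now queries the port state itself, treats a failed query as link down, and fires netif_carrier_event() when the freshly read state matches the current carrier so listeners still see a notification. A user-space sketch of that decision logic with stubbed carrier helpers in place of the netdev API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the netdev carrier helpers; they only track and report state here. */
static bool carrier_on;
static void carrier_event(void)  { printf("carrier event (state unchanged)\n"); }
static void carrier_set(bool up) { carrier_on = up; printf("carrier %s\n", up ? "on" : "off"); }

/* Mirror of the reworked mlx4_en_linkstate() decision: a failed port query is
 * treated as link down, and an unchanged state still emits a carrier event. */
static void linkstate_update(bool query_failed, bool port_up)
{
    bool up = query_failed ? false : port_up;

    if (up == carrier_on)
        carrier_event();
    carrier_set(up);
}

int main(void)
{
    linkstate_update(false, true);   /* link comes up */
    linkstate_update(false, true);   /* unchanged: event only */
    linkstate_update(true, true);    /* query failure forces link down */
    return 0;
}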
@@ -2079,9 +2091,11 @@ static int mlx4_en_open(struct net_device *dev)
mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
- if (err)
+ if (err) {
en_err(priv, "Failed starting port:%d\n", priv->port);
-
+ goto out;
+ }
+ mlx4_en_linkstate(priv);
out:
mutex_unlock(&mdev->state_lock);
return err;
@@ -3168,7 +3182,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
INIT_WORK(&priv->restart_task, mlx4_en_restart);
- INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate_work);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f3d1a20201ef..6bf558c5ec10 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -552,7 +552,6 @@ struct mlx4_en_priv {
struct mlx4_hwq_resources res;
int link_state;
- int last_link_state;
bool port_up;
int port;
int registered;
diff --git a/drivers/net/ethernet/micrel/Makefile b/drivers/net/ethernet/micrel/Makefile
index 5cc00d22c708..6ecc4eb30e74 100644
--- a/drivers/net/ethernet/micrel/Makefile
+++ b/drivers/net/ethernet/micrel/Makefile
@@ -4,8 +4,6 @@
#
obj-$(CONFIG_KS8842) += ks8842.o
-obj-$(CONFIG_KS8851) += ks8851.o
-ks8851-objs = ks8851_common.o ks8851_spi.o
-obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
-ks8851_mll-objs = ks8851_common.o ks8851_par.o
+obj-$(CONFIG_KS8851) += ks8851_common.o ks8851_spi.o
+obj-$(CONFIG_KS8851_MLL) += ks8851_common.o ks8851_par.o
obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index 3f69bb59ba49..a6db1a8156e1 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -1057,6 +1057,7 @@ int ks8851_suspend(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(ks8851_suspend);
int ks8851_resume(struct device *dev)
{
@@ -1070,6 +1071,7 @@ int ks8851_resume(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(ks8851_resume);
#endif
static int ks8851_register_mdiobus(struct ks8851_net *ks, struct device *dev)
@@ -1243,6 +1245,7 @@ err_reg:
err_reg_io:
return ret;
}
+EXPORT_SYMBOL_GPL(ks8851_probe_common);
int ks8851_remove_common(struct device *dev)
{
@@ -1261,3 +1264,8 @@ int ks8851_remove_common(struct device *dev)
return 0;
}
+EXPORT_SYMBOL_GPL(ks8851_remove_common);
+
+MODULE_DESCRIPTION("KS8851 Network driver");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index c581b955efb3..559177e6ded4 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -563,16 +563,6 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA |
DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG);
- /* Take MAC, Port, Phy (intern) and PCS (SGMII/Serdes) clock out of
- * reset
- */
- ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(speed),
- DEV_CLOCK_CFG);
-
- /* No PFC */
- ocelot_write_gix(ocelot, ANA_PFC_PFC_CFG_FC_LINK_SPEED(speed),
- ANA_PFC_PFC_CFG, port);
-
/* Core: Enable port for frame transfer */
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
@@ -1303,14 +1293,19 @@ static u32 ocelot_get_bond_mask(struct ocelot *ocelot, struct net_device *bond,
return mask;
}
-static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot,
+static u32 ocelot_get_bridge_fwd_mask(struct ocelot *ocelot, int src_port,
struct net_device *bridge)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[src_port];
u32 mask = 0;
int port;
+ if (!ocelot_port || ocelot_port->bridge != bridge ||
+ ocelot_port->stp_state != BR_STATE_FORWARDING)
+ return 0;
+
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct ocelot_port *ocelot_port = ocelot->ports[port];
+ ocelot_port = ocelot->ports[port];
if (!ocelot_port)
continue;
@@ -1376,7 +1371,7 @@ void ocelot_apply_bridge_fwd_mask(struct ocelot *ocelot)
struct net_device *bridge = ocelot_port->bridge;
struct net_device *bond = ocelot_port->bond;
- mask = ocelot_get_bridge_fwd_mask(ocelot, bridge);
+ mask = ocelot_get_bridge_fwd_mask(ocelot, port, bridge);
mask |= cpu_fwd_mask;
mask &= ~BIT(port);
if (bond) {
diff --git a/drivers/net/ethernet/mscc/ocelot_devlink.c b/drivers/net/ethernet/mscc/ocelot_devlink.c
index edafbd37d12c..b8737efd2a85 100644
--- a/drivers/net/ethernet/mscc/ocelot_devlink.c
+++ b/drivers/net/ethernet/mscc/ocelot_devlink.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*/
#include <net/devlink.h>
#include "ocelot.h"
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 08b481a93460..4b0941f09f71 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -2,7 +2,7 @@
/* Microsemi Ocelot Switch driver
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index c0c465a4a981..e54b9fb2a97a 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -5,7 +5,7 @@
* mscc_ocelot_switch_lib.
*
* Copyright (c) 2017, 2019 Microsemi Corporation
- * Copyright 2020-2021 NXP Semiconductors
+ * Copyright 2020-2021 NXP
*/
#include <linux/if_bridge.h>
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 58a854666c62..c14de5fcedea 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -380,15 +380,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
&ionic_dbg_intr_stats_desc[i]);
(*buf)++;
}
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->napi_stats,
- &ionic_dbg_napi_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- **buf = txqcq->napi_stats.work_done_cntr[i];
- (*buf)++;
- }
for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
**buf = txstats->sg_cntr[i];
(*buf)++;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index fc8b3e64f153..186d0048a9d1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1297,6 +1297,14 @@ qed_iwarp_wait_cid_map_cleared(struct qed_hwfn *p_hwfn, struct qed_bmap *bmap)
prev_weight = weight;
while (weight) {
+ /* If the HW device is in recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid_map to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return 0;
+
msleep(QED_IWARP_MAX_CID_CLEAN_TIME);
weight = bitmap_weight(bmap->bitmap, bmap->max_count);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index f16a157bb95a..cf5baa5e59bc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -77,6 +77,14 @@ void qed_roce_stop(struct qed_hwfn *p_hwfn)
* Beyond the added delay we clear the bitmap anyway.
*/
while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+ /* If the HW device is in recovery, all resources are
+ * immediately reset without receiving a per-cid indication
+ * from HW. In this case we don't expect the cid bitmap to be
+ * cleared.
+ */
+ if (p_hwfn->cdev->recov_in_prog)
+ return;
+
msleep(100);
if (wait_count++ > 20) {
DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 553c4403258a..981ccf47dcea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -486,6 +486,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ true);
}
if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig
index 309de38a7530..b0d3f9a2950c 100644
--- a/drivers/net/ethernet/sun/Kconfig
+++ b/drivers/net/ethernet/sun/Kconfig
@@ -73,6 +73,7 @@ config CASSINI
config SUNVNET_COMMON
tristate "Common routines to support Sun Virtual Networking"
depends on SUN_LDOMS
+ depends on INET
default m
config SUNVNET
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index f4843f9672c1..441da03c23ee 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -48,6 +48,7 @@ config BPQETHER
config DMASCC
tristate "High-speed (DMA) SCC driver for AX.25"
depends on ISA && AX25 && BROKEN_ON_SMP && ISA_DMA_API
+ depends on VIRT_TO_BUS
help
This is a driver for high-speed SCC boards, i.e. those supporting
DMA on one port. You usually use those boards to connect your
diff --git a/drivers/net/mdio/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 0d7d3e15d2f0..5f4cd24a0241 100644
--- a/drivers/net/mdio/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -207,6 +207,7 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
{
struct ipq4019_mdio_data *priv;
struct mii_bus *bus;
+ struct resource *res;
int ret;
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv));
@@ -224,7 +225,10 @@ static int ipq4019_mdio_probe(struct platform_device *pdev)
return PTR_ERR(priv->mdio_clk);
/* The platform resource is provided on the chipset IPQ5018 */
- priv->eth_ldo_rdy = devm_platform_ioremap_resource(pdev, 1);
+ /* This resource is optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res)
+ priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res);
bus->name = "ipq4019_mdio";
bus->read = ipq4019_mdio_read;
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 1ee592d3eae4..17f98f609ec8 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -134,8 +134,9 @@ static int mscc_miim_reset(struct mii_bus *bus)
static int mscc_miim_probe(struct platform_device *pdev)
{
- struct mii_bus *bus;
struct mscc_miim_dev *dev;
+ struct resource *res;
+ struct mii_bus *bus;
int ret;
bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*dev));
@@ -156,10 +157,14 @@ static int mscc_miim_probe(struct platform_device *pdev)
return PTR_ERR(dev->regs);
}
- dev->phy_regs = devm_platform_ioremap_resource(pdev, 1);
- if (IS_ERR(dev->phy_regs)) {
- dev_err(&pdev->dev, "Unable to map internal phy registers\n");
- return PTR_ERR(dev->phy_regs);
+ /* This resource is optional */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ dev->phy_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dev->phy_regs)) {
+ dev_err(&pdev->dev, "Unable to map internal phy registers\n");
+ return PTR_ERR(dev->phy_regs);
+ }
}
ret = of_mdiobus_register(bus, pdev->dev.of_node);
diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c
index d127eb6e9257..aaa628f859fd 100644
--- a/drivers/net/mhi_net.c
+++ b/drivers/net/mhi_net.c
@@ -321,7 +321,7 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
/* Start MHI channels */
err = mhi_prepare_for_transfer(mhi_dev);
if (err)
- goto out_err;
+ return err;
/* Number of transfer descriptors determines size of the queue */
mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
@@ -331,10 +331,6 @@ static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
return err;
return 0;
-
-out_err:
- free_netdev(ndev);
- return err;
}
static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
diff --git a/drivers/net/pcs/pcs-xpcs-nxp.c b/drivers/net/pcs/pcs-xpcs-nxp.c
index 984c9f7f16a8..d16fc58cd48d 100644
--- a/drivers/net/pcs/pcs-xpcs-nxp.c
+++ b/drivers/net/pcs/pcs-xpcs-nxp.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2021 NXP Semiconductors
+/* Copyright 2021 NXP
*/
#include <linux/pcs/pcs-xpcs.h>
#include "pcs-xpcs.h"
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index e79297a4bae8..27b6a3f507ae 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -27,7 +27,12 @@
#define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe
#define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf
#define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a
+#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0
+#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1
+#define MII_BCM7XXX_SHD_3_EEE_CAP 0x2
#define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3
+#define MII_BCM7XXX_SHD_3_EEE_LP 0x4
+#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5
#define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6
#define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400
#define MII_BCM7XXX_SHD_3_AN_STAT 0xb
@@ -216,25 +221,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev)
return genphy_config_aneg(phydev);
}
-static int phy_set_clr_bits(struct phy_device *dev, int location,
- int set_mask, int clr_mask)
+static int __phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
{
int v, ret;
- v = phy_read(dev, location);
+ v = __phy_read(dev, location);
if (v < 0)
return v;
v &= ~clr_mask;
v |= set_mask;
- ret = phy_write(dev, location, v);
+ ret = __phy_write(dev, location, v);
if (ret < 0)
return ret;
return v;
}
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
+{
+ int ret;
+
+ mutex_lock(&dev->mdio.bus->mdio_lock);
+ ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask);
+ mutex_unlock(&dev->mdio.bus->mdio_lock);
+
+ return ret;
+}
+
static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev)
{
int ret;
@@ -398,6 +415,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
return bcm7xxx_28nm_ephy_apd_enable(phydev);
}
+#define MII_BCM7XXX_REG_INVALID 0xff
+
+static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
+{
+ switch (regnum) {
+ case MDIO_CTRL1:
+ return MII_BCM7XXX_SHD_3_PCS_CTRL;
+ case MDIO_STAT1:
+ return MII_BCM7XXX_SHD_3_PCS_STATUS;
+ case MDIO_PCS_EEE_ABLE:
+ return MII_BCM7XXX_SHD_3_EEE_CAP;
+ case MDIO_AN_EEE_ADV:
+ return MII_BCM7XXX_SHD_3_AN_EEE_ADV;
+ case MDIO_AN_EEE_LPABLE:
+ return MII_BCM7XXX_SHD_3_EEE_LP;
+ case MDIO_PCS_EEE_WK_ERR:
+ return MII_BCM7XXX_SHD_3_EEE_WK_ERR;
+ default:
+ return MII_BCM7XXX_REG_INVALID;
+ }
+}
+
+static bool bcm7xxx_28nm_ephy_dev_valid(int devnum)
+{
+ return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS;
+}
+
+static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev,
+ int devnum, u16 regnum)
+{
+ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+ int ret;
+
+ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+ shd == MII_BCM7XXX_REG_INVALID)
+ return -EOPNOTSUPP;
+
+ /* set shadow mode 2 */
+ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Access the desired shadow register address */
+ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT);
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+ return ret;
+}
+
+static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev,
+ int devnum, u16 regnum, u16 val)
+{
+ u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum);
+ int ret;
+
+ if (!bcm7xxx_28nm_ephy_dev_valid(devnum) ||
+ shd == MII_BCM7XXX_REG_INVALID)
+ return -EOPNOTSUPP;
+
+ /* set shadow mode 2 */
+ ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Access the desired shadow register address */
+ ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd);
+ if (ret < 0)
+ goto reset_shadow_mode;
+
+ /* Write the desired value in the shadow register */
+ __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val);
+
+reset_shadow_mode:
+ /* reset shadow mode 2 */
+ return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0,
+ MII_BCM7XXX_SHD_MODE_2);
+}
+
static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev)
{
int ret;
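
The new bcm7xxx MMD accessors translate the handful of supported clause-45 registers into shadow-bank addresses and bracket every access with the set/reset of shadow mode 2. A user-space sketch of just the register mapping and its failure path; the enum values are hypothetical stand-ins for the MDIO_* and MII_BCM7XXX_SHD_3_* constants:

#include <stdint.h>
#include <stdio.h>

#define REG_INVALID 0xff

/* Hypothetical clause-45 register numbers and shadow addresses standing in for
 * the MDIO_* and MII_BCM7XXX_SHD_3_* definitions used by the driver. */
enum { MMD_CTRL1 = 0, MMD_STAT1 = 1, MMD_EEE_ABLE = 20, MMD_EEE_ADV = 60 };
enum { SHD_PCS_CTRL = 0, SHD_PCS_STATUS = 1, SHD_EEE_CAP = 2, SHD_EEE_ADV = 3 };

static uint8_t regnum_to_shd(uint16_t regnum)
{
    switch (regnum) {
    case MMD_CTRL1:    return SHD_PCS_CTRL;
    case MMD_STAT1:    return SHD_PCS_STATUS;
    case MMD_EEE_ABLE: return SHD_EEE_CAP;
    case MMD_EEE_ADV:  return SHD_EEE_ADV;
    default:           return REG_INVALID;
    }
}

int main(void)
{
    uint16_t regs[] = { MMD_STAT1, MMD_EEE_ADV, 0x1234 };

    for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
        uint8_t shd = regnum_to_shd(regs[i]);

        if (shd == REG_INVALID)       /* unsupported register: -EOPNOTSUPP in the driver */
            printf("reg 0x%x: not supported\n", regs[i]);
        else
            printf("reg 0x%x -> shadow 0x%x\n", regs[i], shd);
    }
    return 0;
}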
@@ -595,6 +699,8 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
.remove = bcm7xxx_28nm_remove, \
+ .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \
+ .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \
}
#define BCM7XXX_40NM_EPHY(_oui, _name) \
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 53f034fc2ef7..e37edf70f2f8 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -525,6 +525,10 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
NULL == bus->read || NULL == bus->write)
return -EINVAL;
+ if (bus->parent && bus->parent->of_node)
+ bus->parent->of_node->fwnode.flags |=
+ FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD;
+
BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
bus->state != MDIOBUS_UNREGISTERED);
@@ -537,6 +541,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
+ put_device(&bus->dev);
return -EINVAL;
}
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index c94cb5382dc9..250742ffdfd9 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -179,6 +179,16 @@ static int mdio_remove(struct device *dev)
return 0;
}
+static void mdio_shutdown(struct device *dev)
+{
+ struct mdio_device *mdiodev = to_mdio_device(dev);
+ struct device_driver *drv = mdiodev->dev.driver;
+ struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+
+ if (mdiodrv->shutdown)
+ mdiodrv->shutdown(mdiodev);
+}
+
/**
* mdio_driver_register - register an mdio_driver with the MDIO layer
* @drv: new mdio_driver to register
@@ -193,6 +203,7 @@ int mdio_driver_register(struct mdio_driver *drv)
mdiodrv->driver.bus = &mdio_bus_type;
mdiodrv->driver.probe = mdio_probe;
mdiodrv->driver.remove = mdio_remove;
+ mdiodrv->driver.shutdown = mdio_shutdown;
retval = driver_register(&mdiodrv->driver);
if (retval) {
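
mdio_shutdown() forwards the driver-core shutdown callback to an mdio_driver's optional shutdown hook, and mdio_driver_register() wires it up for every registered driver. A user-space sketch of that optional-callback dispatch, using local struct definitions rather than the kernel's mdio types:

#include <stdio.h>

struct mdio_device { const char *name; };

/* A driver may or may not provide a shutdown hook; the bus-level handler
 * has to tolerate both, as mdio_shutdown() does in the patch. */
struct mdio_driver {
    void (*shutdown)(struct mdio_device *dev);
};

static void bus_shutdown(struct mdio_driver *drv, struct mdio_device *dev)
{
    if (drv->shutdown)
        drv->shutdown(dev);
}

static void my_shutdown(struct mdio_device *dev)
{
    printf("%s: quiescing hardware\n", dev->name);
}

int main(void)
{
    struct mdio_device dev = { .name = "switch0" };
    struct mdio_driver with = { .shutdown = my_shutdown };
    struct mdio_driver without = { 0 };

    bus_shutdown(&with, &dev);        /* hook runs */
    bus_shutdown(&without, &dev);     /* silently skipped */
    return 0;
}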
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index 2d5d5081c3b6..5ce1bf03bbd7 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -493,6 +493,25 @@ static int gpy_loopback(struct phy_device *phydev, bool enable)
return ret;
}
+static int gpy115_loopback(struct phy_device *phydev, bool enable)
+{
+ int ret;
+ int fw_minor;
+
+ if (enable)
+ return gpy_loopback(phydev, enable);
+
+ ret = phy_read(phydev, PHY_FWV);
+ if (ret < 0)
+ return ret;
+
+ fw_minor = FIELD_GET(PHY_FWV_MINOR_MASK, ret);
+ if (fw_minor > 0x0076)
+ return gpy_loopback(phydev, 0);
+
+ return genphy_soft_reset(phydev);
+}
+
static struct phy_driver gpy_drivers[] = {
{
PHY_ID_MATCH_MODEL(PHY_ID_GPY2xx),
@@ -527,7 +546,7 @@ static struct phy_driver gpy_drivers[] = {
.handle_interrupt = gpy_handle_interrupt,
.set_wol = gpy_set_wol,
.get_wol = gpy_get_wol,
- .set_loopback = gpy_loopback,
+ .set_loopback = gpy115_loopback,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_GPY115C),
@@ -544,7 +563,7 @@ static struct phy_driver gpy_drivers[] = {
.handle_interrupt = gpy_handle_interrupt,
.set_wol = gpy_set_wol,
.get_wol = gpy_get_wol,
- .set_loopback = gpy_loopback,
+ .set_loopback = gpy115_loopback,
},
{
.phy_id = PHY_ID_GPY211B,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a57251ba5991..f97813a4e8d1 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2719,14 +2719,14 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
serial = kzalloc(sizeof(*serial), GFP_KERNEL);
if (!serial)
- goto exit;
+ goto err_free_dev;
hso_dev->port_data.dev_serial = serial;
serial->parent = hso_dev;
if (hso_serial_common_create
(serial, 1, CTRL_URB_RX_SIZE, CTRL_URB_TX_SIZE))
- goto exit;
+ goto err_free_serial;
serial->tx_data_length--;
serial->write_data = hso_mux_serial_write_data;
@@ -2742,11 +2742,9 @@ struct hso_device *hso_create_mux_serial_device(struct usb_interface *interface,
/* done, return it */
return hso_dev;
-exit:
- if (serial) {
- tty_unregister_device(tty_drv, serial->minor);
- kfree(serial);
- }
+err_free_serial:
+ kfree(serial);
+err_free_dev:
kfree(hso_dev);
return NULL;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 7d953974eb9b..26b1bd8e845b 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1178,7 +1178,10 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
static void smsc95xx_handle_link_change(struct net_device *net)
{
+ struct usbnet *dev = netdev_priv(net);
+
phy_print_status(net->phydev);
+ usbnet_defer_kevent(dev, EVENT_LINK_CHANGE);
}
static int smsc95xx_start_phy(struct usbnet *dev)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 271d38c1d9f8..79bd2585ec6b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -423,6 +423,10 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
skb_reserve(skb, p - buf);
skb_put(skb, len);
+
+ page = (struct page *)page->private;
+ if (page)
+ give_pages(rq, page);
goto ok;
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5a8df5a195cb..141635a35c28 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -4756,12 +4756,12 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
LIST_HEAD(list);
unsigned int h;
- rtnl_lock();
list_for_each_entry(net, net_list, exit_list) {
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unregister_nexthop_notifier(net, &vn->nexthop_notifier_block);
}
+ rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
vxlan_destroy_tunnels(net, &list);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index ffa894f7312a..0adae76eb8df 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1867,8 +1867,8 @@ mac80211_hwsim_beacon(struct hrtimer *timer)
bcn_int -= data->bcn_delta;
data->bcn_delta = 0;
}
- hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer),
- ns_to_ktime(bcn_int * NSEC_PER_USEC));
+ hrtimer_forward_now(&data->beacon_timer,
+ ns_to_ktime(bcn_int * NSEC_PER_USEC));
return HRTIMER_RESTART;
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 39a01c2a3058..32d5bc4919d8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -499,7 +499,7 @@ check_frags:
* the header's copy failed, and they are
* sharing a slot, send an error
*/
- if (i == 0 && sharedslot)
+ if (i == 0 && !first_shinfo && sharedslot)
xenvif_idx_release(queue, pending_idx,
XEN_NETIF_RSP_ERROR);
else
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index a620c34790e6..0875b773fb41 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -278,6 +278,7 @@ static int st_nci_spi_remove(struct spi_device *dev)
static struct spi_device_id st_nci_spi_id_table[] = {
{ST_NCI_SPI_DRIVER_NAME, 0},
+ {"st21nfcb-spi", 0},
{}
};
MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 72de88ff0d30..ef4950f80832 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -380,7 +380,6 @@ static int pmem_attach_disk(struct device *dev,
struct nd_pfn_sb *pfn_sb;
struct pmem_device *pmem;
struct request_queue *q;
- struct device *gendev;
struct gendisk *disk;
void *addr;
int rc;
@@ -489,10 +488,8 @@ static int pmem_attach_disk(struct device *dev,
}
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
- gendev = disk_to_dev(disk);
- gendev->groups = pmem_attribute_groups;
- device_add_disk(dev, disk, NULL);
+ device_add_disk(dev, disk, pmem_attribute_groups);
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 6600e138945e..7712a8f78337 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
-#include <linux/list_sort.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pr.h>
@@ -979,6 +978,7 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
struct nvme_command *cmd = nvme_req(req)->cmd;
+ struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
if (!(req->rq_flags & RQF_DONTPREP)) {
@@ -1027,7 +1027,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
return BLK_STS_IOERR;
}
- nvme_req(req)->genctr++;
+ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+ nvme_req(req)->genctr++;
cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd);
return ret;
@@ -3716,15 +3717,6 @@ out_unlock:
return ret;
}
-static int ns_cmp(void *priv, const struct list_head *a,
- const struct list_head *b)
-{
- struct nvme_ns *nsa = container_of(a, struct nvme_ns, list);
- struct nvme_ns *nsb = container_of(b, struct nvme_ns, list);
-
- return nsa->head->ns_id - nsb->head->ns_id;
-}
-
struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
struct nvme_ns *ns, *ret = NULL;
@@ -3745,6 +3737,22 @@ struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid)
}
EXPORT_SYMBOL_NS_GPL(nvme_find_get_ns, NVME_TARGET_PASSTHRU);
+/*
+ * Add the namespace to the controller list while keeping the list ordered.
+ */
+static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns)
+{
+ struct nvme_ns *tmp;
+
+ list_for_each_entry_reverse(tmp, &ns->ctrl->namespaces, list) {
+ if (tmp->head->ns_id < ns->head->ns_id) {
+ list_add(&ns->list, &tmp->list);
+ return;
+ }
+ }
+ list_add(&ns->list, &ns->ctrl->namespaces);
+}
+
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
struct nvme_ns_ids *ids)
{
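
nvme_ns_add_to_ctrl_list() keeps ctrl->namespaces sorted at insertion time by walking the list backwards and linking the new namespace after the first entry with a smaller ns_id, which is what lets the list_sort() pass in the scan path go away. A user-space sketch of the same reverse-walk insertion on a plain circular doubly linked list:

#include <stdio.h>

struct ns {
    unsigned int ns_id;
    struct ns *prev, *next;           /* doubly linked, like the kernel's list_head */
};

/* Walk backwards from the tail and link the new node after the first entry
 * with a smaller ns_id, mirroring nvme_ns_add_to_ctrl_list(). */
static void ns_add_sorted(struct ns *head, struct ns *new)
{
    struct ns *tmp;

    for (tmp = head->prev; tmp != head; tmp = tmp->prev) {
        if (tmp->ns_id < new->ns_id)
            break;
    }
    new->prev = tmp;
    new->next = tmp->next;
    tmp->next->prev = new;
    tmp->next = new;
}

int main(void)
{
    struct ns head = { 0, &head, &head };     /* empty circular list */
    unsigned int ids[] = { 3, 1, 2, 5, 4 };
    struct ns nodes[5];

    for (int i = 0; i < 5; i++) {
        nodes[i].ns_id = ids[i];
        ns_add_sorted(&head, &nodes[i]);
    }
    for (struct ns *n = head.next; n != &head; n = n->next)
        printf("nsid %u\n", n->ns_id);        /* prints 1..5 in order */
    return 0;
}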
@@ -3795,9 +3803,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid,
goto out_unlink_ns;
down_write(&ctrl->namespaces_rwsem);
- list_add_tail(&ns->list, &ctrl->namespaces);
+ nvme_ns_add_to_ctrl_list(ns);
up_write(&ctrl->namespaces_rwsem);
-
nvme_get_ctrl(ctrl);
if (device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups))
@@ -4080,10 +4087,6 @@ static void nvme_scan_work(struct work_struct *work)
if (nvme_scan_ns_list(ctrl) != 0)
nvme_scan_ns_sequential(ctrl);
mutex_unlock(&ctrl->scan_lock);
-
- down_write(&ctrl->namespaces_rwsem);
- list_sort(NULL, &ctrl->namespaces, ns_cmp);
- up_write(&ctrl->namespaces_rwsem);
}
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b08a61ca283f..aa14ad963d91 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2487,6 +2487,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
*/
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
+ nvme_sync_io_queues(&ctrl->ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
@@ -2510,6 +2511,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
* clean up the admin queue. Same thing as above.
*/
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
@@ -2951,6 +2953,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ctrl->ctrl.queue_count == 1)
return 0;
+ if (prior_ioq_cnt != nr_io_queues) {
+ dev_info(ctrl->ctrl.device,
+ "reconnect: revising io queue count from %d to %d\n",
+ prior_ioq_cnt, nr_io_queues);
+ blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
+ }
+
ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
if (ret)
goto out_free_io_queues;
@@ -2959,15 +2968,6 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_delete_hw_queues;
- if (prior_ioq_cnt != nr_io_queues) {
- dev_info(ctrl->ctrl.device,
- "reconnect: revising io queue count from %d to %d\n",
- prior_ioq_cnt, nr_io_queues);
- nvme_wait_freeze(&ctrl->ctrl);
- blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
- nvme_unfreeze(&ctrl->ctrl);
- }
-
return 0;
out_delete_hw_queues:
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9871c0c9374c..ed79a6c7e804 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -138,6 +138,12 @@ enum nvme_quirks {
* 48 bits.
*/
NVME_QUIRK_DMA_ADDRESS_BITS_48 = (1 << 16),
+
+ /*
+ * The controller requires the command_id value to be limited, so skip
+ * encoding the generation sequence number.
+ */
+ NVME_QUIRK_SKIP_CID_GEN = (1 << 17),
};
/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b82492cd7503..456a0e8a5718 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3369,7 +3369,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
NVME_QUIRK_128_BYTES_SQES |
- NVME_QUIRK_SHARED_TAGS },
+ NVME_QUIRK_SHARED_TAGS |
+ NVME_QUIRK_SKIP_CID_GEN },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ 0, }
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index e4249b7dc056..3c1c29dd3020 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -620,7 +620,7 @@ static int nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req,
cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
data->ttag = pdu->ttag;
data->command_id = nvme_cid(rq);
- data->data_offset = cpu_to_le32(req->data_sent);
+ data->data_offset = pdu->r2t_offset;
data->data_length = cpu_to_le32(req->pdu_len);
return 0;
}
@@ -953,7 +953,15 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
nvme_tcp_ddgst_update(queue->snd_hash, page,
offset, ret);
- /* fully successful last write*/
+ /*
+ * update the request iterator except for the last payload send
+ * in the request, where we must not modify it because we may
+ * compete with the RX path completing the request.
+ */
+ if (req->data_sent + ret < req->data_len)
+ nvme_tcp_advance_req(req, ret);
+
+ /* fully successful last send in current PDU */
if (last && ret == len) {
if (queue->data_digest) {
nvme_tcp_ddgst_final(queue->snd_hash,
@@ -965,7 +973,6 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
}
return 1;
}
- nvme_tcp_advance_req(req, ret);
}
return -EAGAIN;
}
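
The nvme-tcp change only advances the request iterator while more payload remains, so the final send never touches state that the RX completion path may already be tearing down. A tiny sketch of that guard condition; the byte counts in main() are arbitrary examples:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Decide whether it is safe to advance the request iterator after sending
 * 'ret' bytes, following the rule added in nvme_tcp_try_send_data(): never
 * touch the iterator once the last byte of the payload is on the wire. */
static bool may_advance(size_t data_sent, size_t ret, size_t data_len)
{
    return data_sent + ret < data_len;
}

int main(void)
{
    printf("%d\n", may_advance(0, 4096, 8192));    /* 1: more payload to send */
    printf("%d\n", may_advance(4096, 4096, 8192)); /* 0: last send, leave iterator alone */
    return 0;
}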
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index 39854d43758b..da414617a54d 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -109,6 +109,7 @@ config MTK_EFUSE
config NVMEM_NINTENDO_OTP
tristate "Nintendo Wii and Wii U OTP Support"
+ depends on WII || COMPILE_TEST
help
This is a driver exposing the OTP of a Nintendo Wii or Wii U console.
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 0c473d75e625..43e615aa12ff 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -110,7 +110,7 @@ config PCI_PF_STUB
config XEN_PCIDEV_FRONTEND
tristate "Xen PCI Frontend"
- depends on X86 && XEN
+ depends on XEN_PV
select PCI_XEN
select XEN_XENBUS_FRONTEND
default y
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index a4ac87c8b4f8..5082102d7d0d 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -2306,7 +2306,7 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_register_and_init);
/**
* devm_pinctrl_unregister() - Resource managed version of pinctrl_unregister().
- * @dev: device for which which resource was allocated
+ * @dev: device for which resource was allocated
* @pctldev: the pinctrl device to unregister.
*/
void devm_pinctrl_unregister(struct device *dev, struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index c001f2ed20f8..8d0f88e9ca88 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -445,6 +445,7 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
u32 wake_mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3);
+ int err;
raw_spin_lock_irqsave(&gpio_dev->lock, flags);
pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
@@ -457,6 +458,15 @@ static int amd_gpio_irq_set_wake(struct irq_data *d, unsigned int on)
writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ if (on)
+ err = enable_irq_wake(gpio_dev->irq);
+ else
+ err = disable_irq_wake(gpio_dev->irq);
+
+ if (err)
+ dev_err(&gpio_dev->pdev->dev, "failed to %s wake-up interrupt\n",
+ on ? "enable" : "disable");
+
return 0;
}
@@ -902,7 +912,6 @@ static struct pinctrl_desc amd_pinctrl_desc = {
static int amd_gpio_probe(struct platform_device *pdev)
{
int ret = 0;
- int irq_base;
struct resource *res;
struct amd_gpio *gpio_dev;
struct gpio_irq_chip *girq;
@@ -925,9 +934,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
if (!gpio_dev->base)
return -ENOMEM;
- irq_base = platform_get_irq(pdev, 0);
- if (irq_base < 0)
- return irq_base;
+ gpio_dev->irq = platform_get_irq(pdev, 0);
+ if (gpio_dev->irq < 0)
+ return gpio_dev->irq;
#ifdef CONFIG_PM_SLEEP
gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
@@ -987,7 +996,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
goto out2;
}
- ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler,
+ ret = devm_request_irq(&pdev->dev, gpio_dev->irq, amd_gpio_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, gpio_dev);
if (ret)
goto out2;
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 95e763424042..1d4317073654 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -98,6 +98,7 @@ struct amd_gpio {
struct resource *res;
struct platform_device *pdev;
u32 *saved_regs;
+ int irq;
};
/* KERNCZ configuration*/
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index ae33e376695f..5ce260f152ce 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -2092,6 +2092,23 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
return false;
}
+static int rockchip_pinconf_defer_output(struct rockchip_pin_bank *bank,
+ unsigned int pin, u32 arg)
+{
+ struct rockchip_pin_output_deferred *cfg;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ cfg->pin = pin;
+ cfg->arg = arg;
+
+ list_add_tail(&cfg->head, &bank->deferred_output);
+
+ return 0;
+}
+
/* set the pin config settings for a specified pin */
static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned num_configs)
@@ -2136,6 +2153,22 @@ static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc != RK_FUNC_GPIO)
return -EINVAL;
+ /*
+ * Check for gpio driver not being probed yet.
+ * The lock makes sure that either gpio-probe has completed
+ * or the gpio driver hasn't probed yet.
+ */
+ mutex_lock(&bank->deferred_lock);
+ if (!gpio || !gpio->direction_output) {
+ rc = rockchip_pinconf_defer_output(bank, pin - bank->pin_base, arg);
+ mutex_unlock(&bank->deferred_lock);
+ if (rc)
+ return rc;
+
+ break;
+ }
+ mutex_unlock(&bank->deferred_lock);
+
rc = gpio->direction_output(gpio, pin - bank->pin_base,
arg);
if (rc)
@@ -2204,6 +2237,11 @@ static int rockchip_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
if (rc != RK_FUNC_GPIO)
return -EINVAL;
+ if (!gpio || !gpio->get) {
+ arg = 0;
+ break;
+ }
+
rc = gpio->get(gpio, pin - bank->pin_base);
if (rc < 0)
return rc;
@@ -2450,6 +2488,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
pin_bank->name, pin);
pdesc++;
}
+
+ INIT_LIST_HEAD(&pin_bank->deferred_output);
+ mutex_init(&pin_bank->deferred_lock);
}
ret = rockchip_pinctrl_parse_dt(pdev, info);
@@ -2716,6 +2757,31 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
return 0;
}
+static int rockchip_pinctrl_remove(struct platform_device *pdev)
+{
+ struct rockchip_pinctrl *info = platform_get_drvdata(pdev);
+ struct rockchip_pin_bank *bank;
+ struct rockchip_pin_output_deferred *cfg;
+ int i;
+
+ of_platform_depopulate(&pdev->dev);
+
+ for (i = 0; i < info->ctrl->nr_banks; i++) {
+ bank = &info->ctrl->pin_banks[i];
+
+ mutex_lock(&bank->deferred_lock);
+ while (!list_empty(&bank->deferred_output)) {
+ cfg = list_first_entry(&bank->deferred_output,
+ struct rockchip_pin_output_deferred, head);
+ list_del(&cfg->head);
+ kfree(cfg);
+ }
+ mutex_unlock(&bank->deferred_lock);
+ }
+
+ return 0;
+}
+
static struct rockchip_pin_bank px30_pin_banks[] = {
PIN_BANK_IOMUX_FLAGS(0, 32, "gpio0", IOMUX_SOURCE_PMU,
IOMUX_SOURCE_PMU,
@@ -3175,6 +3241,7 @@ static const struct of_device_id rockchip_pinctrl_dt_match[] = {
static struct platform_driver rockchip_pinctrl_driver = {
.probe = rockchip_pinctrl_probe,
+ .remove = rockchip_pinctrl_remove,
.driver = {
.name = "rockchip-pinctrl",
.pm = &rockchip_pinctrl_dev_pm_ops,
diff --git a/drivers/pinctrl/pinctrl-rockchip.h b/drivers/pinctrl/pinctrl-rockchip.h
index 589d4d2a98c9..91f10279d084 100644
--- a/drivers/pinctrl/pinctrl-rockchip.h
+++ b/drivers/pinctrl/pinctrl-rockchip.h
@@ -141,6 +141,8 @@ struct rockchip_drv {
* @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode
* @recalced_mask: bit mask to indicate a need to recalculate the mask
* @route_mask: bits describing the routing pins of per bank
+ * @deferred_output: gpio output settings to be applied after the gpio bank has probed
+ * @deferred_lock: mutex for the deferred_output list shared between gpio and pinctrl
*/
struct rockchip_pin_bank {
struct device *dev;
@@ -169,6 +171,8 @@ struct rockchip_pin_bank {
u32 toggle_edge_mode;
u32 recalced_mask;
u32 route_mask;
+ struct list_head deferred_output;
+ struct mutex deferred_lock;
};
/**
@@ -243,6 +247,12 @@ struct rockchip_pin_config {
unsigned int nconfigs;
};
+struct rockchip_pin_output_deferred {
+ struct list_head head;
+ unsigned int pin;
+ u32 arg;
+};
+
/**
* struct rockchip_pin_group: represent group of pins of a pinmux function.
* @name: name of the pin group, used to lookup the group.
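The pinctrl side above only queues (pin, arg) pairs on deferred_output and frees them on remove; the GPIO driver that drains the list after it probes is not part of this diff. A user-space sketch of the presumed handshake, same lock-then-check idea but with illustrative names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * User-space model of the deferred-output handshake: the pinconf side queues
 * (pin, arg) pairs while the GPIO side is not ready; the GPIO side flips the
 * ready flag and drains the queue under the same lock once it has probed.
 * All names here are illustrative, not the driver's.
 */
struct deferred_cfg {
        unsigned int pin;
        unsigned int arg;
        struct deferred_cfg *next;
};

static struct deferred_cfg *deferred_head;
static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static int gpio_ready;

static int pinconf_set_output(unsigned int pin, unsigned int arg)
{
        struct deferred_cfg *cfg;

        pthread_mutex_lock(&deferred_lock);
        if (!gpio_ready) {
                cfg = malloc(sizeof(*cfg));
                if (!cfg) {
                        pthread_mutex_unlock(&deferred_lock);
                        return -1;
                }
                cfg->pin = pin;
                cfg->arg = arg;
                cfg->next = deferred_head;
                deferred_head = cfg;
                pthread_mutex_unlock(&deferred_lock);
                return 0;
        }
        pthread_mutex_unlock(&deferred_lock);
        printf("direct: pin %u -> %u\n", pin, arg);
        return 0;
}

static void gpio_probe_drain(void)
{
        pthread_mutex_lock(&deferred_lock);
        gpio_ready = 1;
        while (deferred_head) {
                struct deferred_cfg *cfg = deferred_head;

                deferred_head = cfg->next;
                printf("deferred: pin %u -> %u\n", cfg->pin, cfg->arg);
                free(cfg);
        }
        pthread_mutex_unlock(&deferred_lock);
}

int main(void)
{
        pinconf_set_output(3, 1);       /* queued: gpio not probed yet */
        gpio_probe_drain();             /* applies the queued setting */
        pinconf_set_output(4, 0);       /* applied directly from now on */
        return 0;
}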
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index afddf6d60dbe..9017ede409c9 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1496,6 +1496,7 @@ static const struct of_device_id sc7280_pinctrl_of_match[] = {
static struct platform_driver sc7280_pinctrl_driver = {
.driver = {
.name = "sc7280-pinctrl",
+ .pm = &msm_pinctrl_dev_pm_ops,
.of_match_table = sc7280_pinctrl_of_match,
},
.probe = sc7280_pinctrl_probe,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 98bf0e2a2a8d..b2562e893139 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016-2021 The Linux Foundation. All rights reserved.
*/
#include <linux/gpio/driver.h>
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/spmi.h>
#include <linux/types.h>
#include <dt-bindings/pinctrl/qcom,pmic-gpio.h>
@@ -171,6 +172,8 @@ struct pmic_gpio_state {
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
struct irq_chip irq;
+ u8 usid;
+ u8 pid_base;
};
static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -949,12 +952,36 @@ static int pmic_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
unsigned int *parent_hwirq,
unsigned int *parent_type)
{
- *parent_hwirq = child_hwirq + 0xc0;
+ struct pmic_gpio_state *state = gpiochip_get_data(chip);
+
+ *parent_hwirq = child_hwirq + state->pid_base;
*parent_type = child_type;
return 0;
}
+static void *pmic_gpio_populate_parent_fwspec(struct gpio_chip *chip,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct pmic_gpio_state *state = gpiochip_get_data(chip);
+ struct irq_fwspec *fwspec;
+
+ fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = chip->irq.parent_domain->fwnode;
+
+ fwspec->param_count = 4;
+ fwspec->param[0] = state->usid;
+ fwspec->param[1] = parent_hwirq;
+ /* param[2] must be left as 0 */
+ fwspec->param[3] = parent_type;
+
+ return fwspec;
+}
+
static int pmic_gpio_probe(struct platform_device *pdev)
{
struct irq_domain *parent_domain;
@@ -965,6 +992,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
struct pmic_gpio_pad *pad, *pads;
struct pmic_gpio_state *state;
struct gpio_irq_chip *girq;
+ const struct spmi_device *parent_spmi_dev;
int ret, npins, i;
u32 reg;
@@ -984,6 +1012,9 @@ static int pmic_gpio_probe(struct platform_device *pdev)
state->dev = &pdev->dev;
state->map = dev_get_regmap(dev->parent, NULL);
+ parent_spmi_dev = to_spmi_device(dev->parent);
+ state->usid = parent_spmi_dev->usid;
+ state->pid_base = reg >> 8;
pindesc = devm_kcalloc(dev, npins, sizeof(*pindesc), GFP_KERNEL);
if (!pindesc)
@@ -1059,7 +1090,7 @@ static int pmic_gpio_probe(struct platform_device *pdev)
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
girq->parent_domain = parent_domain;
girq->child_to_parent_hwirq = pmic_gpio_child_to_parent_hwirq;
- girq->populate_parent_alloc_arg = gpiochip_populate_parent_fwspec_fourcell;
+ girq->populate_parent_alloc_arg = pmic_gpio_populate_parent_fwspec;
girq->child_offset_to_irq = pmic_gpio_child_offset_to_irq;
girq->child_irq_domain_ops.translate = pmic_gpio_domain_translate;
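pmic_gpio_populate_parent_fwspec() replaces the generic four-cell helper because the parent hwirq now depends on per-chip data: the parent SPMI device's USID and the peripheral base taken from reg >> 8. A user-space model of the four-cell specifier it builds, with made-up register values (0xc000 >> 8 reproduces the old hard-coded 0xc0 base):

#include <stdio.h>

/*
 * Model of the 4-cell SPMI GPIO interrupt specifier built by the driver:
 * { usid, pid_base + child_hwirq, 0, trigger type }. The register values
 * below are invented for illustration only.
 */
struct fwspec4 {
        unsigned int param[4];
};

static struct fwspec4 make_fwspec(unsigned int usid, unsigned int reg,
                                  unsigned int child_hwirq, unsigned int type)
{
        struct fwspec4 fw = {
                .param = {
                        usid,                     /* which PMIC on the bus   */
                        (reg >> 8) + child_hwirq, /* peripheral id + pin     */
                        0,                        /* cell 2 must stay 0      */
                        type,                     /* IRQ trigger type        */
                },
        };

        return fw;
}

int main(void)
{
        /* GPIO block at SPMI address 0xc000, pin 5, falling edge (type 2). */
        struct fwspec4 fw = make_fwspec(2, 0xc000, 5, 2);

        printf("fwspec: %u %u %u %u\n",
               fw.param[0], fw.param[1], fw.param[2], fw.param[3]);
        return 0;
}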
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index 3481479a2942..d6a7c896ac86 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -71,7 +71,7 @@
#define AMD_CPU_ID_YC 0x14B5
#define PMC_MSG_DELAY_MIN_US 100
-#define RESPONSE_REGISTER_LOOP_MAX 200
+#define RESPONSE_REGISTER_LOOP_MAX 20000
#define SOC_SUBSYSTEM_IP_MAX 12
#define DELAY_MIN_US 2000
diff --git a/drivers/platform/x86/dell/Kconfig b/drivers/platform/x86/dell/Kconfig
index 821aba31821c..42513eab1d06 100644
--- a/drivers/platform/x86/dell/Kconfig
+++ b/drivers/platform/x86/dell/Kconfig
@@ -166,8 +166,7 @@ config DELL_WMI
config DELL_WMI_PRIVACY
bool "Dell WMI Hardware Privacy Support"
- depends on DELL_WMI
- depends on LEDS_TRIGGER_AUDIO
+ depends on LEDS_TRIGGER_AUDIO = y || DELL_WMI = LEDS_TRIGGER_AUDIO
help
This option adds integration with the "Dell Hardware Privacy"
feature of Dell laptops to the dell-wmi driver.
diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c
index 7f3a03f937f6..d53634c8a6e0 100644
--- a/drivers/platform/x86/gigabyte-wmi.c
+++ b/drivers/platform/x86/gigabyte-wmi.c
@@ -144,6 +144,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE V2"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 GAMING X V2"),
+ DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550I AORUS PRO AX"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),
DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index a33a5826e81a..08598942a6d7 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -118,12 +118,30 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
{ }
};
+/*
+ * Some devices, even non-convertible ones, can send incorrect SW_TABLET_MODE
+ * reports. Accept such reports only from devices in this list.
+ */
+static const struct dmi_system_id dmi_auto_add_switch[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "31" /* Convertible */),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_CHASSIS_TYPE, "32" /* Detachable */),
+ },
+ },
+ {} /* Array terminator */
+};
+
struct intel_hid_priv {
struct input_dev *input_dev;
struct input_dev *array;
struct input_dev *switches;
bool wakeup_mode;
- bool dual_accel;
+ bool auto_add_switch;
};
#define HID_EVENT_FILTER_UUID "eeec56b3-4442-408f-a792-4edd4d758054"
@@ -452,10 +470,8 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
* Some convertibles have an unreliable VGBS return which could cause incorrect
* SW_TABLET_MODE report, in these cases we enable support when receiving
* the first event instead of during driver setup.
- *
- * See dual_accel_detect.h for more info on the dual_accel check.
*/
- if (!priv->switches && !priv->dual_accel && (event == 0xcc || event == 0xcd)) {
+ if (!priv->switches && priv->auto_add_switch && (event == 0xcc || event == 0xcd)) {
dev_info(&device->dev, "switch event received, enable switches supports\n");
err = intel_hid_switches_setup(device);
if (err)
@@ -596,7 +612,8 @@ static int intel_hid_probe(struct platform_device *device)
return -ENOMEM;
dev_set_drvdata(&device->dev, priv);
- priv->dual_accel = dual_accel_detect();
+ /* See dual_accel_detect.h for more info on the dual_accel check. */
+ priv->auto_add_switch = dmi_check_system(dmi_auto_add_switch) && !dual_accel_detect();
err = intel_hid_input_setup(device);
if (err) {
diff --git a/drivers/platform/x86/intel/punit_ipc.c b/drivers/platform/x86/intel/punit_ipc.c
index f58b8543f6ac..66bb39fd0ef9 100644
--- a/drivers/platform/x86/intel/punit_ipc.c
+++ b/drivers/platform/x86/intel/punit_ipc.c
@@ -8,7 +8,6 @@
* which provide mailbox interface for power management usage.
*/
-#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -319,7 +318,7 @@ static struct platform_driver intel_punit_ipc_driver = {
.remove = intel_punit_ipc_remove,
.driver = {
.name = "intel_punit_ipc",
- .acpi_match_table = ACPI_PTR(punit_ipc_acpi_ids),
+ .acpi_match_table = punit_ipc_acpi_ids,
},
};
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 3e520d5bca07..88b551caeaaf 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -655,7 +655,7 @@ static int acpi_add(struct acpi_device *device)
goto out_platform_registered;
}
product = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (strlen(product) > 4)
+ if (product && strlen(product) > 4)
switch (product[4]) {
case '5':
case '6':
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 0e1451b1d9c6..033f797861d8 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -100,10 +100,10 @@ static const struct ts_dmi_data chuwi_hi10_air_data = {
};
static const struct property_entry chuwi_hi10_plus_props[] = {
- PROPERTY_ENTRY_U32("touchscreen-min-x", 0),
- PROPERTY_ENTRY_U32("touchscreen-min-y", 5),
- PROPERTY_ENTRY_U32("touchscreen-size-x", 1914),
- PROPERTY_ENTRY_U32("touchscreen-size-y", 1283),
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 12),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 10),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1908),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1270),
PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hi10plus.fw"),
PROPERTY_ENTRY_U32("silead,max-fingers", 10),
PROPERTY_ENTRY_BOOL("silead,home-button"),
@@ -111,6 +111,15 @@ static const struct property_entry chuwi_hi10_plus_props[] = {
};
static const struct ts_dmi_data chuwi_hi10_plus_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hi10plus.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 34056,
+ .sha256 = { 0xfd, 0x0a, 0x08, 0x08, 0x3c, 0xa6, 0x34, 0x4e,
+ 0x2c, 0x49, 0x9c, 0xcd, 0x7d, 0x44, 0x9d, 0x38,
+ 0x10, 0x68, 0xb5, 0xbd, 0xb7, 0x2a, 0x63, 0xb5,
+ 0x67, 0x0b, 0x96, 0xbd, 0x89, 0x67, 0x85, 0x09 },
+ },
.acpi_name = "MSSL0017:00",
.properties = chuwi_hi10_plus_props,
};
@@ -141,6 +150,33 @@ static const struct ts_dmi_data chuwi_hi10_pro_data = {
.properties = chuwi_hi10_pro_props,
};
+static const struct property_entry chuwi_hibook_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-x", 30),
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 4),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1892),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1276),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-chuwi-hibook.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data chuwi_hibook_data = {
+ .embedded_fw = {
+ .name = "silead/gsl1680-chuwi-hibook.fw",
+ .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+ .length = 40392,
+ .sha256 = { 0xf7, 0xc0, 0xe8, 0x5a, 0x6c, 0xf2, 0xeb, 0x8d,
+ 0x12, 0xc4, 0x45, 0xbf, 0x55, 0x13, 0x4c, 0x1a,
+ 0x13, 0x04, 0x31, 0x08, 0x65, 0x73, 0xf7, 0xa8,
+ 0x1b, 0x7d, 0x59, 0xc9, 0xe6, 0x97, 0xf7, 0x38 },
+ },
+ .acpi_name = "MSSL0017:00",
+ .properties = chuwi_hibook_props,
+};
+
static const struct property_entry chuwi_vi8_props[] = {
PROPERTY_ENTRY_U32("touchscreen-min-x", 4),
PROPERTY_ENTRY_U32("touchscreen-min-y", 6),
@@ -980,6 +1016,16 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Chuwi HiBook (CWI514) */
+ .driver_data = (void *)&chuwi_hibook_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ },
+ {
/* Chuwi Vi8 (CWI506) */
.driver_data = (void *)&chuwi_vi8_data,
.matches = {
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index f02bedf41264..458218f88c5e 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -174,6 +174,7 @@ config PTP_1588_CLOCK_OCP
depends on I2C && MTD
depends on SERIAL_8250
depends on !S390
+ depends on COMMON_CLK
select NET_DEVLINK
help
This driver adds support for an OpenCompute time card.
diff --git a/drivers/regulator/max14577-regulator.c b/drivers/regulator/max14577-regulator.c
index 1d78b455cc48..e34face736f4 100644
--- a/drivers/regulator/max14577-regulator.c
+++ b/drivers/regulator/max14577-regulator.c
@@ -269,5 +269,3 @@ module_exit(max14577_regulator_exit);
MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
MODULE_DESCRIPTION("Maxim 14577/77836 regulator driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:max14577-regulator");
-MODULE_ALIAS("platform:max77836-regulator");
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 6cca910a76de..7f458d510483 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -991,7 +991,7 @@ static const struct rpmh_vreg_init_data pm8009_1_vreg_data[] = {
RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
- RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
{}
};
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index f3c656975e05..93695d535380 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -262,10 +262,12 @@ static int blacklist_parse_proc_parameters(char *buf)
if (strcmp("free", parm) == 0) {
rc = blacklist_parse_parameters(buf, free, 0);
- /* There could be subchannels without proper devices connected.
- * evaluate all the entries
+ /*
+ * Evaluate the subchannels without an online device. This way,
+ * no path-verification will be triggered on those subchannels
+ * and it avoids unnecessary delays.
*/
- css_schedule_eval_all();
+ css_schedule_eval_cond(CSS_EVAL_NOT_ONLINE, 0);
} else if (strcmp("add", parm) == 0)
rc = blacklist_parse_parameters(buf, add, 0);
else if (strcmp("purge", parm) == 0)
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 2ec741106cb6..f0538609dfe4 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -77,12 +77,13 @@ EXPORT_SYMBOL(ccwgroup_set_online);
/**
* ccwgroup_set_offline() - disable a ccwgroup device
* @gdev: target ccwgroup device
+ * @call_gdrv: Call the registered gdrv set_offline function
*
* This function attempts to put the ccwgroup device into the offline state.
* Returns:
* %0 on success and a negative error value on failure.
*/
-int ccwgroup_set_offline(struct ccwgroup_device *gdev)
+int ccwgroup_set_offline(struct ccwgroup_device *gdev, bool call_gdrv)
{
struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver);
int ret = -EINVAL;
@@ -91,11 +92,16 @@ int ccwgroup_set_offline(struct ccwgroup_device *gdev)
return -EAGAIN;
if (gdev->state == CCWGROUP_OFFLINE)
goto out;
+ if (!call_gdrv) {
+ ret = 0;
+ goto offline;
+ }
if (gdrv->set_offline)
ret = gdrv->set_offline(gdev);
if (ret)
goto out;
+offline:
gdev->state = CCWGROUP_OFFLINE;
out:
atomic_set(&gdev->onoff, 0);
@@ -124,7 +130,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
if (value == 1)
ret = ccwgroup_set_online(gdev);
else if (value == 0)
- ret = ccwgroup_set_offline(gdev);
+ ret = ccwgroup_set_offline(gdev, true);
else
ret = -EINVAL;
out:
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 3377097e65de..44461928aab8 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -788,27 +788,49 @@ static int __unset_registered(struct device *dev, void *data)
return 0;
}
-void css_schedule_eval_all_unreg(unsigned long delay)
+static int __unset_online(struct device *dev, void *data)
+{
+ struct idset *set = data;
+ struct subchannel *sch = to_subchannel(dev);
+ struct ccw_device *cdev = sch_get_cdev(sch);
+
+ if (cdev && cdev->online)
+ idset_sch_del(set, sch->schid);
+
+ return 0;
+}
+
+void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
unsigned long flags;
- struct idset *unreg_set;
+ struct idset *set;
/* Find unregistered subchannels. */
- unreg_set = idset_sch_new();
- if (!unreg_set) {
+ set = idset_sch_new();
+ if (!set) {
/* Fallback. */
css_schedule_eval_all();
return;
}
- idset_fill(unreg_set);
- bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+ idset_fill(set);
+ switch (cond) {
+ case CSS_EVAL_UNREG:
+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_registered);
+ break;
+ case CSS_EVAL_NOT_ONLINE:
+ bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
+ break;
+ default:
+ break;
+ }
+
/* Apply to slow_subchannel_set. */
spin_lock_irqsave(&slow_subchannel_lock, flags);
- idset_add_set(slow_subchannel_set, unreg_set);
+ idset_add_set(slow_subchannel_set, set);
atomic_set(&css_eval_scheduled, 1);
queue_delayed_work(cio_work_q, &slow_path_work, delay);
spin_unlock_irqrestore(&slow_subchannel_lock, flags);
- idset_free(unreg_set);
+ idset_free(set);
}
void css_wait_for_slow_path(void)
@@ -820,7 +842,7 @@ void css_wait_for_slow_path(void)
void css_schedule_reprobe(void)
{
/* Schedule with a delay to allow merging of subsequent calls. */
- css_schedule_eval_all_unreg(1 * HZ);
+ css_schedule_eval_cond(CSS_EVAL_UNREG, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index c98522cbe276..ede0b905bc6f 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -34,6 +34,14 @@
#define SNID_STATE3_MULTI_PATH 1
#define SNID_STATE3_SINGLE_PATH 0
+/*
+ * Conditions used to specify which subchannels need evaluation
+ */
+enum css_eval_cond {
+ CSS_EVAL_UNREG, /* unregistered subchannels */
+ CSS_EVAL_NOT_ONLINE /* sch without an online-device */
+};
+
struct path_state {
__u8 state1 : 2; /* path state value 1 */
__u8 state2 : 2; /* path state value 2 */
@@ -136,7 +144,7 @@ static inline struct channel_subsystem *css_by_id(u8 cssid)
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
-void css_schedule_eval_all_unreg(unsigned long delay);
+void css_schedule_eval_cond(enum css_eval_cond, unsigned long delay);
int css_complete_work(void);
int sch_is_pseudo_sch(struct subchannel *);
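css_schedule_eval_cond() keeps the old fill-everything approach and then prunes the set according to the requested condition. A small user-space model of that fill-then-filter step, with a plain array standing in for the idset:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the subchannel list and the two filter conditions. */
enum eval_cond { EVAL_UNREG, EVAL_NOT_ONLINE };

struct sch {
        bool registered;
        bool online;
};

#define NR_SCH 4

static void schedule_eval_cond(const struct sch *sch, bool *set,
                               enum eval_cond cond)
{
        int i;

        /* Start with every subchannel in the set (the idset_fill step) ... */
        for (i = 0; i < NR_SCH; i++)
                set[i] = true;

        /* ... then drop the ones the condition does not care about. */
        for (i = 0; i < NR_SCH; i++) {
                if (cond == EVAL_UNREG && sch[i].registered)
                        set[i] = false;
                if (cond == EVAL_NOT_ONLINE && sch[i].online)
                        set[i] = false;
        }
}

int main(void)
{
        struct sch sch[NR_SCH] = {
                { .registered = true,  .online = true  },
                { .registered = true,  .online = false },
                { .registered = false, .online = false },
                { .registered = false, .online = false },
        };
        bool set[NR_SCH];
        int i;

        schedule_eval_cond(sch, set, EVAL_NOT_ONLINE);
        for (i = 0; i < NR_SCH; i++)
                printf("sch %d: %s\n", i, set[i] ? "evaluate" : "skip");
        return 0;
}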
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 118939a7729a..623d5269a52c 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -361,6 +361,7 @@ err_list:
mutex_lock(&matrix_dev->lock);
list_del(&matrix_mdev->node);
mutex_unlock(&matrix_dev->lock);
+ vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
err_dec_available:
atomic_inc(&matrix_dev->available_instances);
@@ -376,9 +377,10 @@ static void vfio_ap_mdev_remove(struct mdev_device *mdev)
mutex_lock(&matrix_dev->lock);
vfio_ap_mdev_reset_queues(matrix_mdev);
list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->lock);
+ vfio_uninit_group_dev(&matrix_mdev->vdev);
kfree(matrix_mdev);
atomic_inc(&matrix_dev->available_instances);
- mutex_unlock(&matrix_dev->lock);
}
static ssize_t name_show(struct mdev_type *mtype,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 535a60b3946d..a5aa0bdc61d6 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -858,7 +858,6 @@ struct qeth_card {
struct napi_struct napi;
struct qeth_rx rx;
struct delayed_work buffer_reclaim_work;
- struct work_struct close_dev_work;
};
static inline bool qeth_card_hw_is_reachable(struct qeth_card *card)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 41ca6273b750..e9807d2996a9 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -70,15 +70,6 @@ static void qeth_issue_next_read_cb(struct qeth_card *card,
static int qeth_qdio_establish(struct qeth_card *);
static void qeth_free_qdio_queues(struct qeth_card *card);
-static void qeth_close_dev_handler(struct work_struct *work)
-{
- struct qeth_card *card;
-
- card = container_of(work, struct qeth_card, close_dev_work);
- QETH_CARD_TEXT(card, 2, "cldevhdl");
- ccwgroup_set_offline(card->gdev);
-}
-
static const char *qeth_get_cardname(struct qeth_card *card)
{
if (IS_VM_NIC(card)) {
@@ -202,6 +193,9 @@ static void qeth_clear_working_pool_list(struct qeth_card *card)
&card->qdio.in_buf_pool.entry_list, list)
list_del(&pool_entry->list);
+ if (!queue)
+ return;
+
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
@@ -792,10 +786,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_STOPLAN:
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
- "Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
+ "Adjacent port of interface %s is no longer in reflective relay mode, trigger recovery\n",
netdev_name(card->dev));
- schedule_work(&card->close_dev_work);
+ /* Set offline, then probably fail to set online: */
+ qeth_schedule_recovery(card);
} else {
+ /* stay online for subsequent STARTLAN */
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
netdev_name(card->dev), card->info.chpid);
@@ -1537,7 +1533,6 @@ static void qeth_setup_card(struct qeth_card *card)
INIT_LIST_HEAD(&card->ipato.entries);
qeth_init_qdio_info(card);
INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
- INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
hash_init(card->rx_mode_addrs);
hash_init(card->local_addrs4);
hash_init(card->local_addrs6);
@@ -5519,7 +5514,8 @@ static int qeth_do_reset(void *data)
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
} else {
- ccwgroup_set_offline(card->gdev);
+ qeth_set_offline(card, disc, true);
+ ccwgroup_set_offline(card->gdev, false);
dev_warn(&card->gdev->dev,
"The qeth device driver failed to recover an error on the device\n");
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 72e84ff9fea5..dc6c00768d91 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2307,7 +2307,6 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
if (gdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED) {
priv = netdev_priv(card->dev);
if (priv->brport_features & BR_LEARNING_SYNC) {
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 3a523e700a5a..6fd3e288f059 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1969,7 +1969,6 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_set_offline(card, card->discipline, false);
- cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
unregister_netdev(card->dev);
diff --git a/drivers/scsi/arm/Kconfig b/drivers/scsi/arm/Kconfig
index f34badc75196..9f64133f976a 100644
--- a/drivers/scsi/arm/Kconfig
+++ b/drivers/scsi/arm/Kconfig
@@ -10,17 +10,6 @@ config SCSI_ACORNSCSI_3
This enables support for the Acorn SCSI card (aka30). If you have an
Acorn system with one of these, say Y. If unsure, say N.
-config SCSI_ACORNSCSI_TAGGED_QUEUE
- bool "Support SCSI 2 Tagged queueing"
- depends on SCSI_ACORNSCSI_3
- help
- Say Y here to enable tagged queuing support on the Acorn SCSI card.
-
- This is a feature of SCSI-2 which improves performance: the host
- adapter can send several SCSI commands to a device's queue even if
- previous commands haven't finished yet. Some SCSI devices don't
- implement this properly, so the safe answer is N.
-
config SCSI_ACORNSCSI_SYNC
bool "Support SCSI 2 Synchronous Transfers"
depends on SCSI_ACORNSCSI_3
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 4a84599ff491..b4cb5fb19998 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -52,12 +52,8 @@
* You can tell if you have a device that supports tagged queueing by
* cat'ing (eg) /proc/scsi/acornscsi/0 and seeing if the SCSI revision is reported
* as '2 TAG'.
- *
- * Also note that CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE is normally set in the config
- * scripts, but disabled here. Once debugged, remove the #undef, otherwise to debug,
- * comment out the undef.
*/
-#undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
+
/*
* SCSI-II Synchronous transfer support.
*
@@ -171,7 +167,7 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
unsigned int result);
static int acornscsi_reconnect_finish(AS_Host *host);
static void acornscsi_dma_cleanup(AS_Host *host);
-static void acornscsi_abortcmd(AS_Host *host, unsigned char tag);
+static void acornscsi_abortcmd(AS_Host *host);
/* ====================================================================================
* Miscellaneous
@@ -741,17 +737,6 @@ intr_ret_t acornscsi_kick(AS_Host *host)
#endif
if (from_queue) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- /*
- * tagged queueing - allocate a new tag to this command
- */
- if (SCpnt->device->simple_tags) {
- SCpnt->device->current_tag += 1;
- if (SCpnt->device->current_tag == 0)
- SCpnt->device->current_tag = 1;
- SCpnt->tag = SCpnt->device->current_tag;
- } else
-#endif
set_bit(SCpnt->device->id * 8 +
(u8)(SCpnt->device->lun & 0x07), host->busyluns);
@@ -1192,7 +1177,7 @@ void acornscsi_dma_intr(AS_Host *host)
* the device recognises the attention.
*/
if (dmac_read(host, DMAC_STATUS) & STATUS_RQ0) {
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
dmac_write(host, DMAC_TXCNTLO, 0);
dmac_write(host, DMAC_TXCNTHI, 0);
@@ -1560,23 +1545,6 @@ void acornscsi_message(AS_Host *host)
acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
switch (host->scsi.last_message) {
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- case HEAD_OF_QUEUE_TAG:
- case ORDERED_QUEUE_TAG:
- case SIMPLE_QUEUE_TAG:
- /*
- * ANSI standard says: (Section SCSI-2 Rev. 10c Sect 5.6.17)
- * If a target does not implement tagged queuing and a queue tag
- * message is received, it shall respond with a MESSAGE REJECT
- * message and accept the I/O process as if it were untagged.
- */
- printk(KERN_NOTICE "scsi%d.%c: disabling tagged queueing\n",
- host->host->host_no, acornscsi_target(host));
- host->SCpnt->device->simple_tags = 0;
- set_bit(host->SCpnt->device->id * 8 +
- (u8)(host->SCpnt->device->lun & 0x7), host->busyluns);
- break;
-#endif
case EXTENDED_MESSAGE | (EXTENDED_SDTR << 8):
/*
* Target can't handle synchronous transfers
@@ -1687,24 +1655,11 @@ void acornscsi_buildmessages(AS_Host *host)
#if 0
/* does the device need the current command aborted */
if (cmd_aborted) {
- acornscsi_abortcmd(host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return;
}
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- if (host->SCpnt->tag) {
- unsigned int tag_type;
-
- if (host->SCpnt->cmnd[0] == REQUEST_SENSE ||
- host->SCpnt->cmnd[0] == TEST_UNIT_READY ||
- host->SCpnt->cmnd[0] == INQUIRY)
- tag_type = HEAD_OF_QUEUE_TAG;
- else
- tag_type = SIMPLE_QUEUE_TAG;
- msgqueue_addmsg(&host->scsi.msgs, 2, tag_type, host->SCpnt->tag);
- }
-#endif
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
if (host->device[host->SCpnt->device->id].sync_state == SYNC_NEGOCIATE) {
@@ -1798,7 +1753,7 @@ int acornscsi_reconnect(AS_Host *host)
"to reconnect with\n",
host->host->host_no, '0' + target);
acornscsi_dumplog(host, target);
- acornscsi_abortcmd(host, 0);
+ acornscsi_abortcmd(host);
if (host->SCpnt) {
queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt);
host->SCpnt = NULL;
@@ -1821,7 +1776,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
host->scsi.disconnectable = 0;
if (host->SCpnt->device->id == host->scsi.reconnected.target &&
host->SCpnt->device->lun == host->scsi.reconnected.lun &&
- host->SCpnt->tag == host->scsi.reconnected.tag) {
+ scsi_cmd_to_tag(host->SCpnt) == host->scsi.reconnected.tag) {
#if (DEBUG & (DEBUG_QUEUES|DEBUG_DISCON))
DBG(host->SCpnt, printk("scsi%d.%c: reconnected",
host->host->host_no, acornscsi_target(host)));
@@ -1848,7 +1803,7 @@ int acornscsi_reconnect_finish(AS_Host *host)
}
if (!host->SCpnt)
- acornscsi_abortcmd(host, host->scsi.reconnected.tag);
+ acornscsi_abortcmd(host);
else {
/*
* Restore data pointer from SAVED pointers.
@@ -1889,21 +1844,15 @@ void acornscsi_disconnect_unexpected(AS_Host *host)
* Function: void acornscsi_abortcmd(AS_Host *host)
* Purpose : abort a currently executing command
* Params : host - host with connected command to abort
- * tag - tag to abort
*/
static
-void acornscsi_abortcmd(AS_Host *host, unsigned char tag)
+void acornscsi_abortcmd(AS_Host *host)
{
host->scsi.phase = PHASE_ABORTED;
sbic_arm_write(host, SBIC_CMND, CMND_ASSERTATN);
msgqueue_flush(&host->scsi.msgs);
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- if (tag)
- msgqueue_addmsg(&host->scsi.msgs, 2, ABORT_TAG, tag);
- else
-#endif
- msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
+ msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
}
/* ==========================================================================================
@@ -1993,7 +1942,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTING, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
}
return INTR_PROCESSING;
@@ -2029,7 +1978,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
printk(KERN_ERR "scsi%d.%c: PHASE_CONNECTED, SSR %02X?\n",
host->host->host_no, acornscsi_target(host), ssr);
acornscsi_dumplog(host, host->SCpnt ? host->SCpnt->device->id : 8);
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
}
return INTR_PROCESSING;
@@ -2075,20 +2024,20 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
case 0x18: /* -> PHASE_DATAOUT */
/* COMMAND -> DATA OUT */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE;
case 0x19: /* -> PHASE_DATAIN */
/* COMMAND -> DATA IN */
if (host->scsi.SCp.sent_command != host->SCpnt->cmd_len)
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE;
@@ -2156,7 +2105,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA OUT */
acornscsi_dma_setup(host, DMA_OUT);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAOUT;
return INTR_IDLE;
@@ -2165,7 +2114,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
/* MESSAGE IN -> DATA IN */
acornscsi_dma_setup(host, DMA_IN);
if (!acornscsi_starttransfer(host))
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
host->scsi.phase = PHASE_DATAIN;
return INTR_IDLE;
@@ -2206,7 +2155,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) {
case 0x19: /* -> PHASE_DATAIN */
case 0x89: /* -> PHASE_DATAIN */
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */
@@ -2255,7 +2204,7 @@ intr_ret_t acornscsi_sbicintr(AS_Host *host, int in_irq)
switch (ssr) {
case 0x18: /* -> PHASE_DATAOUT */
case 0x88: /* -> PHASE_DATAOUT */
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
return INTR_IDLE;
case 0x1b: /* -> PHASE_STATUSIN */
@@ -2482,7 +2431,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
SCpnt->scsi_done = done;
SCpnt->host_scribble = NULL;
SCpnt->result = 0;
- SCpnt->tag = 0;
SCpnt->SCp.phase = (int)acornscsi_datadirection(SCpnt->cmnd[0]);
SCpnt->SCp.sent_command = 0;
SCpnt->SCp.scsi_xferred = 0;
@@ -2581,7 +2529,7 @@ static enum res_abort acornscsi_do_abort(AS_Host *host, struct scsi_cmnd *SCpnt)
break;
default:
- acornscsi_abortcmd(host, host->SCpnt->tag);
+ acornscsi_abortcmd(host);
res = res_snooze;
}
local_irq_restore(flags);
@@ -2747,9 +2695,6 @@ char *acornscsi_info(struct Scsi_Host *host)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- " TAG"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
@@ -2770,9 +2715,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
#ifdef CONFIG_SCSI_ACORNSCSI_SYNC
" SYNC"
#endif
-#ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
- " TAG"
-#endif
#if (DEBUG & DEBUG_NO_WRITE)
" NOWRITE (" __stringify(NO_WRITE) ")"
#endif
@@ -2827,9 +2769,8 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
seq_printf(m, "Device/Lun TaggedQ Sync\n");
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
- seq_printf(m, "%3sabled(%3d) ",
- scd->simple_tags ? "en" : "dis",
- scd->current_tag);
+ seq_printf(m, "%3sabled ",
+ scd->simple_tags ? "en" : "dis");
else
seq_printf(m, "unsupported ");
diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
index 9c4458a99025..cf71ef488e36 100644
--- a/drivers/scsi/arm/fas216.c
+++ b/drivers/scsi/arm/fas216.c
@@ -77,7 +77,6 @@
* I was thinking that this was a good chip until I found this restriction ;(
*/
#define SCSI2_SYNC
-#undef SCSI2_TAG
#undef DEBUG_CONNECT
#undef DEBUG_MESSAGES
@@ -990,7 +989,7 @@ fas216_reselected_intr(FAS216_Info *info)
info->scsi.disconnectable = 0;
if (info->SCpnt->device->id == target &&
info->SCpnt->device->lun == lun &&
- info->SCpnt->tag == tag) {
+ scsi_cmd_to_rq(info->SCpnt)->tag == tag) {
fas216_log(info, LOG_CONNECT, "reconnected previously executing command");
} else {
queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt);
@@ -1791,8 +1790,9 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
/*
* add tag message if required
*/
- if (SCpnt->tag)
- msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG, SCpnt->tag);
+ if (SCpnt->device->simple_tags)
+ msgqueue_addmsg(&info->scsi.msgs, 2, SIMPLE_QUEUE_TAG,
+ scsi_cmd_to_rq(SCpnt)->tag);
do {
#ifdef SCSI2_SYNC
@@ -1815,20 +1815,8 @@ static void fas216_start_command(FAS216_Info *info, struct scsi_cmnd *SCpnt)
static void fas216_allocate_tag(FAS216_Info *info, struct scsi_cmnd *SCpnt)
{
-#ifdef SCSI2_TAG
- /*
- * tagged queuing - allocate a new tag to this command
- */
- if (SCpnt->device->simple_tags && SCpnt->cmnd[0] != REQUEST_SENSE &&
- SCpnt->cmnd[0] != INQUIRY) {
- SCpnt->device->current_tag += 1;
- if (SCpnt->device->current_tag == 0)
- SCpnt->device->current_tag = 1;
- SCpnt->tag = SCpnt->device->current_tag;
- } else
-#endif
- set_bit(SCpnt->device->id * 8 +
- (u8)(SCpnt->device->lun & 0x7), info->busyluns);
+ set_bit(SCpnt->device->id * 8 +
+ (u8)(SCpnt->device->lun & 0x7), info->busyluns);
info->stats.removes += 1;
switch (SCpnt->cmnd[0]) {
@@ -2117,7 +2105,6 @@ request_sense:
init_SCp(SCpnt);
SCpnt->SCp.Message = 0;
SCpnt->SCp.Status = 0;
- SCpnt->tag = 0;
SCpnt->host_scribble = (void *)fas216_rq_sns_done;
/*
@@ -2223,7 +2210,6 @@ static int fas216_queue_command_lck(struct scsi_cmnd *SCpnt,
init_SCp(SCpnt);
info->stats.queues += 1;
- SCpnt->tag = 0;
spin_lock(&info->host_lock);
@@ -3003,9 +2989,8 @@ void fas216_print_devices(FAS216_Info *info, struct seq_file *m)
dev = &info->device[scd->id];
seq_printf(m, " %d/%llu ", scd->id, scd->lun);
if (scd->tagged_supported)
- seq_printf(m, "%3sabled(%3d) ",
- scd->simple_tags ? "en" : "dis",
- scd->current_tag);
+ seq_printf(m, "%3sabled ",
+ scd->simple_tags ? "en" : "dis");
else
seq_puts(m, "unsupported ");
diff --git a/drivers/scsi/arm/queue.c b/drivers/scsi/arm/queue.c
index e5559f27669d..c6f71a7d1b8e 100644
--- a/drivers/scsi/arm/queue.c
+++ b/drivers/scsi/arm/queue.c
@@ -214,7 +214,7 @@ struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
list_for_each(l, &queue->head) {
QE_t *q = list_entry(l, QE_t, list);
if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun &&
- q->SCpnt->tag == tag) {
+ scsi_cmd_to_rq(q->SCpnt)->tag == tag) {
SCpnt = __queue_remove(queue, l);
break;
}
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 390b07bf92b9..ccbded3353bd 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -1254,3 +1254,4 @@ MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6);
+MODULE_SOFTDEP("pre: cxgb4");
diff --git a/drivers/scsi/elx/efct/efct_lio.c b/drivers/scsi/elx/efct/efct_lio.c
index bb3b460dc0bc..4d73e92909ab 100644
--- a/drivers/scsi/elx/efct/efct_lio.c
+++ b/drivers/scsi/elx/efct/efct_lio.c
@@ -880,11 +880,11 @@ efct_lio_npiv_drop_nport(struct se_wwn *wwn)
struct efct *efct = lio_vport->efct;
unsigned long flags = 0;
- spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
-
if (lio_vport->fc_vport)
fc_vport_terminate(lio_vport->fc_vport);
+ spin_lock_irqsave(&efct->tgt_efct.efct_lio_lock, flags);
+
list_for_each_entry_safe(vport, next_vport, &efct->tgt_efct.vport_list,
list_entry) {
if (vport->lio_vport == lio_vport) {
diff --git a/drivers/scsi/elx/libefc/efc_device.c b/drivers/scsi/elx/libefc/efc_device.c
index 725ca2a23fb2..52be01333c6e 100644
--- a/drivers/scsi/elx/libefc/efc_device.c
+++ b/drivers/scsi/elx/libefc/efc_device.c
@@ -928,22 +928,21 @@ __efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
break;
case EFC_EVT_NPORT_TOPOLOGY_NOTIFY: {
- enum efc_nport_topology topology =
- (enum efc_nport_topology)arg;
+ enum efc_nport_topology *topology = arg;
WARN_ON(node->nport->domain->attached);
WARN_ON(node->send_ls_acc != EFC_NODE_SEND_LS_ACC_PLOGI);
node_printf(node, "topology notification, topology=%d\n",
- topology);
+ *topology);
/* At the time the PLOGI was received, the topology was unknown,
* so we didn't know which node would perform the domain attach:
* 1. The node from which the PLOGI was sent (p2p) or
* 2. The node to which the FLOGI was sent (fabric).
*/
- if (topology == EFC_NPORT_TOPO_P2P) {
+ if (*topology == EFC_NPORT_TOPO_P2P) {
/* if this is p2p, need to attach to the domain using
* the d_id from the PLOGI received
*/
diff --git a/drivers/scsi/elx/libefc/efc_fabric.c b/drivers/scsi/elx/libefc/efc_fabric.c
index d397220d9e54..3270ce40196c 100644
--- a/drivers/scsi/elx/libefc/efc_fabric.c
+++ b/drivers/scsi/elx/libefc/efc_fabric.c
@@ -107,7 +107,6 @@ void
efc_fabric_notify_topology(struct efc_node *node)
{
struct efc_node *tmp_node;
- enum efc_nport_topology topology = node->nport->topology;
unsigned long index;
/*
@@ -118,7 +117,7 @@ efc_fabric_notify_topology(struct efc_node *node)
if (tmp_node != node) {
efc_node_post_event(tmp_node,
EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
- (void *)topology);
+ &node->nport->topology);
}
}
}
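The libefc hunks stop casting the topology enum itself to void * and instead pass the address of nport->topology, so the notified node reads the live value with the right type. A minimal model of the pointer-carrying callback style:

#include <stdio.h>

enum topo { TOPO_UNKNOWN, TOPO_P2P, TOPO_FABRIC };

/*
 * Handler in the style of __efc_d_wait_topology_notify(): the void *
 * argument carries the address of the enum, not the enum value itself.
 */
static void notify_handler(void *arg)
{
        enum topo *topology = arg;

        printf("notified topology=%d\n", *topology);
}

int main(void)
{
        enum topo topology = TOPO_P2P;

        /*
         * The old style, notify_handler((void *)topology), round-trips an
         * integer through a pointer cast; passing the address keeps the
         * types straight and always reflects the current value.
         */
        notify_handler(&topology);
        return 0;
}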
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index b35bf70a8c0d..ebe417921dac 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -285,11 +285,8 @@ buffer_done:
"6312 Catching potential buffer "
"overflow > PAGE_SIZE = %lu bytes\n",
PAGE_SIZE);
- strscpy(buf + PAGE_SIZE - 1 -
- strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1),
- LPFC_INFO_MORE_STR,
- strnlen(LPFC_INFO_MORE_STR, PAGE_SIZE - 1)
- + 1);
+ strscpy(buf + PAGE_SIZE - 1 - sizeof(LPFC_INFO_MORE_STR),
+ LPFC_INFO_MORE_STR, sizeof(LPFC_INFO_MORE_STR) + 1);
}
return len;
}
@@ -6204,7 +6201,8 @@ lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
len = scnprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
- len += scnprintf(buf + len, PAGE_SIZE, "Cfg: %d SCSI: %d NVME: %d\n",
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "Cfg: %d SCSI: %d NVME: %d\n",
phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt);
return len;
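Both lpfc hunks above fix the usual sysfs-append mistake: after len bytes have been written, the size handed to the next scnprintf() must be the remaining space, not the full buffer size. A user-space model with snprintf() (scnprintf() is kernel-only and differs in its return value) shows the pattern:

#include <stdio.h>

#define BUF_SZ 64

int main(void)
{
        char buf[BUF_SZ];
        int len = 0;

        /*
         * Each append passes the *remaining* space, mirroring
         * scnprintf(buf + len, PAGE_SIZE - len, ...). Note that snprintf()
         * returns the would-be length on truncation, while scnprintf()
         * returns the number of bytes actually written.
         */
        len += snprintf(buf + len, sizeof(buf) - len, "SGL sz: %d\n", 4096);
        len += snprintf(buf + len, sizeof(buf) - len, "Cfg: %d SCSI: %d\n",
                        64, 64);

        printf("%s(total %d bytes)\n", buf, len);
        return 0;
}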
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 1254a575fd47..052c0e5b1119 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -4015,11 +4015,11 @@ lpfc_cmpl_els_edc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
be32_to_cpu(pcgd->desc_tag),
be32_to_cpu(pcgd->desc_len),
be32_to_cpu(pcgd->xmt_signal_capability),
- be32_to_cpu(pcgd->xmt_signal_frequency.count),
- be32_to_cpu(pcgd->xmt_signal_frequency.units),
+ be16_to_cpu(pcgd->xmt_signal_frequency.count),
+ be16_to_cpu(pcgd->xmt_signal_frequency.units),
be32_to_cpu(pcgd->rcv_signal_capability),
- be32_to_cpu(pcgd->rcv_signal_frequency.count),
- be32_to_cpu(pcgd->rcv_signal_frequency.units));
+ be16_to_cpu(pcgd->rcv_signal_frequency.count),
+ be16_to_cpu(pcgd->rcv_signal_frequency.units));
/* Compare driver and Fport capabilities and choose
* least common.
@@ -9387,7 +9387,7 @@ lpfc_display_fpin_wwpn(struct lpfc_hba *phba, __be64 *wwnlist, u32 cnt)
/* Extract the next WWPN from the payload */
wwn = *wwnlist++;
wwpn = be64_to_cpu(wwn);
- len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ,
+ len += scnprintf(buf + len, LPFC_FPIN_WWPN_LINE_SZ - len,
" %016llx", wwpn);
/* Log a message if we are on the last WWPN
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 79a4872c2edb..7359505e6041 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1167,7 +1167,7 @@ struct lpfc_mbx_read_object { /* Version 0 */
#define lpfc_mbx_rd_object_rlen_MASK 0x00FFFFFF
#define lpfc_mbx_rd_object_rlen_WORD word0
uint32_t rd_object_offset;
- uint32_t rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
+ __le32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW];
#define LPFC_OBJ_NAME_SZ 104 /* 26 x sizeof(uint32_t) is 104. */
uint32_t rd_object_cnt;
struct lpfc_mbx_host_buf rd_object_hbuf[4];
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ec322f0e3cb..195169badb37 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -5518,7 +5518,7 @@ lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
if (phba->cgn_fpin_frequency &&
phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
- cp->cgn_stat_npm = cpu_to_le32(value);
+ cp->cgn_stat_npm = value;
}
value = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ,
LPFC_CGN_CRC32_SEED);
@@ -5547,9 +5547,9 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
uint32_t mbps;
uint32_t dvalue, wvalue, lvalue, avalue;
uint64_t latsum;
- uint16_t *ptr;
- uint32_t *lptr;
- uint16_t *mptr;
+ __le16 *ptr;
+ __le32 *lptr;
+ __le16 *mptr;
/* Make sure we have a congestion info buffer */
if (!phba->cgn_i)
@@ -5570,7 +5570,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
if (phba->cgn_fpin_frequency &&
phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
- cp->cgn_stat_npm = cpu_to_le32(value);
+ cp->cgn_stat_npm = value;
}
/* Read and clear the latency counters for this minute */
@@ -5753,7 +5753,7 @@ lpfc_cgn_save_evt_cnt(struct lpfc_hba *phba)
dvalue += le32_to_cpu(cp->cgn_drvr_hr[i]);
wvalue += le32_to_cpu(cp->cgn_warn_hr[i]);
lvalue += le32_to_cpu(cp->cgn_latency_hr[i]);
- mbps += le32_to_cpu(cp->cgn_bw_hr[i]);
+ mbps += le16_to_cpu(cp->cgn_bw_hr[i]);
avalue += le32_to_cpu(cp->cgn_alarm_hr[i]);
}
if (lvalue) /* Avg of latency averages */
@@ -8277,11 +8277,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
out_free_hba_hdwq_info:
- free_percpu(phba->sli4_hba.c_stat);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ free_percpu(phba->sli4_hba.c_stat);
out_free_hba_idle_stat:
- kfree(phba->sli4_hba.idle_stat);
#endif
+ kfree(phba->sli4_hba.idle_stat);
out_free_hba_eq_info:
free_percpu(phba->sli4_hba.eq_info);
out_free_hba_cpu_map:
@@ -13411,8 +13411,8 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
/* last used Index initialized to 0xff already */
- cp->cgn_warn_freq = LPFC_FPIN_INIT_FREQ;
- cp->cgn_alarm_freq = LPFC_FPIN_INIT_FREQ;
+ cp->cgn_warn_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
+ cp->cgn_alarm_freq = cpu_to_le16(LPFC_FPIN_INIT_FREQ);
crc = lpfc_cgn_calc_crc32(cp, LPFC_CGN_INFO_SZ, LPFC_CGN_CRC32_SEED);
cp->cgn_info_crc = cpu_to_le32(crc);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 73a3568ff17e..479b3eed6208 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -1489,9 +1489,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
struct lpfc_nvme_qhandle *lpfc_queue_info;
struct lpfc_nvme_fcpreq_priv *freqpriv;
struct nvme_common_command *sqe;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t start = 0;
-#endif
/* Validate pointers. LLDD fault handling with transport does
* have timing races.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0fde1e874c7a..befdf864c43b 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -1495,7 +1495,6 @@ static int
lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
uint8_t *txop, uint8_t *rxop)
{
- uint8_t ret = 0;
if (sc->prot_flags & SCSI_PROT_IP_CHECKSUM) {
switch (scsi_get_prot_op(sc)) {
@@ -1548,7 +1547,7 @@ lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
}
}
- return ret;
+ return 0;
}
#endif
@@ -5578,12 +5577,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
u8 *uuid = NULL;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint64_t start = 0L;
+ uint64_t start;
- if (phba->ktime_on)
- start = ktime_get_ns();
-#endif
start = ktime_get_ns();
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index ffd8a140638c..78ce38d7251c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -22090,6 +22090,7 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
struct lpfc_dmabuf *pcmd;
+ u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
/* sanity check on queue memory */
if (!datap)
@@ -22113,10 +22114,10 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
memset((void *)read_object->u.request.rd_object_name, 0,
LPFC_OBJ_NAME_SZ);
- sprintf((uint8_t *)read_object->u.request.rd_object_name, rdobject);
+ scnprintf((char *)rd_object_name, sizeof(rd_object_name), rdobject);
for (j = 0; j < strlen(rdobject); j++)
read_object->u.request.rd_object_name[j] =
- cpu_to_le32(read_object->u.request.rd_object_name[j]);
+ cpu_to_le32(rd_object_name[j]);
pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
if (pcmd)
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e4298bf4a482..39d8754e63ac 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1916,7 +1916,7 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
raid = MR_LdRaidGet(ld, local_map_ptr);
if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
- blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
+ blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
mr_device_priv_data->is_tm_capable =
raid->capability.tmCapable;
@@ -8033,7 +8033,7 @@ skip_firing_dcmds:
if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
- pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
+ pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
(sizeof(struct MR_PD_CFG_SEQ) *
(MAX_PHYSICAL_DEVICES - 1));
for (i = 0; i < 2 ; i++) {
@@ -8773,8 +8773,7 @@ int megasas_update_device_list(struct megasas_instance *instance,
if (event_type & SCAN_VD_CHANNEL) {
if (!instance->requestorId ||
- (instance->requestorId &&
- megasas_get_ld_vf_affiliation(instance, 0))) {
+ megasas_get_ld_vf_affiliation(instance, 0)) {
dcmd_ret = megasas_ld_list_query(instance,
MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
if (dcmd_ret != DCMD_SUCCESS)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 6c82435bc9cc..27eb652b564f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -1582,8 +1582,10 @@ mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
* wait for current poll to complete.
*/
for (qid = 0; qid < iopoll_q_count; qid++) {
- while (atomic_read(&ioc->io_uring_poll_queues[qid].busy))
+ while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
+ cpu_relax();
udelay(500);
+ }
}
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 770b241d7bb2..1b79f01f03a4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -2178,7 +2178,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
mpt3sas_check_cmd_timeout(ioc,
ioc->ctl_cmds.status, mpi_request,
sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed);
- *issue_reset = reset_needed;
+ *issue_reset = reset_needed;
rc = -EFAULT;
goto out;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 2f82b1e629af..d383d4a03436 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -10749,8 +10749,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_pcie_topology_change_event(ioc, fw_event);
ioc->current_event = NULL;
- return;
- break;
+ return;
}
out:
fw_event_work_put(fw_event);
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 7a4f5d4dd670..2b8c6fa5e775 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -1939,11 +1939,8 @@ static void ncr_start_next_ccb (struct ncb *np, struct lcb * lp, int maxn);
static void ncr_put_start_queue(struct ncb *np, struct ccb *cp);
static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd);
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd);
static void process_waiting_list(struct ncb *np, int sts);
-#define remove_from_waiting_list(np, cmd) \
- retrieve_from_waiting_list(1, (np), (cmd))
#define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
#define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
@@ -7997,26 +7994,6 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
}
}
-static struct scsi_cmnd *retrieve_from_waiting_list(int to_remove, struct ncb *np, struct scsi_cmnd *cmd)
-{
- struct scsi_cmnd **pcmd = &np->waiting_list;
-
- while (*pcmd) {
- if (cmd == *pcmd) {
- if (to_remove) {
- *pcmd = (struct scsi_cmnd *) cmd->next_wcmd;
- cmd->next_wcmd = NULL;
- }
-#ifdef DEBUG_WAITING_LIST
- printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
-#endif
- return cmd;
- }
- pcmd = (struct scsi_cmnd **) &(*pcmd)->next_wcmd;
- }
- return NULL;
-}
-
static void process_waiting_list(struct ncb *np, int sts)
{
struct scsi_cmnd *waiting_list, *wcmd;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 1e4e3e83b5c7..5fc7697f0af4 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -7169,7 +7169,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
return 0;
break;
case QLA2XXX_INI_MODE_DUAL:
- if (!qla_dual_mode_enabled(vha))
+ if (!qla_dual_mode_enabled(vha) &&
+ !qla_ini_mode_enabled(vha))
return 0;
break;
case QLA2XXX_INI_MODE_ENABLED:
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ece60267b971..b26f2699adb2 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2634,7 +2634,7 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
if (unlikely(logit))
- ql_log(ql_log_warn, fcport->vha, 0x5060,
+ ql_log(ql_dbg_io, fcport->vha, 0x5060,
"NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
sp->name, sp->handle, comp_status,
fd->transferred_length, le32_to_cpu(sts->residual_len),
@@ -3491,7 +3491,7 @@ check_scsi_status:
out:
if (logit)
- ql_log(ql_log_warn, fcport->vha, 0x3022,
+ ql_log(ql_dbg_io, fcport->vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index d8b05d8b5470..922e4c7bd88e 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -441,9 +441,7 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct iscsi_transport *t = iface->transport;
int param = -1;
- if (attr == &dev_attr_iface_enabled.attr)
- param = ISCSI_NET_PARAM_IFACE_ENABLE;
- else if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
+ if (attr == &dev_attr_iface_def_taskmgmt_tmo.attr)
param = ISCSI_IFACE_PARAM_DEF_TASKMGMT_TMO;
else if (attr == &dev_attr_iface_header_digest.attr)
param = ISCSI_IFACE_PARAM_HDRDGST_EN;
@@ -483,7 +481,9 @@ static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
if (param != -1)
return t->attr_is_visible(ISCSI_IFACE_PARAM, param);
- if (attr == &dev_attr_iface_vlan_id.attr)
+ if (attr == &dev_attr_iface_enabled.attr)
+ param = ISCSI_NET_PARAM_IFACE_ENABLE;
+ else if (attr == &dev_attr_iface_vlan_id.attr)
param = ISCSI_NET_PARAM_VLAN_ID;
else if (attr == &dev_attr_iface_vlan_priority.attr)
param = ISCSI_NET_PARAM_VLAN_PRIORITY;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index cbd9999f93a6..523bf2fdc253 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2124,6 +2124,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
retries = 0;
do {
+ bool media_was_present = sdkp->media_present;
+
cmd[0] = TEST_UNIT_READY;
memset((void *) &cmd[1], 0, 9);
@@ -2138,7 +2140,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
* with any more polling.
*/
if (media_not_present(sdkp, &sshdr)) {
- sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
+ if (media_was_present)
+ sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
return;
}
@@ -3401,15 +3404,16 @@ static int sd_probe(struct device *dev)
}
device_initialize(&sdkp->dev);
- sdkp->dev.parent = dev;
+ sdkp->dev.parent = get_device(dev);
sdkp->dev.class = &sd_disk_class;
dev_set_name(&sdkp->dev, "%s", dev_name(dev));
error = device_add(&sdkp->dev);
- if (error)
- goto out_free_index;
+ if (error) {
+ put_device(&sdkp->dev);
+ goto out;
+ }
- get_device(dev);
dev_set_drvdata(dev, sdkp);
gd->major = sd_major((index & 0xf0) >> 4);
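
The sd_probe() hunk above follows the driver-model rule that once device_initialize() has run, the embedded device is refcounted: a failed device_add() must be unwound with put_device() so the ->release callback frees the memory, and the reference taken on the parent is dropped there as well. A minimal kernel-style sketch of that pattern, using made-up my_dev/my_release/my_bus_probe names rather than anything from sd.c:

#include <linux/device.h>
#include <linux/slab.h>

struct my_dev {
	struct device dev;		/* embedded, refcounted */
};

static void my_release(struct device *dev)
{
	struct my_dev *md = container_of(dev, struct my_dev, dev);

	put_device(dev->parent);	/* balances the get_device() in probe */
	kfree(md);
}

static int my_bus_probe(struct device *parent)
{
	struct my_dev *md = kzalloc(sizeof(*md), GFP_KERNEL);
	int err;

	if (!md)
		return -ENOMEM;

	device_initialize(&md->dev);
	md->dev.parent = get_device(parent);
	md->dev.release = my_release;
	dev_set_name(&md->dev, "my_dev");

	err = device_add(&md->dev);
	if (err) {
		/* after device_initialize(), never kfree() directly:
		 * drop the reference and let ->release clean up */
		put_device(&md->dev);
		return err;
	}
	return 0;
}
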
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index b9757f24b0d6..ed06798983f8 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -154,8 +154,8 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
/*
* Report zone buffer size should be at most 64B times the number of
- * zones requested plus the 64B reply header, but should be at least
- * SECTOR_SIZE for ATA devices.
+ * zones requested plus the 64B reply header, but should be aligned
+ * to SECTOR_SIZE for ATA devices.
* Make sure that this size does not exceed the hardware capabilities.
* Furthermore, since the report zone command cannot be split, make
* sure that the allocated buffer can always be mapped by limiting the
@@ -174,7 +174,7 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp,
*buflen = bufsize;
return buf;
}
- bufsize >>= 1;
+ bufsize = rounddown(bufsize >> 1, SECTOR_SIZE);
}
return NULL;
@@ -280,7 +280,7 @@ static void sd_zbc_update_wp_offset_workfn(struct work_struct *work)
{
struct scsi_disk *sdkp;
unsigned long flags;
- unsigned int zno;
+ sector_t zno;
int ret;
sdkp = container_of(work, struct scsi_disk, zone_wp_offset_work);
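
The report-buffer change above keeps each retried allocation size a multiple of SECTOR_SIZE while halving it, since the buffer should stay sector aligned for ATA devices. A standalone illustration of that shrink-and-align loop in plain C; try_alloc() and its 64 KiB failure threshold are invented for the demo:

/* build: cc -o shrink shrink.c && ./shrink */
#include <stdio.h>
#include <stdlib.h>

#define SECTOR_SIZE	512u
#define ROUNDDOWN(v, a)	((v) - ((v) % (a)))	/* round v down to a multiple of a */

/* stand-in allocator: pretend anything above 64 KiB fails */
static void *try_alloc(size_t bufsize)
{
	return bufsize > 64 * 1024 ? NULL : malloc(bufsize);
}

int main(void)
{
	size_t bufsize = 512 * 1024;	/* initial request */
	void *buf = NULL;

	while (bufsize >= SECTOR_SIZE) {
		buf = try_alloc(bufsize);
		if (buf)
			break;
		/* halve, but keep the next attempt sector aligned */
		bufsize = ROUNDDOWN(bufsize >> 1, SECTOR_SIZE);
	}
	printf("allocated %zu bytes\n", buf ? bufsize : (size_t)0);
	free(buf);
	return 0;
}
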
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index c2afba2a5414..0a1734f34587 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -87,9 +87,16 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
0
};
unsigned char recv_page_code;
+ unsigned int retries = SES_RETRIES;
+ struct scsi_sense_hdr sshdr;
+
+ do {
+ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ &sshdr, SES_TIMEOUT, 1, NULL);
+ } while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
+ (sshdr.sense_key == NOT_READY ||
+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
- ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (unlikely(ret))
return ret;
@@ -111,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
static int ses_send_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen)
{
- u32 result;
+ int result;
unsigned char cmd[] = {
SEND_DIAGNOSTIC,
@@ -121,9 +128,16 @@ static int ses_send_diag(struct scsi_device *sdev, int page_code,
bufflen & 0xff,
0
};
+ struct scsi_sense_hdr sshdr;
+ unsigned int retries = SES_RETRIES;
+
+ do {
+ result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
+ &sshdr, SES_TIMEOUT, 1, NULL);
+ } while (result > 0 && --retries && scsi_sense_valid(&sshdr) &&
+ (sshdr.sense_key == NOT_READY ||
+ (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));
- result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, buf, bufflen,
- NULL, SES_TIMEOUT, SES_RETRIES, NULL);
if (result)
sdev_printk(KERN_ERR, sdev, "SEND DIAGNOSTIC result: %8x\n",
result);
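
Both ses.c hunks introduce the same retry loop; factored into one helper it reads roughly as below. ses_do_cmd is a hypothetical name and this is a restatement of the patch's logic, not code from the tree; SES_TIMEOUT and SES_RETRIES are the driver's existing constants.

/* Retry a SES command while the enclosure reports NOT READY or a
 * power-on/reset UNIT ATTENTION (ASC 0x29), one attempt per call to
 * scsi_execute_req() so the sense data can be inspected in between. */
static int ses_do_cmd(struct scsi_device *sdev, const unsigned char *cmd,
		      int data_dir, void *buf, int bufflen)
{
	struct scsi_sense_hdr sshdr;
	unsigned int retries = SES_RETRIES;
	int ret;

	do {
		ret = scsi_execute_req(sdev, cmd, data_dir, buf, bufflen,
				       &sshdr, SES_TIMEOUT, 1, NULL);
	} while (ret > 0 && --retries && scsi_sense_valid(&sshdr) &&
		 (sshdr.sense_key == NOT_READY ||
		  (sshdr.sense_key == UNIT_ATTENTION && sshdr.asc == 0x29)));

	return ret;
}
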
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index 79d9aa2df528..ddd00efc4882 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -523,7 +523,7 @@ static int sr_read_sector(Scsi_CD *cd, int lba, int blksize, unsigned char *dest
return rc;
cd->readcd_known = 0;
sr_printk(KERN_INFO, cd,
- "CDROM does'nt support READ CD (0xbe) command\n");
+ "CDROM doesn't support READ CD (0xbe) command\n");
/* fall & retry the other way */
}
/* ... if this fails, we switch the blocksize using MODE SELECT */
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 9d04929f03a1..ae8636d3780b 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -3823,6 +3823,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
case CDROM_SEND_PACKET:
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
+ break;
default:
break;
}
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index b3bcc5c882da..149c1aa09103 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -128,6 +128,81 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
return err;
}
+static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
+{
+ struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
+ int ret;
+
+ pwr_info.lane_rx = lanes;
+ pwr_info.lane_tx = lanes;
+ ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+ if (ret)
+ dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
+ __func__, lanes, ret);
+ return ret;
+}
+
+static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ int err = 0;
+
+ switch (status) {
+ case PRE_CHANGE:
+ if (ufshcd_is_hs_mode(dev_max_params) &&
+ (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
+ ufs_intel_set_lanes(hba, 2);
+ memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
+ break;
+ case POST_CHANGE:
+ if (ufshcd_is_hs_mode(dev_req_params)) {
+ u32 peer_granularity;
+
+ usleep_range(1000, 1250);
+ err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
+{
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ if (granularity == peer_granularity) {
+ u32 new_peer_pa_tactivate = pa_tactivate + 2;
+
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate);
+ }
+out:
+ return ret;
+}
+
#define INTEL_ACTIVELTR 0x804
#define INTEL_IDLELTR 0x808
@@ -351,6 +426,7 @@ static int ufs_intel_lkf_init(struct ufs_hba *hba)
struct ufs_host *ufs_host;
int err;
+ hba->nop_out_timeout = 200;
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
hba->caps |= UFSHCD_CAP_CRYPTO;
err = ufs_intel_common_init(hba);
@@ -381,6 +457,8 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.exit = ufs_intel_common_exit,
.hce_enable_notify = ufs_intel_hce_enable_notify,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .pwr_change_notify = ufs_intel_lkf_pwr_change_notify,
+ .apply_dev_quirks = ufs_intel_lkf_apply_dev_quirks,
.resume = ufs_intel_resume,
.device_reset = ufs_intel_device_reset,
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 3841ab49f556..188de6f91050 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -17,8 +17,6 @@
#include <linux/blk-pm.h>
#include <linux/blkdev.h>
#include <scsi/scsi_driver.h>
-#include <scsi/scsi_transport.h>
-#include "../scsi_transport_api.h"
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -237,6 +235,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -319,8 +318,7 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
enum ufs_trace_str_t str_t)
{
- int off = (int)tag - hba->nutrs;
- struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
if (!trace_ufshcd_upiu_enabled())
return;
@@ -2759,8 +2757,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
out:
up_read(&hba->clk_scaling_lock);
- if (ufs_trigger_eh())
- scsi_schedule_eh(hba->host);
+ if (ufs_trigger_eh()) {
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_schedule_eh_work(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
return err;
}
@@ -3919,35 +3922,6 @@ out:
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
-static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
-{
- lockdep_assert_held(hba->host->host_lock);
-
- return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
- (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
-}
-
-static void ufshcd_schedule_eh(struct ufs_hba *hba)
-{
- bool schedule_eh = false;
- unsigned long flags;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
- /* handle fatal errors only when link is not in error state */
- if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
- if (hba->force_reset || ufshcd_is_link_broken(hba) ||
- ufshcd_is_saved_err_fatal(hba))
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
- else
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
- schedule_eh = true;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (schedule_eh)
- scsi_schedule_eh(hba->host);
-}
-
/**
* ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
* state) and waits for it to take effect.
@@ -3968,7 +3942,6 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
DECLARE_COMPLETION_ONSTACK(uic_async_done);
unsigned long flags;
- bool schedule_eh = false;
u8 status;
int ret;
bool reenable_intr = false;
@@ -4038,14 +4011,10 @@ out:
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
if (ret) {
ufshcd_set_link_broken(hba);
- schedule_eh = true;
+ ufshcd_schedule_eh_work(hba);
}
-
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- if (schedule_eh)
- ufshcd_schedule_eh(hba);
mutex_unlock(&hba->uic_cmd_mutex);
return ret;
@@ -4776,7 +4745,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
- NOP_OUT_TIMEOUT);
+ hba->nop_out_timeout);
if (!err || err == -ETIMEDOUT)
break;
@@ -5911,6 +5880,27 @@ out:
return err_handling;
}
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+ /* handle fatal errors only when link is not in error state */
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba))
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+ else
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+ queue_work(hba->eh_wq, &hba->eh_work);
+ }
+}
+
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
down_write(&hba->clk_scaling_lock);
@@ -6044,11 +6034,11 @@ static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
- * @host: SCSI host pointer
+ * @work: pointer to work structure
*/
-static void ufshcd_err_handler(struct Scsi_Host *host)
+static void ufshcd_err_handler(struct work_struct *work)
{
- struct ufs_hba *hba = shost_priv(host);
+ struct ufs_hba *hba;
unsigned long flags;
bool err_xfer = false;
bool err_tm = false;
@@ -6056,9 +6046,10 @@ static void ufshcd_err_handler(struct Scsi_Host *host)
int tag;
bool needs_reset = false, needs_restore = false;
+ hba = container_of(work, struct ufs_hba, eh_work);
+
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
- hba->host->host_eh_scheduled = 0;
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
@@ -6371,6 +6362,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
"host_regs: ");
ufshcd_print_pwr_info(hba);
}
+ ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
@@ -6382,10 +6374,6 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
hba->errors = 0;
hba->uic_error = 0;
spin_unlock(hba->host->host_lock);
-
- if (queue_eh_work)
- ufshcd_schedule_eh(hba);
-
return retval;
}
@@ -6876,7 +6864,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
- __ufshcd_transfer_req_compl(hba, pos, /*retry_requests=*/true);
+ __ufshcd_transfer_req_compl(hba, 1U << pos, false);
}
}
@@ -7048,17 +7036,15 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
* will be to send LU reset which, again, is a spec violation.
* To avoid these unnecessary/illegal steps, first we clean up
* the lrb taken by this cmd and re-set it in outstanding_reqs,
- * then queue the error handler and bail.
+ * then queue the eh_work and bail.
*/
if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN) {
ufshcd_update_evt_hist(hba, UFS_EVT_ABORT, lrbp->lun);
spin_lock_irqsave(host->host_lock, flags);
hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
spin_unlock_irqrestore(host->host_lock, flags);
-
- ufshcd_schedule_eh(hba);
-
goto release;
}
@@ -7191,10 +7177,11 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_err_handler(hba->host);
+ flush_work(&hba->eh_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
@@ -8604,6 +8591,8 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
+ if (hba->eh_wq)
+ destroy_workqueue(hba->eh_wq);
ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
@@ -9448,10 +9437,6 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}
-static struct scsi_transport_template ufshcd_transport_template = {
- .eh_strategy_handler = ufshcd_err_handler,
-};
-
/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
@@ -9478,11 +9463,11 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
- host->transportt = &ufshcd_transport_template;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
+ hba->nop_out_timeout = NOP_OUT_TIMEOUT;
INIT_LIST_HEAD(&hba->clk_list_head);
spin_lock_init(&hba->outstanding_lock);
@@ -9517,6 +9502,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
+ char eh_wq_name[sizeof("ufs_eh_wq_00")];
if (!mmio_base) {
dev_err(hba->dev,
@@ -9570,6 +9556,17 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
+ /* Initialize work queues */
+ snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+ hba->host->host_no);
+ hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ if (!hba->eh_wq) {
+ dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_disable;
+ }
+ INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
sema_init(&hba->host_sem, 1);
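
Stripped of the UFS specifics, the lifecycle the conversion above relies on is the usual dedicated-workqueue pattern: create the queue and INIT_WORK() at init, queue_work() under the host lock wherever an error is detected, then flush_work()/destroy_workqueue() on teardown. A generic sketch with made-up my_hba/my_eh_handler names, not the driver's code:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_hba {
	spinlock_t lock;
	struct workqueue_struct *eh_wq;
	struct work_struct eh_work;
	bool force_reset;
};

static void my_eh_handler(struct work_struct *work)
{
	struct my_hba *hba = container_of(work, struct my_hba, eh_work);

	/* runs in process context on eh_wq; recover the controller here */
	(void)hba;
}

static int my_init(struct my_hba *hba)
{
	spin_lock_init(&hba->lock);
	/* single threaded: at most one error-handler instance at a time */
	hba->eh_wq = create_singlethread_workqueue("my_eh_wq");
	if (!hba->eh_wq)
		return -ENOMEM;
	INIT_WORK(&hba->eh_work, my_eh_handler);
	return 0;
}

static void my_report_error(struct my_hba *hba)
{
	unsigned long flags;

	spin_lock_irqsave(&hba->lock, flags);
	hba->force_reset = true;
	queue_work(hba->eh_wq, &hba->eh_work);	/* no-op if already queued */
	spin_unlock_irqrestore(&hba->lock, flags);
}

static void my_exit(struct my_hba *hba)
{
	flush_work(&hba->eh_work);		/* let a pending handler finish */
	destroy_workqueue(hba->eh_wq);
}
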
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 52ea6f350b18..f0da5d3db1fa 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -741,6 +741,8 @@ struct ufs_hba_monitor {
* @is_powered: flag to check if HBA is powered
* @shutting_down: flag to check if shutdown has been invoked
* @host_sem: semaphore used to serialize concurrent contexts
+ * @eh_wq: Workqueue that eh_work works on
+ * @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
* @uic_error: UFS interconnect layer error status
@@ -843,6 +845,8 @@ struct ufs_hba {
struct semaphore host_sem;
/* Work Queues */
+ struct workqueue_struct *eh_wq;
+ struct work_struct eh_work;
struct work_struct eeh_work;
/* HBA Errors */
@@ -858,6 +862,7 @@ struct ufs_hba {
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
ktime_t last_dme_cmd_tstamp;
+ int nop_out_timeout;
/* Keeps information of the UFS device connected to this host */
struct ufs_dev_info dev_info;
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 02fb51ae8b25..589af5f6b940 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -333,9 +333,8 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
}
static void
-ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshpb_lu *hpb,
- struct ufshcd_lrb *lrbp, u32 lpn, __be64 ppn,
- u8 transfer_len, int read_id)
+ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+ __be64 ppn, u8 transfer_len, int read_id)
{
unsigned char *cdb = lrbp->cmd->cmnd;
__be64 ppn_tmp = ppn;
@@ -703,8 +702,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
}
}
- ufshpb_set_hpb_read_to_upiu(hba, hpb, lrbp, lpn, ppn, transfer_len,
- read_id);
+ ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
hpb->stats.hit_cnt++;
return 0;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index c25ce8f0e0af..07d0250f17c3 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -300,7 +300,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
}
break;
default:
- pr_info("Unsupport virtio scsi event reason %x\n", event->reason);
+ pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
}
}
@@ -392,7 +392,7 @@ static void virtscsi_handle_event(struct work_struct *work)
virtscsi_handle_param_change(vscsi, event);
break;
default:
- pr_err("Unsupport virtio scsi event %x\n", event->event);
+ pr_err("Unsupported virtio scsi event %x\n", event->event);
}
virtscsi_kick_event(vscsi, event_node);
}
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 540861ca2ba3..553b6b9d0222 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -600,6 +600,12 @@ static int rockchip_spi_transfer_one(
int ret;
bool use_dma;
+ /* Zero length transfers won't trigger an interrupt on completion */
+ if (!xfer->len) {
+ spi_finalize_current_transfer(ctlr);
+ return 1;
+ }
+
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 56c6feb2ba3e..8ce840c7ecc8 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -204,9 +204,6 @@ struct tegra_slink_data {
struct dma_async_tx_descriptor *tx_dma_desc;
};
-static int tegra_slink_runtime_suspend(struct device *dev);
-static int tegra_slink_runtime_resume(struct device *dev);
-
static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
unsigned long reg)
{
@@ -1185,7 +1182,8 @@ static int tegra_slink_resume(struct device *dev)
}
#endif
-static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int tegra_slink_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
@@ -1197,7 +1195,7 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
return 0;
}
-static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
+static int tegra_slink_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
@@ -1210,6 +1208,7 @@ static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
}
return 0;
}
+#endif /* CONFIG_PM */
static const struct dev_pm_ops slink_pm_ops = {
SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 57e2499ec1ed..aea037c65985 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -58,10 +58,6 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
const struct spi_device *spi = to_spi_device(dev);
int len;
- len = of_device_modalias(dev, buf, PAGE_SIZE);
- if (len != -ENODEV)
- return len;
-
len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
if (len != -ENODEV)
return len;
@@ -367,10 +363,6 @@ static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
const struct spi_device *spi = to_spi_device(dev);
int rc;
- rc = of_device_uevent_modalias(dev, env);
- if (rc != -ENODEV)
- return rc;
-
rc = acpi_device_uevent_modalias(dev, env);
if (rc != -ENODEV)
return rc;
diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
index e6d860a9678e..dc4ed0ff1ae2 100644
--- a/drivers/staging/greybus/uart.c
+++ b/drivers/staging/greybus/uart.c
@@ -761,6 +761,17 @@ out:
gbphy_runtime_put_autosuspend(gb_tty->gbphy_dev);
}
+static void gb_tty_port_destruct(struct tty_port *port)
+{
+ struct gb_tty *gb_tty = container_of(port, struct gb_tty, port);
+
+ if (gb_tty->minor != GB_NUM_MINORS)
+ release_minor(gb_tty);
+ kfifo_free(&gb_tty->write_fifo);
+ kfree(gb_tty->buffer);
+ kfree(gb_tty);
+}
+
static const struct tty_operations gb_ops = {
.install = gb_tty_install,
.open = gb_tty_open,
@@ -786,6 +797,7 @@ static const struct tty_port_operations gb_port_ops = {
.dtr_rts = gb_tty_dtr_rts,
.activate = gb_tty_port_activate,
.shutdown = gb_tty_port_shutdown,
+ .destruct = gb_tty_port_destruct,
};
static int gb_uart_probe(struct gbphy_device *gbphy_dev,
@@ -798,17 +810,11 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
int retval;
int minor;
- gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
- if (!gb_tty)
- return -ENOMEM;
-
connection = gb_connection_create(gbphy_dev->bundle,
le16_to_cpu(gbphy_dev->cport_desc->id),
gb_uart_request_handler);
- if (IS_ERR(connection)) {
- retval = PTR_ERR(connection);
- goto exit_tty_free;
- }
+ if (IS_ERR(connection))
+ return PTR_ERR(connection);
max_payload = gb_operation_get_payload_size_max(connection);
if (max_payload < sizeof(struct gb_uart_send_data_request)) {
@@ -816,13 +822,23 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
goto exit_connection_destroy;
}
+ gb_tty = kzalloc(sizeof(*gb_tty), GFP_KERNEL);
+ if (!gb_tty) {
+ retval = -ENOMEM;
+ goto exit_connection_destroy;
+ }
+
+ tty_port_init(&gb_tty->port);
+ gb_tty->port.ops = &gb_port_ops;
+ gb_tty->minor = GB_NUM_MINORS;
+
gb_tty->buffer_payload_max = max_payload -
sizeof(struct gb_uart_send_data_request);
gb_tty->buffer = kzalloc(gb_tty->buffer_payload_max, GFP_KERNEL);
if (!gb_tty->buffer) {
retval = -ENOMEM;
- goto exit_connection_destroy;
+ goto exit_put_port;
}
INIT_WORK(&gb_tty->tx_work, gb_uart_tx_write_work);
@@ -830,7 +846,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
retval = kfifo_alloc(&gb_tty->write_fifo, GB_UART_WRITE_FIFO_SIZE,
GFP_KERNEL);
if (retval)
- goto exit_buf_free;
+ goto exit_put_port;
gb_tty->credits = GB_UART_FIRMWARE_CREDITS;
init_completion(&gb_tty->credits_complete);
@@ -844,7 +860,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
} else {
retval = minor;
}
- goto exit_kfifo_free;
+ goto exit_put_port;
}
gb_tty->minor = minor;
@@ -853,9 +869,6 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
init_waitqueue_head(&gb_tty->wioctl);
mutex_init(&gb_tty->mutex);
- tty_port_init(&gb_tty->port);
- gb_tty->port.ops = &gb_port_ops;
-
gb_tty->connection = connection;
gb_tty->gbphy_dev = gbphy_dev;
gb_connection_set_data(connection, gb_tty);
@@ -863,7 +876,7 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
retval = gb_connection_enable_tx(connection);
if (retval)
- goto exit_release_minor;
+ goto exit_put_port;
send_control(gb_tty, gb_tty->ctrlout);
@@ -890,16 +903,10 @@ static int gb_uart_probe(struct gbphy_device *gbphy_dev,
exit_connection_disable:
gb_connection_disable(connection);
-exit_release_minor:
- release_minor(gb_tty);
-exit_kfifo_free:
- kfifo_free(&gb_tty->write_fifo);
-exit_buf_free:
- kfree(gb_tty->buffer);
+exit_put_port:
+ tty_port_put(&gb_tty->port);
exit_connection_destroy:
gb_connection_destroy(connection);
-exit_tty_free:
- kfree(gb_tty);
return retval;
}
@@ -930,15 +937,10 @@ static void gb_uart_remove(struct gbphy_device *gbphy_dev)
gb_connection_disable_rx(connection);
tty_unregister_device(gb_tty_driver, gb_tty->minor);
- /* FIXME - free transmit / receive buffers */
-
gb_connection_disable(connection);
- tty_port_destroy(&gb_tty->port);
gb_connection_destroy(connection);
- release_minor(gb_tty);
- kfifo_free(&gb_tty->write_fifo);
- kfree(gb_tty->buffer);
- kfree(gb_tty);
+
+ tty_port_put(&gb_tty->port);
}
static int gb_tty_init(void)
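
The probe/remove rework above centralises teardown in the tty_port's ->destruct callback, so every exit path only has to drop its reference with tty_port_put() and the state is freed exactly once. The core of that convention, with placeholder my_tty names (a sketch, not the greybus code):

#include <linux/slab.h>
#include <linux/tty.h>

struct my_tty {
	struct tty_port port;		/* refcounted container */
	char *buffer;
};

static void my_port_destruct(struct tty_port *port)
{
	struct my_tty *mt = container_of(port, struct my_tty, port);

	/* called when the last reference is dropped */
	kfree(mt->buffer);
	kfree(mt);
}

static const struct tty_port_operations my_port_ops = {
	.destruct = my_port_destruct,
};

static int my_tty_probe(void)
{
	struct my_tty *mt = kzalloc(sizeof(*mt), GFP_KERNEL);

	if (!mt)
		return -ENOMEM;

	tty_port_init(&mt->port);	/* reference count starts at 1 */
	mt->port.ops = &my_port_ops;

	mt->buffer = kzalloc(256, GFP_KERNEL);
	if (!mt->buffer) {
		/* any failure after tty_port_init() unwinds via ->destruct */
		tty_port_put(&mt->port);
		return -ENOMEM;
	}
	/* ... register the tty device; remove() ends with tty_port_put() ... */
	return 0;
}
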
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 8a2edd67f2c6..20e508158871 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -919,7 +919,7 @@ static int hantro_probe(struct platform_device *pdev)
if (!vpu->variant->irqs[i].handler)
continue;
- if (vpu->variant->num_clocks > 1) {
+ if (vpu->variant->num_irqs > 1) {
irq_name = vpu->variant->irqs[i].name;
irq = platform_get_irq_byname(vpu->pdev, irq_name);
} else {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index c589fe9dae70..825af5fd35e0 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -135,7 +135,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
sizeimage = bytesperline * height;
/* Chroma plane size. */
- sizeimage += bytesperline * height / 2;
+ sizeimage += bytesperline * ALIGN(height, 64) / 2;
break;
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 81d4255d1785..1fd375076001 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -5372,8 +5372,8 @@ static int rtw_mp_read_reg(struct net_device *dev,
pnext++;
if (*pnext != '\0') {
- strtout = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra, "%s %d", extra, strtout);
+ strtout = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra + strlen(extra), " %d", strtout);
} else {
break;
}
@@ -5405,7 +5405,7 @@ static int rtw_mp_read_reg(struct net_device *dev,
pnext++;
if (*pnext != '\0') {
strtout = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra, "%s %d", extra, strtout);
+ sprintf(extra + strlen(extra), " %d", strtout);
} else {
break;
}
@@ -5512,7 +5512,7 @@ static int rtw_mp_read_rf(struct net_device *dev,
pnext++;
if (*pnext != '\0') {
strtou = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra, "%s %d", extra, strtou);
+ sprintf(extra + strlen(extra), " %d", strtou);
} else {
break;
}
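
All three hunks above fix the same construct: sprintf(extra, "%s %d", extra, ...) passes the destination buffer as one of its own sources, which is undefined behaviour in C. The replacement appends at the current end of the string instead. A small runnable illustration of the safe idiom in plain userland C, nothing driver specific:

/* build: cc -o append append.c && ./append */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char extra[64] = "regs:";
	const int vals[] = { 10, 32, 7 };

	for (size_t i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		/* append after the existing contents; sprintf(extra, "%s %d",
		 * extra, ...) would overlap source and destination */
		size_t len = strlen(extra);

		snprintf(extra + len, sizeof(extra) - len, " %d", vals[i]);
	}

	puts(extra);	/* prints: regs: 10 32 7 */
	return 0;
}
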
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 102ec644bc8a..023bd4516a68 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1110,20 +1110,24 @@ static ssize_t alua_support_store(struct config_item *item,
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
- bool flag;
+ bool flag, oldflag;
int ret;
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA);
+ if (flag == oldflag)
+ return count;
+
if (!(dev->transport->transport_flags_changeable &
TRANSPORT_FLAG_PASSTHROUGH_ALUA)) {
pr_err("dev[%p]: Unable to change SE Device alua_support:"
" alua_support has fixed value\n", dev);
- return -EINVAL;
+ return -ENOSYS;
}
- ret = strtobool(page, &flag);
- if (ret < 0)
- return ret;
-
if (flag)
dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_ALUA;
else
@@ -1145,20 +1149,24 @@ static ssize_t pgr_support_store(struct config_item *item,
{
struct se_dev_attrib *da = to_attrib(item);
struct se_device *dev = da->da_dev;
- bool flag;
+ bool flag, oldflag;
int ret;
+ ret = strtobool(page, &flag);
+ if (ret < 0)
+ return ret;
+
+ oldflag = !(dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_PGR);
+ if (flag == oldflag)
+ return count;
+
if (!(dev->transport->transport_flags_changeable &
TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
pr_err("dev[%p]: Unable to change SE Device pgr_support:"
" pgr_support has fixed value\n", dev);
- return -EINVAL;
+ return -ENOSYS;
}
- ret = strtobool(page, &flag);
- if (ret < 0)
- return ret;
-
if (flag)
dev->transport_flags &= ~TRANSPORT_FLAG_PASSTHROUGH_PGR;
else
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 4b94b085625b..3829b61b56c1 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -269,7 +269,7 @@ target_scsi2_reservation_reserve(struct se_cmd *cmd)
spin_lock(&dev->dev_reservation_lock);
if (dev->reservation_holder &&
dev->reservation_holder->se_node_acl != sess->se_node_acl) {
- pr_err("SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+ pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
tpg->se_tpg_tfo->fabric_name);
pr_err("Original reserver LUN: %llu %s\n",
cmd->se_lun->unpacked_lun,
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 0f0038af2ad4..fb64acfd5e07 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -107,7 +107,7 @@ static int tcc_offset_update(unsigned int tcc)
return 0;
}
-static unsigned int tcc_offset_save;
+static int tcc_offset_save = -1;
static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
struct device_attribute *attr, const char *buf,
@@ -352,7 +352,8 @@ int proc_thermal_resume(struct device *dev)
proc_dev = dev_get_drvdata(dev);
proc_thermal_read_ppcc(proc_dev);
- tcc_offset_update(tcc_offset_save);
+ if (tcc_offset_save >= 0)
+ tcc_offset_update(tcc_offset_save);
return 0;
}
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 4c7ebd1d3f9c..b1162e566a70 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -417,7 +417,7 @@ static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
const struct tsens_sensor *s = &priv->sensor[i];
u32 hw_id = s->hw_id;
- if (IS_ERR(s->tzd))
+ if (!s->tzd)
continue;
if (!tsens_threshold_violated(priv, hw_id, &d))
continue;
@@ -467,7 +467,7 @@ static irqreturn_t tsens_irq_thread(int irq, void *data)
const struct tsens_sensor *s = &priv->sensor[i];
u32 hw_id = s->hw_id;
- if (IS_ERR(s->tzd))
+ if (!s->tzd)
continue;
if (!tsens_threshold_violated(priv, hw_id, &d))
continue;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 97ef9b040b84..51374f4e1cca 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -222,15 +222,14 @@ int thermal_build_list_of_policies(char *buf)
{
struct thermal_governor *pos;
ssize_t count = 0;
- ssize_t size = PAGE_SIZE;
mutex_lock(&thermal_governor_lock);
list_for_each_entry(pos, &thermal_governor_list, governor_list) {
- size = PAGE_SIZE - count;
- count += scnprintf(buf + count, size, "%s ", pos->name);
+ count += scnprintf(buf + count, PAGE_SIZE - count, "%s ",
+ pos->name);
}
- count += scnprintf(buf + count, size, "\n");
+ count += scnprintf(buf + count, PAGE_SIZE - count, "\n");
mutex_unlock(&thermal_governor_lock);
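
The thermal_core.c fix recomputes the space left in the page buffer on every call instead of reusing a size captured before the loop, which had left the final newline write with a stale value. The same accumulation idiom in plain C, with snprintf standing in for the kernel's scnprintf (whose return value never exceeds the remaining space, which is what keeps this arithmetic safe on truncation):

/* build: cc -o policies policies.c && ./policies */
#include <stdio.h>

int main(void)
{
	const char *names[] = { "step_wise", "fair_share", "user_space" };
	char buf[128];
	size_t count = 0;

	for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		/* recompute the remaining space each time; caching it before
		 * the loop goes stale as count grows */
		count += snprintf(buf + count, sizeof(buf) - count, "%s ",
				  names[i]);

	count += snprintf(buf + count, sizeof(buf) - count, "\n");

	fputs(buf, stdout);	/* prints: step_wise fair_share user_space */
	return 0;
}
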
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 891fd8345e25..73e5f1dbd075 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -106,7 +106,7 @@
#define UART_OMAP_EFR2_TIMEOUT_BEHAVE BIT(6)
/* RX FIFO occupancy indicator */
-#define UART_OMAP_RX_LVL 0x64
+#define UART_OMAP_RX_LVL 0x19
struct omap8250_priv {
int line;
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231de29a6452..ab226da75f7b 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -163,7 +163,7 @@ static unsigned int mvebu_uart_tx_empty(struct uart_port *port)
st = readl(port->membase + UART_STAT);
spin_unlock_irqrestore(&port->lock, flags);
- return (st & STAT_TX_FIFO_EMP) ? TIOCSER_TEMT : 0;
+ return (st & STAT_TX_EMP) ? TIOCSER_TEMT : 0;
}
static unsigned int mvebu_uart_get_mctrl(struct uart_port *port)
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index a9acd93e85b7..25c558e65ece 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -438,8 +438,8 @@ static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
-static void get_signals(struct slgt_info *info);
-static void set_signals(struct slgt_info *info);
+static void get_gtsignals(struct slgt_info *info);
+static void set_gtsignals(struct slgt_info *info);
static void set_rate(struct slgt_info *info, u32 data_rate);
static void bh_transmit(struct slgt_info *info);
@@ -720,7 +720,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -730,7 +730,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
if (!C_CRTSCTS(tty) || !tty_throttled(tty))
info->signals |= SerialSignal_RTS;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -1181,7 +1181,7 @@ static inline void line_info(struct seq_file *m, struct slgt_info *info)
/* output current serial signal states */
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
stat_buf[0] = 0;
@@ -1281,7 +1281,7 @@ static void throttle(struct tty_struct * tty)
if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals &= ~SerialSignal_RTS;
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1306,7 +1306,7 @@ static void unthrottle(struct tty_struct * tty)
if (C_CRTSCTS(tty)) {
spin_lock_irqsave(&info->lock,flags);
info->signals |= SerialSignal_RTS;
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
}
@@ -1477,7 +1477,7 @@ static int hdlcdev_open(struct net_device *dev)
/* inform generic HDLC layer of current DCD status */
spin_lock_irqsave(&info->lock, flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock, flags);
if (info->signals & SerialSignal_DCD)
netif_carrier_on(dev);
@@ -2229,7 +2229,7 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
info->signals &= ~SerialSignal_RTS;
info->drop_rts_on_tx_done = false;
- set_signals(info);
+ set_gtsignals(info);
}
#if SYNCLINK_GENERIC_HDLC
@@ -2394,7 +2394,7 @@ static void shutdown(struct slgt_info *info)
if (!info->port.tty || info->port.tty->termios.c_cflag & HUPCL) {
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_gtsignals(info);
}
flush_cond_wait(&info->gpio_wait_q);
@@ -2422,7 +2422,7 @@ static void program_hw(struct slgt_info *info)
else
async_mode(info);
- set_signals(info);
+ set_gtsignals(info);
info->dcd_chkcount = 0;
info->cts_chkcount = 0;
@@ -2430,7 +2430,7 @@ static void program_hw(struct slgt_info *info)
info->dsr_chkcount = 0;
slgt_irq_on(info, IRQ_DCD | IRQ_CTS | IRQ_DSR | IRQ_RI);
- get_signals(info);
+ get_gtsignals(info);
if (info->netcount ||
(info->port.tty && info->port.tty->termios.c_cflag & CREAD))
@@ -2667,7 +2667,7 @@ static int wait_mgsl_event(struct slgt_info *info, int __user *mask_ptr)
spin_lock_irqsave(&info->lock,flags);
/* return immediately if state matches requested events */
- get_signals(info);
+ get_gtsignals(info);
s = info->signals;
events = mask &
@@ -3085,7 +3085,7 @@ static int tiocmget(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
result = ((info->signals & SerialSignal_RTS) ? TIOCM_RTS:0) +
@@ -3124,7 +3124,7 @@ static int tiocmset(struct tty_struct *tty,
info->signals &= ~SerialSignal_DTR;
spin_lock_irqsave(&info->lock,flags);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
return 0;
}
@@ -3135,7 +3135,7 @@ static int carrier_raised(struct tty_port *port)
struct slgt_info *info = container_of(port, struct slgt_info, port);
spin_lock_irqsave(&info->lock,flags);
- get_signals(info);
+ get_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
return (info->signals & SerialSignal_DCD) ? 1 : 0;
}
@@ -3150,7 +3150,7 @@ static void dtr_rts(struct tty_port *port, int on)
info->signals |= SerialSignal_RTS | SerialSignal_DTR;
else
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_gtsignals(info);
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -3948,10 +3948,10 @@ static void tx_start(struct slgt_info *info)
if (info->params.mode != MGSL_MODE_ASYNC) {
if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
- get_signals(info);
+ get_gtsignals(info);
if (!(info->signals & SerialSignal_RTS)) {
info->signals |= SerialSignal_RTS;
- set_signals(info);
+ set_gtsignals(info);
info->drop_rts_on_tx_done = true;
}
}
@@ -4005,7 +4005,7 @@ static void reset_port(struct slgt_info *info)
rx_stop(info);
info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
- set_signals(info);
+ set_gtsignals(info);
slgt_irq_off(info, IRQ_ALL | IRQ_MASTER);
}
@@ -4427,7 +4427,7 @@ static void tx_set_idle(struct slgt_info *info)
/*
* get state of V24 status (input) signals
*/
-static void get_signals(struct slgt_info *info)
+static void get_gtsignals(struct slgt_info *info)
{
unsigned short status = rd_reg16(info, SSR);
@@ -4489,7 +4489,7 @@ static void msc_set_vcr(struct slgt_info *info)
/*
* set state of V24 control (output) signals
*/
-static void set_signals(struct slgt_info *info)
+static void set_gtsignals(struct slgt_info *info)
{
unsigned char val = rd_reg8(info, VCR);
if (info->signals & SerialSignal_DTR)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 756a4bfa6a69..3e4e0b20b4bb 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -812,7 +812,6 @@ void tty_ldisc_release(struct tty_struct *tty)
tty_ldisc_debug(tty, "released\n");
}
-EXPORT_SYMBOL_GPL(tty_ldisc_release);
/**
* tty_ldisc_init - ldisc setup for new tty
diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c
index 5d8c982019af..1f3b4a142212 100644
--- a/drivers/usb/cdns3/cdns3-gadget.c
+++ b/drivers/usb/cdns3/cdns3-gadget.c
@@ -1100,6 +1100,19 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep,
return 0;
}
+static void cdns3_rearm_drdy_if_needed(struct cdns3_endpoint *priv_ep)
+{
+ struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
+
+ if (priv_dev->dev_ver < DEV_VER_V3)
+ return;
+
+ if (readl(&priv_dev->regs->ep_sts) & EP_STS_TRBERR) {
+ writel(EP_STS_TRBERR, &priv_dev->regs->ep_sts);
+ writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ }
+}
+
/**
* cdns3_ep_run_transfer - start transfer on no-default endpoint hardware
* @priv_ep: endpoint object
@@ -1351,6 +1364,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
/*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/
writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts);
writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd);
+ cdns3_rearm_drdy_if_needed(priv_ep);
trace_cdns3_doorbell_epx(priv_ep->name,
readl(&priv_dev->regs->ep_traddr));
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 8bbd8e29e60d..4e2f1552f4b7 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -726,7 +726,8 @@ static void acm_port_destruct(struct tty_port *port)
{
struct acm *acm = container_of(port, struct acm, port);
- acm_release_minor(acm);
+ if (acm->minor != ACM_MINOR_INVALID)
+ acm_release_minor(acm);
usb_put_intf(acm->control);
kfree(acm->country_codes);
kfree(acm);
@@ -1323,8 +1324,10 @@ made_compressed_probe:
usb_get_intf(acm->control); /* undone in destruct() */
minor = acm_alloc_minor(acm);
- if (minor < 0)
+ if (minor < 0) {
+ acm->minor = ACM_MINOR_INVALID;
goto err_put_port;
+ }
acm->minor = minor;
acm->dev = usb_dev;
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 8aef5eb769a0..3aa7f0a3ad71 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -22,6 +22,8 @@
#define ACM_TTY_MAJOR 166
#define ACM_TTY_MINORS 256
+#define ACM_MINOR_INVALID ACM_TTY_MINORS
+
/*
* Requests.
*/
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 0f8b7c93310e..7ee6e4cc0d89 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2761,6 +2761,26 @@ static void usb_put_invalidate_rhdev(struct usb_hcd *hcd)
}
/**
+ * usb_stop_hcd - Halt the HCD
+ * @hcd: the usb_hcd that has to be halted
+ *
+ * Stop the root-hub polling timer and invoke the HCD's ->stop callback.
+ */
+static void usb_stop_hcd(struct usb_hcd *hcd)
+{
+ hcd->rh_pollable = 0;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+
+ hcd->driver->stop(hcd);
+ hcd->state = HC_STATE_HALT;
+
+ /* In case the HCD restarted the timer, stop it again. */
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ del_timer_sync(&hcd->rh_timer);
+}
+
+/**
* usb_add_hcd - finish generic HCD structure initialization and register
* @hcd: the usb_hcd structure to initialize
* @irqnum: Interrupt line to allocate
@@ -2775,6 +2795,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
{
int retval;
struct usb_device *rhdev;
+ struct usb_hcd *shared_hcd;
if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
@@ -2935,24 +2956,31 @@ int usb_add_hcd(struct usb_hcd *hcd,
goto err_hcd_driver_start;
}
+ /* starting here, usbcore will pay attention to the shared HCD roothub */
+ shared_hcd = hcd->shared_hcd;
+ if (!usb_hcd_is_primary_hcd(hcd) && shared_hcd && HCD_DEFER_RH_REGISTER(shared_hcd)) {
+ retval = register_root_hub(shared_hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
+
+ if (shared_hcd->uses_new_polling && HCD_POLL_RH(shared_hcd))
+ usb_hcd_poll_rh_status(shared_hcd);
+ }
+
/* starting here, usbcore will pay attention to this root hub */
- retval = register_root_hub(hcd);
- if (retval != 0)
- goto err_register_root_hub;
+ if (!HCD_DEFER_RH_REGISTER(hcd)) {
+ retval = register_root_hub(hcd);
+ if (retval != 0)
+ goto err_register_root_hub;
- if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
- usb_hcd_poll_rh_status(hcd);
+ if (hcd->uses_new_polling && HCD_POLL_RH(hcd))
+ usb_hcd_poll_rh_status(hcd);
+ }
return retval;
err_register_root_hub:
- hcd->rh_pollable = 0;
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
- hcd->driver->stop(hcd);
- hcd->state = HC_STATE_HALT;
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
+ usb_stop_hcd(hcd);
err_hcd_driver_start:
if (usb_hcd_is_primary_hcd(hcd) && hcd->irq > 0)
free_irq(irqnum, hcd);
@@ -2985,6 +3013,7 @@ EXPORT_SYMBOL_GPL(usb_add_hcd);
void usb_remove_hcd(struct usb_hcd *hcd)
{
struct usb_device *rhdev = hcd->self.root_hub;
+ bool rh_registered;
dev_info(hcd->self.controller, "remove, state %x\n", hcd->state);
@@ -2995,6 +3024,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
dev_dbg(hcd->self.controller, "roothub graceful disconnect\n");
spin_lock_irq (&hcd_root_hub_lock);
+ rh_registered = hcd->rh_registered;
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
@@ -3004,7 +3034,8 @@ void usb_remove_hcd(struct usb_hcd *hcd)
cancel_work_sync(&hcd->died_work);
mutex_lock(&usb_bus_idr_lock);
- usb_disconnect(&rhdev); /* Sets rhdev to NULL */
+ if (rh_registered)
+ usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_idr_lock);
/*
@@ -3022,16 +3053,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
* interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
* the hub_status_data() callback.
*/
- hcd->rh_pollable = 0;
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
-
- hcd->driver->stop(hcd);
- hcd->state = HC_STATE_HALT;
-
- /* In case the HCD restarted the timer, stop it again. */
- clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- del_timer_sync(&hcd->rh_timer);
+ usb_stop_hcd(hcd);
if (usb_hcd_is_primary_hcd(hcd)) {
if (hcd->irq > 0)
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 837237e4bc96..11d85a6e0b0d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -115,10 +115,16 @@ static inline bool using_desc_dma(struct dwc2_hsotg *hsotg)
*/
static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
+
hs_ep->target_frame += hs_ep->interval;
- if (hs_ep->target_frame > DSTS_SOFFN_LIMIT) {
+ if (hs_ep->target_frame > limit) {
hs_ep->frame_overrun = true;
- hs_ep->target_frame &= DSTS_SOFFN_LIMIT;
+ hs_ep->target_frame &= limit;
} else {
hs_ep->frame_overrun = false;
}
@@ -136,10 +142,16 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
*/
static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
{
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
+
if (hs_ep->target_frame)
hs_ep->target_frame -= 1;
else
- hs_ep->target_frame = DSTS_SOFFN_LIMIT;
+ hs_ep->target_frame = limit;
}
/**
@@ -1018,6 +1030,12 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
dwc2_writel(hsotg, ctrl, depctl);
}
+static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep);
+static void dwc2_hsotg_complete_request(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep,
+ struct dwc2_hsotg_req *hs_req,
+ int result);
+
/**
* dwc2_hsotg_start_req - start a USB request from an endpoint's queue
* @hsotg: The controller state.
@@ -1170,14 +1188,19 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
}
}
- if (hs_ep->isochronous && hs_ep->interval == 1) {
- hs_ep->target_frame = dwc2_hsotg_read_frameno(hsotg);
- dwc2_gadget_incr_frame_num(hs_ep);
-
- if (hs_ep->target_frame & 0x1)
- ctrl |= DXEPCTL_SETODDFR;
- else
- ctrl |= DXEPCTL_SETEVENFR;
+ if (hs_ep->isochronous) {
+ if (!dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ if (hs_ep->interval == 1) {
+ if (hs_ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
+ }
+ ctrl |= DXEPCTL_CNAK;
+ } else {
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+ return;
+ }
}
ctrl |= DXEPCTL_EPENA; /* ensure ep enabled */
@@ -1325,12 +1348,16 @@ static bool dwc2_gadget_target_frame_elapsed(struct dwc2_hsotg_ep *hs_ep)
u32 target_frame = hs_ep->target_frame;
u32 current_frame = hsotg->frame_number;
bool frame_overrun = hs_ep->frame_overrun;
+ u16 limit = DSTS_SOFFN_LIMIT;
+
+ if (hsotg->gadget.speed != USB_SPEED_HIGH)
+ limit >>= 3;
if (!frame_overrun && current_frame >= target_frame)
return true;
if (frame_overrun && current_frame >= target_frame &&
- ((current_frame - target_frame) < DSTS_SOFFN_LIMIT / 2))
+ ((current_frame - target_frame) < limit / 2))
return true;
return false;
@@ -1713,11 +1740,9 @@ static struct dwc2_hsotg_req *get_ep_head(struct dwc2_hsotg_ep *hs_ep)
*/
static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
{
- u32 mask;
struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
struct dwc2_hsotg_req *hs_req;
- u32 epmsk_reg = dir_in ? DIEPMSK : DOEPMSK;
if (!list_empty(&hs_ep->queue)) {
hs_req = get_ep_head(hs_ep);
@@ -1733,9 +1758,6 @@ static void dwc2_gadget_start_next_request(struct dwc2_hsotg_ep *hs_ep)
} else {
dev_dbg(hsotg->dev, "%s: No more ISOC-OUT requests\n",
__func__);
- mask = dwc2_readl(hsotg, epmsk_reg);
- mask |= DOEPMSK_OUTTKNEPDISMSK;
- dwc2_writel(hsotg, mask, epmsk_reg);
}
}
@@ -2306,19 +2328,6 @@ static void dwc2_hsotg_ep0_zlp(struct dwc2_hsotg *hsotg, bool dir_in)
dwc2_hsotg_program_zlp(hsotg, hsotg->eps_out[0]);
}
-static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg,
- u32 epctl_reg)
-{
- u32 ctrl;
-
- ctrl = dwc2_readl(hsotg, epctl_reg);
- if (ctrl & DXEPCTL_EOFRNUM)
- ctrl |= DXEPCTL_SETEVENFR;
- else
- ctrl |= DXEPCTL_SETODDFR;
- dwc2_writel(hsotg, ctrl, epctl_reg);
-}
-
/*
* dwc2_gadget_get_xfersize_ddma - get transferred bytes amount from desc
* @hs_ep - The endpoint on which transfer went
@@ -2439,20 +2448,11 @@ static void dwc2_hsotg_handle_outdone(struct dwc2_hsotg *hsotg, int epnum)
dwc2_hsotg_ep0_zlp(hsotg, true);
}
- /*
- * Slave mode OUT transfers do not go through XferComplete so
- * adjust the ISOC parity here.
- */
- if (!using_dma(hsotg)) {
- if (hs_ep->isochronous && hs_ep->interval == 1)
- dwc2_hsotg_change_ep_iso_parity(hsotg, DOEPCTL(epnum));
- else if (hs_ep->isochronous && hs_ep->interval > 1)
- dwc2_gadget_incr_frame_num(hs_ep);
- }
-
/* Set actual frame number for completed transfers */
- if (!using_desc_dma(hsotg) && hs_ep->isochronous)
- req->frame_number = hsotg->frame_number;
+ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+ req->frame_number = hs_ep->target_frame;
+ dwc2_gadget_incr_frame_num(hs_ep);
+ }
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, result);
}
@@ -2766,6 +2766,12 @@ static void dwc2_hsotg_complete_in(struct dwc2_hsotg *hsotg,
return;
}
+ /* Set actual frame number for completed transfers */
+ if (!using_desc_dma(hsotg) && hs_ep->isochronous) {
+ hs_req->req.frame_number = hs_ep->target_frame;
+ dwc2_gadget_incr_frame_num(hs_ep);
+ }
+
dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
}
@@ -2826,23 +2832,18 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
- if (hs_ep->isochronous) {
- dwc2_hsotg_complete_in(hsotg, hs_ep);
- return;
- }
-
if ((epctl & DXEPCTL_STALL) && (epctl & DXEPCTL_EPTYPE_BULK)) {
int dctl = dwc2_readl(hsotg, DCTL);
dctl |= DCTL_CGNPINNAK;
dwc2_writel(hsotg, dctl, DCTL);
}
- return;
- }
+ } else {
- if (dctl & DCTL_GOUTNAKSTS) {
- dctl |= DCTL_CGOUTNAK;
- dwc2_writel(hsotg, dctl, DCTL);
+ if (dctl & DCTL_GOUTNAKSTS) {
+ dctl |= DCTL_CGOUTNAK;
+ dwc2_writel(hsotg, dctl, DCTL);
+ }
}
if (!hs_ep->isochronous)
@@ -2863,8 +2864,6 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
/* Update current frame number value. */
hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
} while (dwc2_gadget_target_frame_elapsed(hs_ep));
-
- dwc2_gadget_start_next_request(hs_ep);
}
/**
@@ -2881,8 +2880,8 @@ static void dwc2_gadget_handle_ep_disabled(struct dwc2_hsotg_ep *hs_ep)
static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
{
struct dwc2_hsotg *hsotg = ep->parent;
+ struct dwc2_hsotg_req *hs_req;
int dir_in = ep->dir_in;
- u32 doepmsk;
if (dir_in || !ep->isochronous)
return;
@@ -2896,28 +2895,39 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
return;
}
- if (ep->interval > 1 &&
- ep->target_frame == TARGET_FRAME_INITIAL) {
+ if (ep->target_frame == TARGET_FRAME_INITIAL) {
u32 ctrl;
ep->target_frame = hsotg->frame_number;
- dwc2_gadget_incr_frame_num(ep);
+ if (ep->interval > 1) {
+ ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
+ if (ep->target_frame & 0x1)
+ ctrl |= DXEPCTL_SETODDFR;
+ else
+ ctrl |= DXEPCTL_SETEVENFR;
- ctrl = dwc2_readl(hsotg, DOEPCTL(ep->index));
- if (ep->target_frame & 0x1)
- ctrl |= DXEPCTL_SETODDFR;
- else
- ctrl |= DXEPCTL_SETEVENFR;
+ dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+ }
+ }
+
+ while (dwc2_gadget_target_frame_elapsed(ep)) {
+ hs_req = get_ep_head(ep);
+ if (hs_req)
+ dwc2_hsotg_complete_request(hsotg, ep, hs_req, -ENODATA);
- dwc2_writel(hsotg, ctrl, DOEPCTL(ep->index));
+ dwc2_gadget_incr_frame_num(ep);
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
}
- dwc2_gadget_start_next_request(ep);
- doepmsk = dwc2_readl(hsotg, DOEPMSK);
- doepmsk &= ~DOEPMSK_OUTTKNEPDISMSK;
- dwc2_writel(hsotg, doepmsk, DOEPMSK);
+ if (!ep->req)
+ dwc2_gadget_start_next_request(ep);
+
}
+static void dwc2_hsotg_ep_stop_xfr(struct dwc2_hsotg *hsotg,
+ struct dwc2_hsotg_ep *hs_ep);
+
/**
* dwc2_gadget_handle_nak - handle NAK interrupt
* @hs_ep: The endpoint on which interrupt is asserted.
@@ -2935,7 +2945,9 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
{
struct dwc2_hsotg *hsotg = hs_ep->parent;
+ struct dwc2_hsotg_req *hs_req;
int dir_in = hs_ep->dir_in;
+ u32 ctrl;
if (!dir_in || !hs_ep->isochronous)
return;
@@ -2977,13 +2989,29 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
dwc2_writel(hsotg, ctrl, DIEPCTL(hs_ep->index));
}
-
- dwc2_hsotg_complete_request(hsotg, hs_ep,
- get_ep_head(hs_ep), 0);
}
- if (!using_desc_dma(hsotg))
+ if (using_desc_dma(hsotg))
+ return;
+
+ ctrl = dwc2_readl(hsotg, DIEPCTL(hs_ep->index));
+ if (ctrl & DXEPCTL_EPENA)
+ dwc2_hsotg_ep_stop_xfr(hsotg, hs_ep);
+ else
+ dwc2_hsotg_txfifo_flush(hsotg, hs_ep->fifo_index);
+
+ while (dwc2_gadget_target_frame_elapsed(hs_ep)) {
+ hs_req = get_ep_head(hs_ep);
+ if (hs_req)
+ dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, -ENODATA);
+
dwc2_gadget_incr_frame_num(hs_ep);
+ /* Update current frame number value. */
+ hsotg->frame_number = dwc2_hsotg_read_frameno(hsotg);
+ }
+
+ if (!hs_ep->req)
+ dwc2_gadget_start_next_request(hs_ep);
}
/**
@@ -3039,21 +3067,15 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
/* In DDMA handle isochronous requests separately */
if (using_desc_dma(hsotg) && hs_ep->isochronous) {
- /* XferCompl set along with BNA */
- if (!(ints & DXEPINT_BNAINTR))
- dwc2_gadget_complete_isoc_request_ddma(hs_ep);
+ dwc2_gadget_complete_isoc_request_ddma(hs_ep);
} else if (dir_in) {
/*
* We get OutDone from the FIFO, so we only
* need to look at completing IN requests here
* if operating slave mode
*/
- if (hs_ep->isochronous && hs_ep->interval > 1)
- dwc2_gadget_incr_frame_num(hs_ep);
-
- dwc2_hsotg_complete_in(hsotg, hs_ep);
- if (ints & DXEPINT_NAKINTRPT)
- ints &= ~DXEPINT_NAKINTRPT;
+ if (!hs_ep->isochronous || !(ints & DXEPINT_NAKINTRPT))
+ dwc2_hsotg_complete_in(hsotg, hs_ep);
if (idx == 0 && !hs_ep->req)
dwc2_hsotg_enqueue_setup(hsotg);
@@ -3062,10 +3084,8 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
* We're using DMA, we need to fire an OutDone here
* as we ignore the RXFIFO.
*/
- if (hs_ep->isochronous && hs_ep->interval > 1)
- dwc2_gadget_incr_frame_num(hs_ep);
-
- dwc2_hsotg_handle_outdone(hsotg, idx);
+ if (!hs_ep->isochronous || !(ints & DXEPINT_OUTTKNEPDIS))
+ dwc2_hsotg_handle_outdone(hsotg, idx);
}
}
@@ -4085,6 +4105,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
mask |= DIEPMSK_NAKMSK;
dwc2_writel(hsotg, mask, DIEPMSK);
} else {
+ epctrl |= DXEPCTL_SNAK;
mask = dwc2_readl(hsotg, DOEPMSK);
mask |= DOEPMSK_OUTTKNEPDISMSK;
dwc2_writel(hsotg, mask, DOEPMSK);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 2a7828971d05..a215ec9e172e 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -5191,6 +5191,10 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
hcd->has_tt = 1;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ retval = -EINVAL;
+ goto error1;
+ }
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 01866dcb953b..0104a80b185e 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -264,19 +264,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
{
u32 reg;
int retries = 1000;
- int ret;
-
- usb_phy_init(dwc->usb2_phy);
- usb_phy_init(dwc->usb3_phy);
- ret = phy_init(dwc->usb2_generic_phy);
- if (ret < 0)
- return ret;
-
- ret = phy_init(dwc->usb3_generic_phy);
- if (ret < 0) {
- phy_exit(dwc->usb2_generic_phy);
- return ret;
- }
/*
* We're resetting only the device side because, if we're in host mode,
@@ -310,9 +297,6 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
udelay(1);
} while (--retries);
- phy_exit(dwc->usb3_generic_phy);
- phy_exit(dwc->usb2_generic_phy);
-
return -ETIMEDOUT;
done:
@@ -982,9 +966,21 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->phys_ready = true;
}
+ usb_phy_init(dwc->usb2_phy);
+ usb_phy_init(dwc->usb3_phy);
+ ret = phy_init(dwc->usb2_generic_phy);
+ if (ret < 0)
+ goto err0a;
+
+ ret = phy_init(dwc->usb3_generic_phy);
+ if (ret < 0) {
+ phy_exit(dwc->usb2_generic_phy);
+ goto err0a;
+ }
+
ret = dwc3_core_soft_reset(dwc);
if (ret)
- goto err0a;
+ goto err1;
if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD &&
!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A)) {
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3c34995276e7..be864560bfea 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -406,6 +406,14 @@ static struct usb_endpoint_descriptor ss_epin_fback_desc = {
.bInterval = 4,
};
+static struct usb_ss_ep_comp_descriptor ss_epin_fback_desc_comp = {
+ .bLength = sizeof(ss_epin_fback_desc_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 0,
+ .bmAttributes = 0,
+ .wBytesPerInterval = cpu_to_le16(4),
+};
+
/* Audio Streaming IN Interface - Alt0 */
static struct usb_interface_descriptor std_as_in_if0_desc = {
@@ -597,6 +605,7 @@ static struct usb_descriptor_header *ss_audio_desc[] = {
(struct usb_descriptor_header *)&ss_epout_desc_comp,
(struct usb_descriptor_header *)&as_iso_out_desc,
(struct usb_descriptor_header *)&ss_epin_fback_desc,
+ (struct usb_descriptor_header *)&ss_epin_fback_desc_comp,
(struct usb_descriptor_header *)&std_as_in_if0_desc,
(struct usb_descriptor_header *)&std_as_in_if1_desc,
@@ -705,6 +714,7 @@ static void setup_headers(struct f_uac2_opts *opts,
{
struct usb_ss_ep_comp_descriptor *epout_desc_comp = NULL;
struct usb_ss_ep_comp_descriptor *epin_desc_comp = NULL;
+ struct usb_ss_ep_comp_descriptor *epin_fback_desc_comp = NULL;
struct usb_endpoint_descriptor *epout_desc;
struct usb_endpoint_descriptor *epin_desc;
struct usb_endpoint_descriptor *epin_fback_desc;
@@ -730,6 +740,7 @@ static void setup_headers(struct f_uac2_opts *opts,
epout_desc_comp = &ss_epout_desc_comp;
epin_desc_comp = &ss_epin_desc_comp;
epin_fback_desc = &ss_epin_fback_desc;
+ epin_fback_desc_comp = &ss_epin_fback_desc_comp;
ep_int_desc = &ss_ep_int_desc;
}
@@ -773,8 +784,11 @@ static void setup_headers(struct f_uac2_opts *opts,
headers[i++] = USBDHDR(&as_iso_out_desc);
- if (EPOUT_FBACK_IN_EN(opts))
+ if (EPOUT_FBACK_IN_EN(opts)) {
headers[i++] = USBDHDR(epin_fback_desc);
+ if (epin_fback_desc_comp)
+ headers[i++] = USBDHDR(epin_fback_desc_comp);
+ }
}
if (EPIN_EN(opts)) {
@@ -1164,6 +1178,9 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
agdev->out_ep_maxpsize = max_t(u16, agdev->out_ep_maxpsize,
le16_to_cpu(ss_epout_desc.wMaxPacketSize));
+ ss_epin_desc_comp.wBytesPerInterval = ss_epin_desc.wMaxPacketSize;
+ ss_epout_desc_comp.wBytesPerInterval = ss_epout_desc.wMaxPacketSize;
+
// HS and SS endpoint addresses are copied from autoconfigured FS descriptors
hs_ep_int_desc.bEndpointAddress = fs_ep_int_desc.bEndpointAddress;
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 32ef22857083..ad16163b5ff8 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -96,11 +96,13 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
};
static void u_audio_set_fback_frequency(enum usb_device_speed speed,
+ struct usb_ep *out_ep,
unsigned long long freq,
unsigned int pitch,
void *buf)
{
u32 ff = 0;
+ const struct usb_endpoint_descriptor *ep_desc;
/*
* Because the pitch base is 1000000, the final divider here
@@ -128,8 +130,13 @@ static void u_audio_set_fback_frequency(enum usb_device_speed speed,
* byte format (that is Q16.16)
*
* ff = (freq << 16) / 8000
+ *
+ * Win10 and OSX UAC2 drivers require the number of samples per packet
+ * in order to honor the feedback value.
+ * Linux snd-usb-audio detects the applied bit-shift automatically.
*/
- freq <<= 4;
+ ep_desc = out_ep->desc;
+ freq <<= 4 + (ep_desc->bInterval - 1);
}
ff = DIV_ROUND_CLOSEST_ULL((freq * pitch), 1953125);
@@ -267,7 +274,7 @@ static void u_audio_iso_fback_complete(struct usb_ep *ep,
pr_debug("%s: iso_complete status(%d) %d/%d\n",
__func__, status, req->actual, req->length);
- u_audio_set_fback_frequency(audio_dev->gadget->speed,
+ u_audio_set_fback_frequency(audio_dev->gadget->speed, audio_dev->out_ep,
params->c_srate, prm->pitch,
req->buf);
@@ -526,7 +533,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
* be measured at start of playback
*/
prm->pitch = 1000000;
- u_audio_set_fback_frequency(audio_dev->gadget->speed,
+ u_audio_set_fback_frequency(audio_dev->gadget->speed, ep,
params->c_srate, prm->pitch,
req_fback->buf);
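
Because 8000 * 1000000 = 1953125 << 12, the (freq << 16) / 8000 scaling with the
pitch base of 1000000 collapses to (freq << 4) * pitch / 1953125, and the extra
(bInterval - 1) shift added in the u_audio.c hunk above converts samples per
microframe into samples per packet. The following standalone sketch (an
illustration only, not part of the patch; the helper name and the assumed
bInterval of 4, i.e. one packet every 8 microframes, are made up here)
reproduces that arithmetic for a 48 kHz stream at nominal pitch:

/* Sketch of the high-speed feedback value computed by the patched driver. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hs_fback_value(uint64_t freq, uint64_t pitch, unsigned int bInterval)
{
	/* (freq << 16) / 8000 with pitch/1000000 applied, folded so the
	 * divider becomes 1953125; the (bInterval - 1) shift turns
	 * samples/microframe into samples/packet. */
	freq <<= 4 + (bInterval - 1);
	return (uint32_t)((freq * pitch + 1953125 / 2) / 1953125);
}

int main(void)
{
	/* 48 kHz, pitch 1000000, bInterval 4 -> 48 samples per 1 ms packet
	 * in Q16.16, i.e. 0x00300000. */
	printf("0x%08x\n", hs_fback_value(48000, 1000000, 4));
	return 0;
}

With this per-packet encoding the Windows and macOS UAC2 hosts track the rate
correctly, while Linux's snd-usb-audio detects the applied shift either way.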
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index 65cae4883454..38e4d6b505a0 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -1250,7 +1250,7 @@ static void set_feature(struct r8a66597 *r8a66597, struct usb_ctrlrequest *ctrl)
do {
tmp = r8a66597_read(r8a66597, INTSTS0) & CTSQ;
udelay(1);
- } while (tmp != CS_IDST || timeout-- > 0);
+ } while (tmp != CS_IDST && timeout-- > 0);
if (tmp == CS_IDST)
r8a66597_bset(r8a66597,
diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c
index 337b425dd4b0..2df52f75f6b3 100644
--- a/drivers/usb/host/bcma-hcd.c
+++ b/drivers/usb/host/bcma-hcd.c
@@ -406,12 +406,9 @@ static int bcma_hcd_probe(struct bcma_device *core)
return -ENOMEM;
usb_dev->core = core;
- if (core->dev.of_node) {
+ if (core->dev.of_node)
usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc",
GPIOD_OUT_HIGH);
- if (IS_ERR(usb_dev->gpio_desc))
- return PTR_ERR(usb_dev->gpio_desc);
- }
switch (core->id.id) {
case BCMA_CORE_USB20_HOST:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 6bdc6d6bf74d..1776c05d0a48 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -26,6 +26,7 @@
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
@@ -1278,29 +1279,39 @@ MODULE_LICENSE ("GPL");
#ifdef CONFIG_USB_EHCI_SH
#include "ehci-sh.c"
-#define PLATFORM_DRIVER ehci_hcd_sh_driver
#endif
#ifdef CONFIG_PPC_PS3
#include "ehci-ps3.c"
-#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver
#endif
#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
#include "ehci-ppc-of.c"
-#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver
#endif
#ifdef CONFIG_XPS_USB_HCD_XILINX
#include "ehci-xilinx-of.c"
-#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver
#endif
#ifdef CONFIG_SPARC_LEON
#include "ehci-grlib.c"
-#define PLATFORM_DRIVER ehci_grlib_driver
#endif
+static struct platform_driver * const platform_drivers[] = {
+#ifdef CONFIG_USB_EHCI_SH
+ &ehci_hcd_sh_driver,
+#endif
+#ifdef CONFIG_USB_EHCI_HCD_PPC_OF
+ &ehci_hcd_ppc_of_driver,
+#endif
+#ifdef CONFIG_XPS_USB_HCD_XILINX
+ &ehci_hcd_xilinx_of_driver,
+#endif
+#ifdef CONFIG_SPARC_LEON
+ &ehci_grlib_driver,
+#endif
+};
+
static int __init ehci_hcd_init(void)
{
int retval = 0;
@@ -1324,47 +1335,23 @@ static int __init ehci_hcd_init(void)
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
#endif
-#ifdef PLATFORM_DRIVER
- retval = platform_driver_register(&PLATFORM_DRIVER);
+ retval = platform_register_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
if (retval < 0)
goto clean0;
-#endif
-
-#ifdef PS3_SYSTEM_BUS_DRIVER
- retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER);
- if (retval < 0)
- goto clean2;
-#endif
-#ifdef OF_PLATFORM_DRIVER
- retval = platform_driver_register(&OF_PLATFORM_DRIVER);
+#ifdef CONFIG_PPC_PS3
+ retval = ps3_ehci_driver_register(&ps3_ehci_driver);
if (retval < 0)
- goto clean3;
+ goto clean1;
#endif
-#ifdef XILINX_OF_PLATFORM_DRIVER
- retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER);
- if (retval < 0)
- goto clean4;
-#endif
- return retval;
+ return 0;
-#ifdef XILINX_OF_PLATFORM_DRIVER
- /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */
-clean4:
-#endif
-#ifdef OF_PLATFORM_DRIVER
- platform_driver_unregister(&OF_PLATFORM_DRIVER);
-clean3:
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
- ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
-clean2:
+#ifdef CONFIG_PPC_PS3
+clean1:
#endif
-#ifdef PLATFORM_DRIVER
- platform_driver_unregister(&PLATFORM_DRIVER);
+ platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
clean0:
-#endif
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
@@ -1376,18 +1363,10 @@ module_init(ehci_hcd_init);
static void __exit ehci_hcd_cleanup(void)
{
-#ifdef XILINX_OF_PLATFORM_DRIVER
- platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER);
-#endif
-#ifdef OF_PLATFORM_DRIVER
- platform_driver_unregister(&OF_PLATFORM_DRIVER);
-#endif
-#ifdef PLATFORM_DRIVER
- platform_driver_unregister(&PLATFORM_DRIVER);
-#endif
-#ifdef PS3_SYSTEM_BUS_DRIVER
- ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
+#ifdef CONFIG_PPC_PS3
+ ps3_ehci_driver_unregister(&ps3_ehci_driver);
#endif
+ platform_unregister_drivers(platform_drivers, ARRAY_SIZE(platform_drivers));
#ifdef CONFIG_DYNAMIC_DEBUG
debugfs_remove(ehci_debug_root);
#endif
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f3dabd02382c..93c38b557afd 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -692,6 +692,7 @@ int xhci_run(struct usb_hcd *hcd)
if (ret)
xhci_free_command(xhci, command);
}
+ set_bit(HCD_FLAG_DEFER_RH_REGISTER, &hcd->flags);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Finished xhci_run for USB2 roothub");
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index c42937692207..c968ecda42aa 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -190,6 +190,7 @@ tusb_fifo_write_unaligned(void __iomem *fifo, const u8 *buf, u16 len)
}
if (len > 0) {
/* Write the rest 1 - 3 bytes to FIFO */
+ val = 0;
memcpy(&val, buf, len);
musb_writel(fifo, 0, val);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 66a6ac50a4cd..189279869a8b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -233,6 +233,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1FB9, 0x0602) }, /* Lake Shore Model 648 Magnet Power Supply */
{ USB_DEVICE(0x1FB9, 0x0700) }, /* Lake Shore Model 737 VSM Controller */
{ USB_DEVICE(0x1FB9, 0x0701) }, /* Lake Shore Model 776 Hall Matrix */
+ { USB_DEVICE(0x2184, 0x0030) }, /* GW Instek GDM-834x Digital Multimeter */
{ USB_DEVICE(0x2626, 0xEA60) }, /* Aruba Networks 7xxx USB Serial Console */
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
@@ -258,6 +259,7 @@ struct cp210x_serial_private {
speed_t max_speed;
bool use_actual_rate;
bool no_flow_control;
+ bool no_event_mode;
};
enum cp210x_event_state {
@@ -1113,12 +1115,16 @@ static void cp210x_change_speed(struct tty_struct *tty,
static void cp210x_enable_event_mode(struct usb_serial_port *port)
{
+ struct cp210x_serial_private *priv = usb_get_serial_data(port->serial);
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
int ret;
if (port_priv->event_mode)
return;
+ if (priv->no_event_mode)
+ return;
+
port_priv->event_state = ES_DATA;
port_priv->event_mode = true;
@@ -2074,6 +2080,33 @@ static void cp210x_init_max_speed(struct usb_serial *serial)
priv->use_actual_rate = use_actual_rate;
}
+static void cp2102_determine_quirks(struct usb_serial *serial)
+{
+ struct cp210x_serial_private *priv = usb_get_serial_data(serial);
+ u8 *buf;
+ int ret;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return;
+ /*
+ * Some (possibly counterfeit) CP2102 devices do not support
+ * event-insertion mode and respond differently to malformed vendor requests.
+ * Specifically, they return one instead of two bytes when sent a
+ * two-byte part-number request.
+ */
+ ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ CP210X_VENDOR_SPECIFIC, REQTYPE_DEVICE_TO_HOST,
+ CP210X_GET_PARTNUM, 0, buf, 2, USB_CTRL_GET_TIMEOUT);
+ if (ret == 1) {
+ dev_dbg(&serial->interface->dev,
+ "device does not support event-insertion mode\n");
+ priv->no_event_mode = true;
+ }
+
+ kfree(buf);
+}
+
static int cp210x_get_fw_version(struct usb_serial *serial, u16 value)
{
struct cp210x_serial_private *priv = usb_get_serial_data(serial);
@@ -2108,7 +2141,12 @@ static void cp210x_determine_type(struct usb_serial *serial)
return;
}
+ dev_dbg(&serial->interface->dev, "partnum = 0x%02x\n", priv->partnum);
+
switch (priv->partnum) {
+ case CP210X_PARTNUM_CP2102:
+ cp2102_determine_quirks(serial);
+ break;
case CP210X_PARTNUM_CP2105:
case CP210X_PARTNUM_CP2108:
cp210x_get_fw_version(serial, CP210X_GET_FW_VER);
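
The quirk probe added to cp210x.c above can also be reproduced from userspace
to check a suspect adapter. The sketch below is illustrative only and rests on
assumptions: it uses libusb-1.0, the request/value constants mirror the
driver's defines, 0x10c4:0xea60 is the stock CP2102 ID, and the cp210x kernel
driver may need to be unbound from the port before the transfer succeeds.

/* Hypothetical userspace probe: a genuine CP2102 answers the two-byte
 * part-number request with two bytes, suspect parts with only one. */
#include <stdio.h>
#include <libusb-1.0/libusb.h>

#define CP210X_VENDOR_SPECIFIC	0xFF	/* mirrors the driver's define */
#define CP210X_GET_PARTNUM	0x370B	/* mirrors the driver's define */
#define REQTYPE_DEVICE_TO_HOST	0xC0	/* IN | vendor | device */

int main(void)
{
	libusb_device_handle *devh;
	unsigned char buf[2];
	int ret;

	if (libusb_init(NULL))
		return 1;

	devh = libusb_open_device_with_vid_pid(NULL, 0x10c4, 0xea60);
	if (!devh) {
		libusb_exit(NULL);
		return 1;
	}

	ret = libusb_control_transfer(devh, REQTYPE_DEVICE_TO_HOST,
				      CP210X_VENDOR_SPECIFIC,
				      CP210X_GET_PARTNUM, 0,
				      buf, sizeof(buf), 1000);
	printf("part-number request returned %d byte(s)%s\n", ret,
	       ret == 1 ? " - event-insertion mode likely unsupported" : "");

	libusb_close(devh);
	libusb_exit(NULL);
	return 0;
}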
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index d7fe33ca73e4..925067a7978d 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -107,7 +107,6 @@
#define BANDB_DEVICE_ID_USOPTL4_2P 0xBC02
#define BANDB_DEVICE_ID_USOPTL4_4 0xAC44
#define BANDB_DEVICE_ID_USOPTL4_4P 0xBC03
-#define BANDB_DEVICE_ID_USOPTL2_4 0xAC24
/* Interrupt Routine Defines */
@@ -186,7 +185,6 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_2P) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4) },
{ USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL4_4P) },
- { USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USOPTL2_4) },
{} /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 29c765cc8495..6cfb5d33609f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1205,6 +1205,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1056, 0xff), /* Telit FD980 */
.driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1060, 0xff), /* Telit LN920 (rmnet) */
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1061, 0xff), /* Telit LN920 (MBIM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1062, 0xff), /* Telit LN920 (RNDIS) */
+ .driver_info = NCTRL(2) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1650,7 +1658,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
@@ -2068,6 +2075,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
{ USB_DEVICE(0x0489, 0xe0b5), /* Foxconn T77W968 ESIM */
.driver_info = RSVD(0) | RSVD(1) | RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe0db, 0xff), /* Foxconn T99W265 MBIM */
+ .driver_info = RSVD(3) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 (IOT version) */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
{ USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index efa972be2ee3..c6b3fcf90180 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -416,9 +416,16 @@ UNUSUAL_DEV( 0x04cb, 0x0100, 0x0000, 0x2210,
USB_SC_UFI, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY | US_FL_SINGLE_LUN),
/*
- * Reported by Ondrej Zary <linux@rainbow-software.org>
+ * Reported by Ondrej Zary <linux@zary.sk>
* The device reports one sector more and breaks when that sector is accessed
+ * Firmwares older than 2.6c (the latest one and the only one that claims
+ * Linux support) also have broken tag handling
*/
+UNUSUAL_DEV( 0x04ce, 0x0002, 0x0000, 0x026b,
+ "ScanLogic",
+ "SL11R-IDE",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_FIX_CAPACITY | US_FL_BULK_IGNORE_TAG),
UNUSUAL_DEV( 0x04ce, 0x0002, 0x026c, 0x026c,
"ScanLogic",
"SL11R-IDE",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c35a6db993f1..4051c8cd0cd8 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -50,7 +50,7 @@ UNUSUAL_DEV(0x059f, 0x1061, 0x0000, 0x9999,
"LaCie",
"Rugged USB3-FW",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_IGNORE_UAS),
+ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
/*
* Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 294ba05e6fc9..bd56de7484dc 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -1714,6 +1714,9 @@ static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
+ if (!mvdev->actual_features)
+ return;
+
if (!is_index_valid(mvdev, idx))
return;
@@ -2145,6 +2148,8 @@ static void clear_vqs_ready(struct mlx5_vdpa_net *ndev)
for (i = 0; i < ndev->mvdev.max_vqs; i++)
ndev->vqs[i].ready = false;
+
+ ndev->mvdev.cvq.ready = false;
}
static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 29a38ecba19e..26e3d90d1e7c 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -665,13 +665,11 @@ static void vduse_vdpa_set_config(struct vdpa_device *vdpa, unsigned int offset,
static int vduse_vdpa_reset(struct vdpa_device *vdpa)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
-
- if (vduse_dev_set_status(dev, 0))
- return -EIO;
+ int ret = vduse_dev_set_status(dev, 0);
vduse_dev_reset(dev);
- return 0;
+ return ret;
}
static u32 vduse_vdpa_get_generation(struct vdpa_device *vdpa)
@@ -1593,8 +1591,10 @@ static int vduse_init(void)
vduse_irq_wq = alloc_workqueue("vduse-irq",
WQ_HIGHPRI | WQ_SYSFS | WQ_UNBOUND, 0);
- if (!vduse_irq_wq)
+ if (!vduse_irq_wq) {
+ ret = -ENOMEM;
goto err_wq;
+ }
ret = vduse_domain_init();
if (ret)
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index 68198e0f2a63..a03b5a99c2da 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -565,7 +565,7 @@ static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
}
struct vfio_pci_walk_info {
- int (*fn)(struct pci_dev *, void *data);
+ int (*fn)(struct pci_dev *pdev, void *data);
void *data;
struct pci_dev *pdev;
bool slot;
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index f41d081777f5..35927ceb26ff 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -640,7 +640,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
u64 offset, map_size, map_iova = iova;
struct vdpa_map_file *map_file;
struct vm_area_struct *vma;
- int ret;
+ int ret = 0;
mmap_read_lock(dev->mm);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 588e02fb91d3..0a5b54034d4b 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -345,8 +345,13 @@ static int virtio_device_of_init(struct virtio_device *dev)
ret = snprintf(compat, sizeof(compat), "virtio,device%x", dev->id.device);
BUG_ON(ret >= sizeof(compat));
+ /*
+ * On powerpc/pseries virtio devices are PCI devices so PCI
+ * vendor/device ids play the role of the "compatible" property.
+ * Simply don't init of_node in this case.
+ */
if (!of_device_is_compatible(np, compat)) {
- ret = -EINVAL;
+ ret = 0;
goto out;
}
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index b81fe4f7d434..bf59faeb3de1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1666,7 +1666,7 @@ config WDT_MTX1
config SIBYTE_WDOG
tristate "Sibyte SoC hardware watchdog"
- depends on CPU_SB1 || (MIPS && COMPILE_TEST)
+ depends on CPU_SB1
help
Watchdog driver for the built in watchdog hardware in Sibyte
SoC processors. There are apparently two watchdog timers
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index a37eb52fb401..22f5aff0c136 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -177,6 +177,7 @@ config XEN_GRANT_DMA_ALLOC
config SWIOTLB_XEN
def_bool y
+ depends on XEN_PV || ARM || ARM64
select DMA_OPS
select SWIOTLB
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 2d2803883306..43ebfe36ac27 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -522,8 +522,8 @@ static int balloon_thread(void *unused)
timeout = 3600 * HZ;
credit = current_credit();
- wait_event_interruptible_timeout(balloon_thread_wq,
- balloon_thread_cond(state, credit), timeout);
+ wait_event_freezable_timeout(balloon_thread_wq,
+ balloon_thread_cond(state, credit), timeout);
if (kthread_should_stop())
return 0;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 1e7f6b1c0c97..fec1b6537166 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -381,6 +381,14 @@ static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
map->unmap_ops[offset+i].handle,
map->unmap_ops[offset+i].status);
map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+ if (use_ptemod) {
+ if (map->kunmap_ops[offset+i].status)
+ err = -EINVAL;
+ pr_debug("kunmap handle=%u st=%d\n",
+ map->kunmap_ops[offset+i].handle,
+ map->kunmap_ops[offset+i].status);
+ map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;
+ }
}
return err;
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 8c10edf9efe6..e56a5faac395 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -230,10 +230,11 @@ retry:
/*
* Get IO TLB memory from any location.
*/
- start = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
+ start = memblock_alloc(PAGE_ALIGN(bytes),
+ IO_TLB_SEGSIZE << IO_TLB_SHIFT);
if (!start)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, PAGE_ALIGN(bytes), PAGE_SIZE);
+ panic("%s: Failed to allocate %lu bytes\n",
+ __func__, PAGE_ALIGN(bytes));
/*
* And replace that memory with pages under 4GB.
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index 7d9b23d981bf..1b4d5809808d 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -21,6 +21,37 @@
#include "internal.h"
/*
+ * Handle invalidation of an mmap'd file. We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+ struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+ unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+ struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+ struct afs_vnode *vnode;
+ struct afs_cell *cell = server->cell;
+
+ down_read(&cell->fs_open_mmaps_lock);
+
+ list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+ if (vnode->cb_server == server) {
+ clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+ queue_work(system_unbound_wq, &vnode->cb_work);
+ }
+ }
+
+ up_read(&cell->fs_open_mmaps_lock);
+}
+
+/*
* Allow the fileserver to request callback state (re-)initialisation.
* Unfortunately, UUIDs are not guaranteed unique.
*/
@@ -29,8 +60,11 @@ void afs_init_callback_state(struct afs_server *server)
rcu_read_lock();
do {
server->cb_s_break++;
- server = rcu_dereference(server->uuid_next);
- } while (0);
+ atomic_inc(&server->cell->fs_s_break);
+ if (!list_empty(&server->cell->fs_open_mmaps))
+ queue_work(system_unbound_wq, &server->initcb_work);
+
+ } while ((server = rcu_dereference(server->uuid_next)));
rcu_read_unlock();
}
@@ -44,11 +78,17 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
clear_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
vnode->cb_break++;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
afs_clear_permits(vnode);
if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
afs_lock_may_be_available(vnode);
+ if (reason != afs_cb_break_for_deleted &&
+ vnode->status.type == AFS_FTYPE_FILE &&
+ atomic_read(&vnode->cb_nr_mmap))
+ queue_work(system_unbound_wq, &vnode->cb_work);
+
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
} else {
trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 887b673f6223..d88407fb9bc0 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
seqlock_init(&cell->volume_lock);
cell->fs_servers = RB_ROOT;
seqlock_init(&cell->fs_lock);
+ INIT_LIST_HEAD(&cell->fs_open_mmaps);
+ init_rwsem(&cell->fs_open_mmaps_lock);
rwlock_init(&cell->vl_servers_lock);
cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index ac829e63c570..4579bbda4634 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -1077,9 +1077,9 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
*/
static int afs_d_revalidate_rcu(struct dentry *dentry)
{
- struct afs_vnode *dvnode, *vnode;
+ struct afs_vnode *dvnode;
struct dentry *parent;
- struct inode *dir, *inode;
+ struct inode *dir;
long dir_version, de_version;
_enter("%p", dentry);
@@ -1109,18 +1109,6 @@ static int afs_d_revalidate_rcu(struct dentry *dentry)
return -ECHILD;
}
- /* Check to see if the vnode referred to by the dentry still
- * has a callback.
- */
- if (d_really_is_positive(dentry)) {
- inode = d_inode_rcu(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- if (!afs_check_validity(vnode))
- return -ECHILD;
- }
- }
-
return 1; /* Still valid */
}
@@ -1156,17 +1144,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (IS_ERR(key))
key = NULL;
- if (d_really_is_positive(dentry)) {
- inode = d_inode(dentry);
- if (inode) {
- vnode = AFS_FS_I(inode);
- afs_validate(vnode, key);
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- goto out_bad;
- }
- }
-
- /* lock down the parent dentry so we can peer at it */
+ /* Hold the parent dentry so we can peer at it */
parent = dget_parent(dentry);
dir = AFS_FS_I(d_inode(parent));
@@ -1175,7 +1153,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (test_bit(AFS_VNODE_DELETED, &dir->flags)) {
_debug("%pd: parent dir deleted", dentry);
- goto out_bad_parent;
+ goto not_found;
}
/* We only need to invalidate a dentry if the server's copy changed
@@ -1201,12 +1179,12 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
case 0:
/* the filename maps to something */
if (d_really_is_negative(dentry))
- goto out_bad_parent;
+ goto not_found;
inode = d_inode(dentry);
if (is_bad_inode(inode)) {
printk("kAFS: afs_d_revalidate: %pd2 has bad inode\n",
dentry);
- goto out_bad_parent;
+ goto not_found;
}
vnode = AFS_FS_I(inode);
@@ -1228,9 +1206,6 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
dentry, fid.unique,
vnode->fid.unique,
vnode->vfs_inode.i_generation);
- write_seqlock(&vnode->cb_lock);
- set_bit(AFS_VNODE_DELETED, &vnode->flags);
- write_sequnlock(&vnode->cb_lock);
goto not_found;
}
goto out_valid;
@@ -1245,7 +1220,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
default:
_debug("failed to iterate dir %pd: %d",
parent, ret);
- goto out_bad_parent;
+ goto not_found;
}
out_valid:
@@ -1256,16 +1231,9 @@ out_valid_noupdate:
_leave(" = 1 [valid]");
return 1;
- /* the dirent, if it exists, now points to a different vnode */
not_found:
- spin_lock(&dentry->d_lock);
- dentry->d_flags |= DCACHE_NFSFS_RENAMED;
- spin_unlock(&dentry->d_lock);
-
-out_bad_parent:
_debug("dropping dentry %pd2", dentry);
dput(parent);
-out_bad:
key_put(key);
_leave(" = 0 [bad]");
@@ -1792,6 +1760,10 @@ static int afs_link(struct dentry *from, struct inode *dir,
goto error;
}
+ ret = afs_validate(vnode, op->key);
+ if (ret < 0)
+ goto error_op;
+
afs_op_set_vnode(op, 0, dvnode);
afs_op_set_vnode(op, 1, vnode);
op->file[0].dv_delta = 1;
@@ -1805,6 +1777,8 @@ static int afs_link(struct dentry *from, struct inode *dir,
op->create.reason = afs_edit_dir_for_link;
return afs_do_sync_operation(op);
+error_op:
+ afs_put_operation(op);
error:
d_drop(dentry);
_leave(" = %d", ret);
@@ -1989,6 +1963,11 @@ static int afs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
if (IS_ERR(op))
return PTR_ERR(op);
+ ret = afs_validate(vnode, op->key);
+ op->error = ret;
+ if (ret < 0)
+ goto error;
+
afs_op_set_vnode(op, 0, orig_dvnode);
afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
op->file[0].dv_delta = 1;
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index f4600c1353ad..540b9fc96824 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -263,7 +263,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
if (b == nr_blocks) {
_debug("init %u", b);
afs_edit_init_block(meta, block, b);
- i_size_write(&vnode->vfs_inode, (b + 1) * AFS_DIR_BLOCK_SIZE);
+ afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
/* Only lower dir pages have a counter in the header. */
@@ -296,7 +296,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
new_directory:
afs_edit_init_block(meta, meta, 0);
i_size = AFS_DIR_BLOCK_SIZE;
- i_size_write(&vnode->vfs_inode, i_size);
+ afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
page = page0;
block = meta;
diff --git a/fs/afs/file.c b/fs/afs/file.c
index db035ae2a134..e6c447ae91f3 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -24,12 +24,16 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static void afs_readahead(struct readahead_control *ractl);
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
const struct file_operations afs_file_operations = {
.open = afs_open,
.release = afs_release,
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = afs_file_read_iter,
.write_iter = afs_file_write,
.mmap = afs_file_mmap,
.splice_read = generic_file_splice_read,
@@ -59,8 +63,10 @@ const struct address_space_operations afs_fs_aops = {
};
static const struct vm_operations_struct afs_vm_ops = {
+ .open = afs_vm_open,
+ .close = afs_vm_close,
.fault = filemap_fault,
- .map_pages = filemap_map_pages,
+ .map_pages = afs_vm_map_pages,
.page_mkwrite = afs_page_mkwrite,
};
@@ -295,7 +301,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
fsreq->subreq = subreq;
fsreq->pos = subreq->start + subreq->transferred;
fsreq->len = subreq->len - subreq->transferred;
- fsreq->key = subreq->rreq->netfs_priv;
+ fsreq->key = key_get(subreq->rreq->netfs_priv);
fsreq->vnode = vnode;
fsreq->iter = &fsreq->def_iter;
@@ -304,6 +310,7 @@ static void afs_req_issue_op(struct netfs_read_subrequest *subreq)
fsreq->pos, fsreq->len);
afs_fetch_data(fsreq->vnode, fsreq);
+ afs_put_read(fsreq);
}
static int afs_symlink_readpage(struct page *page)
@@ -490,15 +497,88 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
return 1;
}
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+ if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ list_add_tail(&vnode->cb_mmap_link,
+ &vnode->volume->cell->fs_open_mmaps);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ }
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+ if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+ return;
+
+ down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+ if (atomic_read(&vnode->cb_nr_mmap) == 0)
+ list_del_init(&vnode->cb_mmap_link);
+
+ up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+ flush_work(&vnode->cb_work);
+}
+
/*
* Handle setting up a memory mapping on an AFS file.
*/
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
int ret;
+ afs_add_open_mmap(vnode);
+
ret = generic_file_mmap(file, vma);
if (ret == 0)
vma->vm_ops = &afs_vm_ops;
+ else
+ afs_drop_open_mmap(vnode);
return ret;
}
+
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+ afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+ afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+ struct afs_file *af = vmf->vma->vm_file->private_data;
+
+ switch (afs_validate(vnode, af->key)) {
+ case 0:
+ return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ case -EINTR:
+ case -ERESTARTSYS:
+ return VM_FAULT_RETRY;
+ case -ESTALE:
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
+ int ret;
+
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
+ return generic_file_read_iter(iocb, iter);
+}
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index e7e98ad63a91..c0031a3ab42f 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include "afs_fs.h"
#include "internal.h"
+#include "protocol_afs.h"
#include "protocol_yfs.h"
static unsigned int afs_fs_probe_fast_poll_interval = 30 * HZ;
@@ -102,7 +103,7 @@ void afs_fileserver_probe_result(struct afs_call *call)
struct afs_addr_list *alist = call->alist;
struct afs_server *server = call->server;
unsigned int index = call->addr_ix;
- unsigned int rtt_us = 0;
+ unsigned int rtt_us = 0, cap0;
int ret = call->error;
_enter("%pU,%u", &server->uuid, index);
@@ -159,6 +160,11 @@ responded:
clear_bit(AFS_SERVER_FL_IS_YFS, &server->flags);
alist->addrs[index].srx_service = call->service_id;
}
+ cap0 = ntohl(call->tmp);
+ if (cap0 & AFS3_VICED_CAPABILITY_64BITFILES)
+ set_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
+ else
+ clear_bit(AFS_SERVER_FL_HAS_FS64, &server->flags);
}
if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index dd3f45d906d2..4943413d9c5f 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -456,9 +456,7 @@ void afs_fs_fetch_data(struct afs_operation *op)
struct afs_read *req = op->fetch.req;
__be32 *bp;
- if (upper_32_bits(req->pos) ||
- upper_32_bits(req->len) ||
- upper_32_bits(req->pos + req->len))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_fetch_data64(op);
_enter("");
@@ -1113,9 +1111,7 @@ void afs_fs_store_data(struct afs_operation *op)
(unsigned long long)op->store.pos,
(unsigned long long)op->store.i_size);
- if (upper_32_bits(op->store.pos) ||
- upper_32_bits(op->store.size) ||
- upper_32_bits(op->store.i_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_store_data64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData,
@@ -1229,7 +1225,7 @@ static void afs_fs_setattr_size(struct afs_operation *op)
key_serial(op->key), vp->fid.vid, vp->fid.vnode);
ASSERT(attr->ia_valid & ATTR_SIZE);
- if (upper_32_bits(attr->ia_size))
+ if (test_bit(AFS_SERVER_FL_HAS_FS64, &op->server->flags))
return afs_fs_setattr_size64(op);
call = afs_alloc_flat_call(op->net, &afs_RXFSStoreData_as_Status,
@@ -1657,20 +1653,33 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
return ret;
count = ntohl(call->tmp);
-
call->count = count;
call->count2 = count;
- afs_extract_discard(call, count * sizeof(__be32));
+ if (count == 0) {
+ call->unmarshall = 4;
+ call->tmp = 0;
+ break;
+ }
+
+ /* Extract the first word of the capabilities to call->tmp */
+ afs_extract_to_tmp(call);
call->unmarshall++;
fallthrough;
- /* Extract capabilities words */
case 2:
ret = afs_extract_data(call, false);
if (ret < 0)
return ret;
- /* TODO: Examine capabilities */
+ afs_extract_discard(call, (count - 1) * sizeof(__be32));
+ call->unmarshall++;
+ fallthrough;
+
+ /* Extract remaining capabilities words */
+ case 3:
+ ret = afs_extract_data(call, false);
+ if (ret < 0)
+ return ret;
call->unmarshall++;
break;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 80b6c8d967d5..8fcffea2daf5 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -54,16 +54,6 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
}
/*
- * Set the file size and block count. Estimate the number of 512 bytes blocks
- * used, rounded up to nearest 1K for consistency with other AFS clients.
- */
-static void afs_set_i_size(struct afs_vnode *vnode, u64 size)
-{
- i_size_write(&vnode->vfs_inode, size);
- vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
-}
-
-/*
* Initialise an inode from the vnode status.
*/
static int afs_inode_init_from_status(struct afs_operation *op,
@@ -587,22 +577,32 @@ static void afs_zap_data(struct afs_vnode *vnode)
}
/*
- * Get the server reinit counter for a vnode's current server.
+ * Check to see if we have a server currently serving this volume and that it
+ * hasn't been reinitialised or dropped from the list.
*/
-static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
+static bool afs_check_server_good(struct afs_vnode *vnode)
{
- struct afs_server_list *slist = rcu_dereference(vnode->volume->servers);
+ struct afs_server_list *slist;
struct afs_server *server;
+ bool good;
int i;
+ if (vnode->cb_fs_s_break == atomic_read(&vnode->volume->cell->fs_s_break))
+ return true;
+
+ rcu_read_lock();
+
+ slist = rcu_dereference(vnode->volume->servers);
for (i = 0; i < slist->nr_servers; i++) {
server = slist->servers[i].server;
if (server == vnode->cb_server) {
- *_s_break = READ_ONCE(server->cb_s_break);
- return true;
+ good = (vnode->cb_s_break == server->cb_s_break);
+ rcu_read_unlock();
+ return good;
}
}
+ rcu_read_unlock();
return false;
}
@@ -611,57 +611,46 @@ static bool afs_get_s_break_rcu(struct afs_vnode *vnode, unsigned int *_s_break)
*/
bool afs_check_validity(struct afs_vnode *vnode)
{
- struct afs_volume *volume = vnode->volume;
enum afs_cb_break_reason need_clear = afs_cb_break_no_break;
time64_t now = ktime_get_real_seconds();
- bool valid;
- unsigned int cb_break, cb_s_break, cb_v_break;
+ unsigned int cb_break;
int seq = 0;
do {
read_seqbegin_or_lock(&vnode->cb_lock, &seq);
- cb_v_break = READ_ONCE(volume->cb_v_break);
cb_break = vnode->cb_break;
- if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
- afs_get_s_break_rcu(vnode, &cb_s_break)) {
- if (vnode->cb_s_break != cb_s_break ||
- vnode->cb_v_break != cb_v_break) {
- vnode->cb_s_break = cb_s_break;
- vnode->cb_v_break = cb_v_break;
- need_clear = afs_cb_break_for_vsbreak;
- valid = false;
- } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags)) {
+ if (vnode->cb_v_break != vnode->volume->cb_v_break)
+ need_clear = afs_cb_break_for_v_break;
+ else if (!afs_check_server_good(vnode))
+ need_clear = afs_cb_break_for_s_reinit;
+ else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
need_clear = afs_cb_break_for_zap;
- valid = false;
- } else if (vnode->cb_expires_at - 10 <= now) {
+ else if (vnode->cb_expires_at - 10 <= now)
need_clear = afs_cb_break_for_lapsed;
- valid = false;
- } else {
- valid = true;
- }
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
- valid = true;
+ ;
} else {
- vnode->cb_v_break = cb_v_break;
- valid = false;
+ need_clear = afs_cb_break_no_promise;
}
} while (need_seqretry(&vnode->cb_lock, seq));
done_seqretry(&vnode->cb_lock, seq);
- if (need_clear != afs_cb_break_no_break) {
- write_seqlock(&vnode->cb_lock);
- if (cb_break == vnode->cb_break)
- __afs_break_callback(vnode, need_clear);
- else
- trace_afs_cb_miss(&vnode->fid, need_clear);
- write_sequnlock(&vnode->cb_lock);
- valid = false;
- }
+ if (need_clear == afs_cb_break_no_break)
+ return true;
- return valid;
+ write_seqlock(&vnode->cb_lock);
+ if (need_clear == afs_cb_break_no_promise)
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ else if (cb_break == vnode->cb_break)
+ __afs_break_callback(vnode, need_clear);
+ else
+ trace_afs_cb_miss(&vnode->fid, need_clear);
+ write_sequnlock(&vnode->cb_lock);
+ return false;
}
/*
@@ -675,21 +664,20 @@ bool afs_check_validity(struct afs_vnode *vnode)
*/
int afs_validate(struct afs_vnode *vnode, struct key *key)
{
- bool valid;
int ret;
_enter("{v={%llx:%llu} fl=%lx},%x",
vnode->fid.vid, vnode->fid.vnode, vnode->flags,
key_serial(key));
- rcu_read_lock();
- valid = afs_check_validity(vnode);
- rcu_read_unlock();
-
- if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
- clear_nlink(&vnode->vfs_inode);
+ if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) {
+ if (vnode->vfs_inode.i_nlink)
+ clear_nlink(&vnode->vfs_inode);
+ goto valid;
+ }
- if (valid)
+ if (test_bit(AFS_VNODE_CB_PROMISED, &vnode->flags) &&
+ afs_check_validity(vnode))
goto valid;
down_write(&vnode->validate_lock);
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 5ed416f4ff33..0ad97a8fc0d4 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -390,6 +390,9 @@ struct afs_cell {
/* Active fileserver interaction state. */
struct rb_root fs_servers; /* afs_server (by server UUID) */
seqlock_t fs_lock; /* For fs_servers */
+ struct rw_semaphore fs_open_mmaps_lock;
+ struct list_head fs_open_mmaps; /* List of vnodes that are mmapped */
+ atomic_t fs_s_break; /* Counter of CB.InitCallBackState messages */
/* VL server list. */
rwlock_t vl_servers_lock; /* Lock on vl_servers */
@@ -503,6 +506,7 @@ struct afs_server {
struct hlist_node addr4_link; /* Link in net->fs_addresses4 */
struct hlist_node addr6_link; /* Link in net->fs_addresses6 */
struct hlist_node proc_link; /* Link in net->fs_proc */
+ struct work_struct initcb_work; /* Work for CB.InitCallBackState* */
struct afs_server *gc_next; /* Next server in manager's list */
time64_t unuse_time; /* Time at which last unused */
unsigned long flags;
@@ -516,6 +520,7 @@ struct afs_server {
#define AFS_SERVER_FL_IS_YFS 16 /* Server is YFS not AFS */
#define AFS_SERVER_FL_NO_IBULK 17 /* Fileserver doesn't support FS.InlineBulkStatus */
#define AFS_SERVER_FL_NO_RM2 18 /* Fileserver doesn't support YFS.RemoveFile2 */
+#define AFS_SERVER_FL_HAS_FS64 19 /* Fileserver supports FS.{Fetch,Store}Data64 */
atomic_t ref; /* Object refcount */
atomic_t active; /* Active user count */
u32 addr_version; /* Address list version */
@@ -657,7 +662,11 @@ struct afs_vnode {
afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
+ struct work_struct cb_work; /* Work for mmap'd files */
+ struct list_head cb_mmap_link; /* Link in cell->fs_open_mmaps */
void *cb_server; /* Server with callback/filelock */
+ atomic_t cb_nr_mmap; /* Number of mmaps */
+ unsigned int cb_fs_s_break; /* Mass server break counter (cell->fs_s_break) */
unsigned int cb_s_break; /* Mass break counter on ->server */
unsigned int cb_v_break; /* Mass break counter on ->volume */
unsigned int cb_break; /* Break counter on vnode */
@@ -965,6 +974,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
/*
* callback.c
*/
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
extern void afs_init_callback_state(struct afs_server *);
extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
@@ -1586,6 +1597,16 @@ static inline void afs_update_dentry_version(struct afs_operation *op,
}
/*
+ * Set the file size and block count. Estimate the number of 512-byte blocks
+ * used, rounded up to the nearest 1K for consistency with other AFS clients.
+ */
+static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size)
+{
+ i_size_write(&vnode->vfs_inode, size);
+ vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1;
+}
+
+/*
* Check for a conflicting operation on a directory that we just unlinked from.
* If someone managed to sneak a link or an unlink in on the file we just
* unlinked, we won't be able to trust nlink on an AFS file (but not YFS).
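
The helper moved into internal.h above rounds the byte size up to the next
1 KiB and then reports it in 512-byte units, which is why the shift right by
10 is followed by a shift left by 1. A small standalone sketch of that
arithmetic (illustrative only; the helper name is made up here):

/* Sketch of the i_blocks estimate used by afs_set_i_size(). */
#include <stdio.h>
#include <stdint.h>

static uint64_t afs_estimate_blocks(uint64_t size)
{
	/* Round up to the next 1 KiB, then count 512-byte blocks. */
	return ((size + 1023) >> 10) << 1;
}

int main(void)
{
	/* 1 byte -> 2 blocks (1 KiB); 3000 bytes -> 6 blocks (3 KiB). */
	printf("%llu %llu\n",
	       (unsigned long long)afs_estimate_blocks(1),
	       (unsigned long long)afs_estimate_blocks(3000));
	return 0;
}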
diff --git a/fs/afs/protocol_afs.h b/fs/afs/protocol_afs.h
new file mode 100644
index 000000000000..0c39358c8b70
--- /dev/null
+++ b/fs/afs/protocol_afs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* AFS protocol bits
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+
+#define AFSCAPABILITIESMAX 196 /* Maximum number of words in a capability set */
+
+/* AFS3 Fileserver capabilities word 0 */
+#define AFS3_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Uses UAE errors */
+#define AFS3_VICED_CAPABILITY_64BITFILES 0x0002 /* FetchData64 & StoreData64 supported */
+#define AFS3_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define AFS3_VICED_CAPABILITY_SANEACLS 0x0008 /* ACLs reviewed for sanity - don't use */
diff --git a/fs/afs/protocol_yfs.h b/fs/afs/protocol_yfs.h
index b5bd03b1d3c7..e4cd89c44c46 100644
--- a/fs/afs/protocol_yfs.h
+++ b/fs/afs/protocol_yfs.h
@@ -168,3 +168,9 @@ enum yfs_lock_type {
yfs_LockMandatoryWrite = 0x101,
yfs_LockMandatoryExtend = 0x102,
};
+
+/* RXYFS Viced Capability Flags */
+#define YFS_VICED_CAPABILITY_ERRORTRANS 0x0001 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_64BITFILES 0x0002 /* Deprecated v0.195 */
+#define YFS_VICED_CAPABILITY_WRITELOCKACL 0x0004 /* Can lock a file even without lock perm */
+#define YFS_VICED_CAPABILITY_SANEACLS 0x0008 /* Deprecated v0.195 */
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index d83f13c44b92..79e1a5f6701b 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -374,6 +374,7 @@ selected_server:
if (vnode->cb_server != server) {
vnode->cb_server = server;
vnode->cb_s_break = server->cb_s_break;
+ vnode->cb_fs_s_break = atomic_read(&server->cell->fs_s_break);
vnode->cb_v_break = vnode->volume->cb_v_break;
clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index 684a2b02b9ff..6e5b9a19b234 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
server->addr_version = alist->version;
server->uuid = *uuid;
rwlock_init(&server->fs_lock);
+ INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
init_waitqueue_head(&server->probe_wq);
INIT_LIST_HEAD(&server->probe_link);
spin_lock_init(&server->probe_lock);
@@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
afs_give_up_callbacks(net, server);
+ flush_work(&server->initcb_work);
afs_put_server(net, server, afs_server_trace_destroy);
}
diff --git a/fs/afs/super.c b/fs/afs/super.c
index e38bb1e7a4d2..d110def8aa8e 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
vnode->lock_state = AFS_VNODE_LOCK_NONE;
init_rwsem(&vnode->rmdir_lock);
+ INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
_leave(" = %p", &vnode->vfs_inode);
return &vnode->vfs_inode;
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c0534697268e..2dfe3b3a53d6 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -137,7 +137,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_seqlock(&vnode->cb_lock);
i_size = i_size_read(&vnode->vfs_inode);
if (maybe_i_size > i_size)
- i_size_write(&vnode->vfs_inode, maybe_i_size);
+ afs_set_i_size(vnode, maybe_i_size);
write_sequnlock(&vnode->cb_lock);
}
@@ -471,13 +471,18 @@ static void afs_extend_writeback(struct address_space *mapping,
}
/* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas)))
+ if (unlikely(page != xas_reload(&xas))) {
+ put_page(page);
break;
+ }
- if (!trylock_page(page))
+ if (!trylock_page(page)) {
+ put_page(page);
break;
+ }
if (!PageDirty(page) || PageWriteback(page)) {
unlock_page(page);
+ put_page(page);
break;
}
@@ -487,6 +492,7 @@ static void afs_extend_writeback(struct address_space *mapping,
t = afs_page_dirty_to(page, priv);
if (f != 0 && !new_content) {
unlock_page(page);
+ put_page(page);
break;
}
@@ -801,6 +807,7 @@ int afs_writepages(struct address_space *mapping,
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
+ struct afs_file *af = iocb->ki_filp->private_data;
ssize_t result;
size_t count = iov_iter_count(from);
@@ -816,6 +823,10 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (!count)
return 0;
+ result = afs_validate(vnode, af->key);
+ if (result < 0)
+ return result;
+
result = generic_file_write_iter(iocb, from);
_leave(" = %zd", result);
@@ -829,13 +840,18 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
*/
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
- struct inode *inode = file_inode(file);
- struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_file *af = file->private_data;
+ int ret;
_enter("{%llx:%llu},{n=%pD},%d",
vnode->fid.vid, vnode->fid.vnode, file,
datasync);
+ ret = afs_validate(vnode, af->key);
+ if (ret < 0)
+ return ret;
+
return file_write_and_wait_range(file, start, end);
}
@@ -849,11 +865,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_file *af = file->private_data;
unsigned long priv;
vm_fault_t ret = VM_FAULT_RETRY;
_enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
+ afs_validate(vnode, af->key);
+
sb_start_pagefault(inode->i_sb);
/* Wait for the page to be written to the cache before we allow it to
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 69d900a8473d..a813b70f594e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -630,7 +630,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
vaddr = eppnt->p_vaddr;
if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
- elf_type |= MAP_FIXED_NOREPLACE;
+ elf_type |= MAP_FIXED;
else if (no_base && interp_elf_ex->e_type == ET_DYN)
load_addr = -vaddr;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 2673c6ba7a4e..0b9401a5afd3 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -665,7 +665,18 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode, struct bio *bio,
if (!ordered) {
ordered = btrfs_lookup_ordered_extent(inode, offset);
- BUG_ON(!ordered); /* Logic error */
+ /*
+ * The bio range is not covered by any ordered extent,
+ * must be a code logic error.
+ */
+ if (unlikely(!ordered)) {
+ WARN(1, KERN_WARNING
+ "no ordered extent for root %llu ino %llu offset %llu\n",
+ inode->root->root_key.objectid,
+ btrfs_ino(inode), offset);
+ kvfree(sums);
+ return BLK_STS_IOERR;
+ }
}
nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
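
The btrfs_csum_one_bio() hunk above downgrades a fatal BUG_ON() into a loud warning plus an error return, making sure the partially built checksum buffer is freed first. The shape of that hardening, sketched with hypothetical names in plain C:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct record { long key; };

static struct record *lookup_record(long key)
{
        (void)key;
        return NULL;    /* simulate the "impossible" lookup failure */
}

static int checksum_range(long key, unsigned char **sums_out)
{
        unsigned char *sums = malloc(256);
        if (!sums)
                return -ENOMEM;

        struct record *rec = lookup_record(key);
        if (!rec) {
                /* Previously: abort the whole program.  Now: warn, clean up, bail out. */
                fprintf(stderr, "warning: no record for key %ld\n", key);
                free(sums);
                return -EIO;
        }

        /* ... fill sums from rec ... */
        *sums_out = sums;
        return 0;
}

int main(void)
{
        unsigned char *sums;

        printf("checksum_range() = %d\n", checksum_range(42, &sums));
        return 0;
}
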
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 5ada02e0e629..aa5be0b24987 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -414,9 +414,10 @@ static void __btrfs_dump_space_info(struct btrfs_fs_info *fs_info,
{
lockdep_assert_held(&info->lock);
- btrfs_info(fs_info, "space_info %llu has %llu free, is %sfull",
+ /* The free space could be negative in case of overcommit */
+ btrfs_info(fs_info, "space_info %llu has %lld free, is %sfull",
info->flags,
- info->total_bytes - btrfs_space_info_used(info, true),
+ (s64)(info->total_bytes - btrfs_space_info_used(info, true)),
info->full ? "" : "not ");
btrfs_info(fs_info,
"space_info total=%llu, used=%llu, pinned=%llu, reserved=%llu, may_use=%llu, readonly=%llu zone_unusable=%llu",
diff --git a/fs/btrfs/verity.c b/fs/btrfs/verity.c
index 28d443d3ef93..4968535dfff0 100644
--- a/fs/btrfs/verity.c
+++ b/fs/btrfs/verity.c
@@ -451,7 +451,7 @@ static int del_orphan(struct btrfs_trans_handle *trans, struct btrfs_inode *inod
*/
static int rollback_verity(struct btrfs_inode *inode)
{
- struct btrfs_trans_handle *trans;
+ struct btrfs_trans_handle *trans = NULL;
struct btrfs_root *root = inode->root;
int ret;
@@ -473,6 +473,7 @@ static int rollback_verity(struct btrfs_inode *inode)
trans = btrfs_start_transaction(root, 2);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
+ trans = NULL;
btrfs_handle_fs_error(root->fs_info, ret,
"failed to start transaction in verity rollback %llu",
(u64)inode->vfs_inode.i_ino);
@@ -490,8 +491,9 @@ static int rollback_verity(struct btrfs_inode *inode)
btrfs_abort_transaction(trans, ret);
goto out;
}
- btrfs_end_transaction(trans);
out:
+ if (trans)
+ btrfs_end_transaction(trans);
return ret;
}
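
The rollback_verity() change above is the standard way to make a single exit label safe: start the handle as NULL, reset it to NULL when acquisition fails, and only release it at the label if it was actually obtained. The same shape in a self-contained sketch, with a FILE pointer standing in for the transaction handle:

#include <stdio.h>

static int rollback(const char *path)
{
        FILE *f = NULL;
        int ret = 0;

        f = fopen(path, "r");
        if (!f) {
                ret = -1;
                /* f is already NULL, so the exit path will not touch it */
                goto out;
        }

        /* ... use f; any later failure just sets ret and jumps to out ... */

out:
        if (f)
                fclose(f);
        return ret;
}

int main(void)
{
        printf("rollback: %d\n", rollback("/nonexistent"));
        return 0;
}
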
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 464485aa7318..2ec3b8ac8fa3 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1137,6 +1137,19 @@ static void btrfs_close_one_device(struct btrfs_device *device)
atomic_set(&device->dev_stats_ccnt, 0);
extent_io_tree_release(&device->alloc_state);
+ /*
+ * Reset the flush error record. We might have a transient flush error
+ * in this mount, and if so we aborted the current transaction and set
+ * the fs to an error state, guaranteeing no super blocks can be further
+ * committed. However that error might be transient and if we unmount the
+ * filesystem and mount it again, we should allow the mount to succeed
+ * (btrfs_check_rw_degradable() should not fail) - if after mounting the
+ * filesystem again we still get flush errors, then we will again abort
+ * any transaction and set the error state, guaranteeing no commits of
+ * unsafe super blocks.
+ */
+ device->last_flush_error = 0;
+
/* Verify the device is back in a pristine state */
ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
diff --git a/fs/buffer.c b/fs/buffer.c
index ab7573d72dd7..c615387aedca 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1425,12 +1425,16 @@ void invalidate_bh_lrus(void)
}
EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
-void invalidate_bh_lrus_cpu(int cpu)
+/*
+ * It's called from workqueue context so we need a bh_lru_lock to close
+ * the race with preemption/irq.
+ */
+void invalidate_bh_lrus_cpu(void)
{
struct bh_lru *b;
bh_lru_lock();
- b = per_cpu_ptr(&bh_lrus, cpu);
+ b = this_cpu_ptr(&bh_lrus);
__invalidate_bh_lrus(b);
bh_lru_unlock();
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6c0e52fd0743..3e42d0466521 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2263,7 +2263,7 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_dirops,
r_unsafe_dir_item) {
s = req->r_session;
- if (unlikely(s->s_mds > max)) {
+ if (unlikely(s->s_mds >= max)) {
spin_unlock(&ci->i_unsafe_lock);
goto retry;
}
@@ -2277,7 +2277,7 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_iops,
r_unsafe_target_item) {
s = req->r_session;
- if (unlikely(s->s_mds > max)) {
+ if (unlikely(s->s_mds >= max)) {
spin_unlock(&ci->i_unsafe_lock);
goto retry;
}
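
Both ceph hunks above fix the same off-by-one: s_mds indexes an array with max entries, so the out-of-range condition is s_mds >= max, not s_mds > max. Reduced to its essentials:

#include <stdio.h>

int main(void)
{
        int sessions[4] = { 0 };
        int max = 4, idx = 4;

        if (idx >= max)                 /* "idx > max" would let 4 slip through */
                printf("index %d out of range\n", idx);
        else
                printf("session %d\n", sessions[idx]);
        return 0;
}
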
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index 8a3b30ec860c..8be57aaedab6 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cache.c - CIFS filesystem cache index structure definitions
+ * CIFS filesystem cache index structure definitions
*
* Copyright (c) 2010 Novell, Inc.
* Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 51a824fc926a..de2c12bcfa4b 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs_debug.c
*
* Copyright (C) International Business Machines Corp., 2000,2005
*
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 4fd788586399..f97407520ea1 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_fs_sb.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index ef723be358af..b87cbbe6d2d4 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_ioctl.h
*
* Structure definitions for io control for cifs/smb3
*
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 8fa26a8530f8..353bd0dd7026 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifs_spnego.c -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index 31387d0ea32e..e6a0451877d4 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifs_spnego.h -- SPNEGO upcall management for CIFS
+ * SPNEGO upcall management for CIFS
*
* Copyright (c) 2007 Red Hat, Inc.
* Author(s): Jeff Layton (jlayton@redhat.com)
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 171ad8b42107..e7582dd79179 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/cifs_unicode.c
*
* Copyright (c) International Business Machines Corp., 2000,2009
* Modified by Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 388eb536cff1..ee3aab3dd4ac 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsacl.c
*
* Copyright (C) International Business Machines Corp., 2007,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h
index f8292bcf8594..ccbfc754bd3c 100644
--- a/fs/cifs/cifsacl.h
+++ b/fs/cifs/cifsacl.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsacl.h
*
* Copyright (c) International Business Machines Corp., 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 2e6f40344037..d118282071b3 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsencrypt.c
*
* Encryption and hashing operations relating to NTLM, NTLMv2. See MS-NLMP
* for more detailed information
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8c20bfa187ac..9fa930dfd78d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifsfs.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d25a4099b32e..b50da1901ebd 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsfs.h
*
* Copyright (c) International Business Machines Corp., 2002, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index c068f7d8d879..e916470468ea 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsglob.h
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -1400,6 +1399,7 @@ struct cifsInodeInfo {
#define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
#define CIFS_INO_LOCK (5) /* lock bit for synchronization */
#define CIFS_INO_MODIFIED_ATTR (6) /* Indicate change in mtime/ctime */
+#define CIFS_INO_CLOSE_ON_LOCK (7) /* Not to defer the close when lock is set */
unsigned long flags;
spinlock_t writers_lock;
unsigned int writers; /* Number of writers on this inode */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index 98e8e5aa0613..d2ff438fd31f 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifspdu.h
*
* Copyright (c) International Business Machines Corp., 2002,2009
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f9740c21ca3d..d0f85b666662 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/cifsproto.h
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -268,6 +267,9 @@ extern void cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode);
extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
+extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
+ const char *path);
+
extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index a8e41c1e80ca..243d17696f06 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/cifssmb.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0db344807ef1..c3b94c1e4591 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -1090,7 +1089,7 @@ next_pdu:
module_put_and_exit(0);
}
-/**
+/*
* Returns true if srcaddr isn't specified and rhs isn't specified, or
* if srcaddr is specified and matches the IP address of the rhs argument
*/
@@ -1550,6 +1549,9 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
/**
* cifs_setup_ipc - helper to setup the IPC tcon for the session
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
+ * new tree connection for the IPC (interprocess communication RPC)
*
* A new IPC connection is made and stored in the session
* tcon_ipc. The IPC tcon has the same lifetime as the session.
@@ -1605,6 +1607,7 @@ out:
/**
* cifs_free_ipc - helper to release the session IPC tcon
+ * @ses: smb session to unmount the IPC from
*
* Needs to be called everytime a session is destroyed.
*
@@ -1855,6 +1858,8 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx __attribute__((unused)),
/**
* cifs_get_smb_ses - get a session matching @ctx data from @server
+ * @server: server to setup the session to
+ * @ctx: superblock configuration context to use to setup the session
*
* This function assumes it is being called from cifs_mount() where we
* already got a server reference (server refcount +1). See
@@ -2065,6 +2070,8 @@ cifs_put_tcon(struct cifs_tcon *tcon)
/**
* cifs_get_tcon - get a tcon matching @ctx data from @ses
+ * @ses: smb session to issue the request on
+ * @ctx: the superblock configuration context to use for building the
*
* - tcon refcount is the number of mount points using the tcon.
* - ses refcount is the number of tcon using the session.
@@ -2382,9 +2389,10 @@ cifs_match_super(struct super_block *sb, void *data)
spin_lock(&cifs_tcp_ses_lock);
cifs_sb = CIFS_SB(sb);
tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
- if (IS_ERR(tlink)) {
+ if (tlink == NULL) {
+ /* can not match superblock if tlink were ever null */
spin_unlock(&cifs_tcp_ses_lock);
- return rc;
+ return 0;
}
tcon = tlink_tcon(tlink);
ses = tcon->ses;
@@ -3030,7 +3038,7 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
return full_path;
}
-/**
+/*
* expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
*
* If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 5f8a302ffcb2..6e8e7cc26ae2 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dir.c
*
* vfs operations that deal with dentries
*
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 8c616aaeb7c4..0458d28d71aa 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/dns_resolve.c
*
* Copyright (c) 2007 Igor Mammedov
* Author(s): Igor Mammedov (niallain@gmail.com)
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 9fa2807ef79e..afc0df381246 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
- * Handles host name to IP address resolution
+ * DNS Resolver upcall management for CIFS DFS
+ * Handles host name to IP address resolution
*
* Copyright (c) International Business Machines Corp., 2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index 747a540db954..37c28415df1e 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/export.c
*
* Copyright (C) International Business Machines Corp., 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d0216472f1c6..13f3182cf796 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/file.c
*
* vfs operations that deal with files
*
@@ -883,8 +882,9 @@ int cifs_close(struct inode *inode, struct file *file)
dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL);
if ((cinode->oplock == CIFS_CACHE_RHW_FLG) &&
cinode->lease_granted &&
+ !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) &&
dclose) {
- if (test_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
+ if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
inode->i_ctime = inode->i_mtime = current_time(inode);
cifs_fscache_update_inode_cookie(inode);
}
@@ -1865,6 +1865,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
cifs_sb = CIFS_FILE_SB(file);
+ set_bit(CIFS_INO_CLOSE_ON_LOCK, &CIFS_I(d_inode(cfile->dentry))->flags);
if (cap_unix(tcon->ses) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
@@ -3112,7 +3113,7 @@ static void collect_uncached_write_data(struct cifs_aio_ctx *ctx)
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb;
struct dentry *dentry = ctx->cfile->dentry;
- int rc;
+ ssize_t rc;
tcon = tlink_tcon(ctx->cfile->tlink);
cifs_sb = CIFS_SB(dentry->d_sb);
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index fab47fa7df74..8eedd20c44ab 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/fscache.c - CIFS filesystem cache interface
+ * CIFS filesystem cache interface
*
* Copyright (c) 2010 Novell, Inc.
* Author(s): Suresh Jayaraman <sjayaraman@suse.de>
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 82e856b9cf89..9baa1d0f22bd 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/fscache.h - CIFS filesystem cache interface definitions
+ * CIFS filesystem cache interface definitions
*
* Copyright (c) 2010 Novell, Inc.
* Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 50c01cff4c84..82848412ad85 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/inode.c
*
* Copyright (C) International Business Machines Corp., 2002,2010
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -1625,7 +1624,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry)
goto unlink_out;
}
- cifs_close_deferred_file(CIFS_I(inode));
+ cifs_close_deferred_file_under_dentry(tcon, full_path);
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = CIFSPOSIXDelFile(xid, tcon, full_path,
@@ -2114,9 +2113,9 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir,
goto cifs_rename_exit;
}
- cifs_close_deferred_file(CIFS_I(d_inode(source_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, from_name);
if (d_inode(target_dentry) != NULL)
- cifs_close_deferred_file(CIFS_I(d_inode(target_dentry)));
+ cifs_close_deferred_file_under_dentry(tcon, to_name);
rc = cifs_do_rename(xid, source_dentry, from_name, target_dentry,
to_name);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 42c6a0bac6c8..0359b604bdbc 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/ioctl.c
*
* vfs operations that deal with io control
*
@@ -359,7 +358,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
if (pSMBFile == NULL)
break;
tcon = tlink_tcon(pSMBFile->tlink);
- caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+ /* caps = le64_to_cpu(tcon->fsUnixInfo.Capability); */
if (get_user(ExtAttrBits, (int __user *)arg)) {
rc = -EFAULT;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index f0a6d63bc08c..852e54ee82c2 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/link.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 9469f1cf0b46..bb1185fff8cc 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
@@ -265,7 +264,8 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
/* Uid is not converted */
buffer->Uid = treeCon->ses->Suid;
- buffer->Mid = get_next_mid(treeCon->ses->server);
+ if (treeCon->ses->server)
+ buffer->Mid = get_next_mid(treeCon->ses->server);
}
if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
buffer->Flags2 |= SMBFLG2_DFS;
@@ -591,6 +591,7 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
/**
* cifs_queue_oplock_break - queue the oplock break handler for cfile
+ * @cfile: The file to break the oplock on
*
* This function is called from the demultiplex thread when it
* receives an oplock break for @cfile.
@@ -736,7 +737,7 @@ cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
@@ -767,7 +768,7 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
if (cancel_delayed_work(&cfile->deferred)) {
tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
if (tmp_list == NULL)
- continue;
+ break;
tmp_list->cfile = cfile;
list_add_tail(&tmp_list->list, &file_head);
}
@@ -781,6 +782,43 @@ cifs_close_all_deferred_files(struct cifs_tcon *tcon)
kfree(tmp_list);
}
}
+void
+cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
+{
+ struct cifsFileInfo *cfile;
+ struct list_head *tmp;
+ struct file_list *tmp_list, *tmp_next_list;
+ struct list_head file_head;
+ void *page;
+ const char *full_path;
+
+ INIT_LIST_HEAD(&file_head);
+ page = alloc_dentry_path();
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp, &tcon->openFileList) {
+ cfile = list_entry(tmp, struct cifsFileInfo, tlist);
+ full_path = build_path_from_dentry(cfile->dentry, page);
+ if (strstr(full_path, path)) {
+ if (delayed_work_pending(&cfile->deferred)) {
+ if (cancel_delayed_work(&cfile->deferred)) {
+ tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
+ if (tmp_list == NULL)
+ break;
+ tmp_list->cfile = cfile;
+ list_add_tail(&tmp_list->list, &file_head);
+ }
+ }
+ }
+ }
+ spin_unlock(&tcon->open_file_lock);
+
+ list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
+ _cifsFileInfo_put(tmp_list->cfile, true, false);
+ list_del(&tmp_list->list);
+ kfree(tmp_list);
+ }
+ free_dentry_path(page);
+}
/* parses DFS refferal V3 structure
* caller is responsible for freeing target_nodes
@@ -1029,6 +1067,9 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
/**
* cifs_alloc_hash - allocate hash and hash context together
+ * @name: The name of the crypto hash algo
+ * @shash: Where to put the pointer to the hash algo
+ * @sdesc: Where to put the pointer to the hash descriptor
*
* The caller has to make sure @sdesc is initialized to either NULL or
* a valid context. Both can be freed via cifs_free_hash().
@@ -1067,6 +1108,8 @@ cifs_alloc_hash(const char *name,
/**
* cifs_free_hash - free hash and hash context together
+ * @shash: Where to find the pointer to the hash algo
+ * @sdesc: Where to find the pointer to the hash descriptor
*
* Freeing a NULL hash or context is safe.
*/
@@ -1082,8 +1125,10 @@ cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
/**
* rqst_page_get_length - obtain the length and offset for a page in smb_rqst
- * Input: rqst - a smb_rqst, page - a page index for rqst
- * Output: *len - the length for this page, *offset - the offset for this page
+ * @rqst: The request descriptor
+ * @page: The index of the page to query
+ * @len: Where to store the length for this page:
+ * @offset: Where to store the offset for this page
*/
void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
unsigned int *len, unsigned int *offset)
@@ -1116,6 +1161,8 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
/**
* copy_path_name - copy src path to dst, possibly truncating
+ * @dst: The destination buffer
+ * @src: The source name
*
* returns number of bytes written (including trailing nul)
*/
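
The new cifs_close_deferred_file_under_dentry() above follows the usual two-phase shape: while the spinlock is held it only cancels the delayed work and parks matching entries on a private list, and the final put, which can block, runs after the lock is dropped. A userspace sketch of that shape with a pthread mutex in place of the spinlock and invented node/path types; for simplicity the sketch unlinks matching entries outright rather than taking a reference the way the kernel code does:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct node {
        char path[64];
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *open_files;

static void add_open_file(const char *path)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return;
        snprintf(n->path, sizeof(n->path), "%s", path);
        n->next = open_files;
        open_files = n;
}

static void close_deferred_under_path(const char *prefix)
{
        struct node *collected = NULL;

        pthread_mutex_lock(&list_lock);
        for (struct node **pp = &open_files; *pp; ) {
                struct node *n = *pp;

                if (strstr(n->path, prefix)) {
                        *pp = n->next;          /* unlink from the shared list */
                        n->next = collected;    /* park on the private list */
                        collected = n;
                } else {
                        pp = &n->next;
                }
        }
        pthread_mutex_unlock(&list_lock);

        while (collected) {                     /* blocking work, lock not held */
                struct node *n = collected;

                collected = n->next;
                printf("closing %s\n", n->path);
                free(n);
        }
}

int main(void)
{
        add_open_file("/share/dir/a.txt");
        add_open_file("/share/other/b.txt");
        close_deferred_under_path("/share/dir");
        return 0;
}
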
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 0e728aac67e9..fa9fbd6a819c 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/netmisc.c
*
* Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 378133ce8869..25a2b8ef88b9 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/ntlmssp.h
*
* Copyright (c) International Business Machines Corp., 2002,2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 54d77c99e21c..1929e80c09ee 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/readdir.c
*
* Directory search handling
*
diff --git a/fs/cifs/rfc1002pdu.h b/fs/cifs/rfc1002pdu.h
index 137f7c95afd6..ae1d025da294 100644
--- a/fs/cifs/rfc1002pdu.h
+++ b/fs/cifs/rfc1002pdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/rfc1002pdu.h
*
* Protocol Data Unit definitions for RFC 1001/1002 support
*
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 118403fbeda2..23e02db7923f 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/sess.c
*
* SMB/CIFS session setup handling routines
*
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index c9d8a50062b8..f5dcc4940b6d 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2file.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Author(s): Steve French (sfrench@us.ibm.com),
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index d0e9f3782bd9..ca692b2283cd 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2glob.h
*
* Definitions for various global variables and structures
*
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 957b2594f02e..8297703492ee 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2inode.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index 668f77108831..29b5554f6263 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2misc.c
*
* Copyright (C) International Business Machines Corp., 2002,2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index b6d2e3591927..7829c590eeac 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2pdu.c
*
* Copyright (C) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
@@ -2398,7 +2397,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
buf->sd.OffsetDacl = cpu_to_le32(ptr - (__u8 *)&buf->sd);
/* Ship the ACL for now. we will copy it into buf later. */
aclptr = ptr;
- ptr += sizeof(struct cifs_acl);
+ ptr += sizeof(struct smb3_acl);
/* create one ACE to hold the mode embedded in reserved special SID */
acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
@@ -2423,7 +2422,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
acl.AclRevision = ACL_REVISION; /* See 2.4.4.1 of MS-DTYP */
acl.AclSize = cpu_to_le16(acl_size);
acl.AceCount = cpu_to_le16(ace_count);
- memcpy(aclptr, &acl, sizeof(struct cifs_acl));
+ memcpy(aclptr, &acl, sizeof(struct smb3_acl));
buf->ccontext.DataLength = cpu_to_le32(ptr - (__u8 *)&buf->sd);
*len = roundup(ptr - (__u8 *)buf, 8);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index e9cac7970b66..f32c99c9ba13 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2pdu.h
*
* Copyright (c) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 263767f644f8..547945443fa7 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2proto.h
*
* Copyright (c) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smb2status.h b/fs/cifs/smb2status.h
index 0215ef36e240..a9e958166fc5 100644
--- a/fs/cifs/smb2status.h
+++ b/fs/cifs/smb2status.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smb2status.h
*
* SMB2 Status code (network error) definitions
* Definitions are from MS-ERREF
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 6f7952ea4941..f59b956f9d25 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/smb2transport.c
*
* Copyright (C) International Business Machines Corp., 2002, 2011
* Etersoft, 2012
diff --git a/fs/cifs/smberr.h b/fs/cifs/smberr.h
index 60189efb3236..aeffdad829e2 100644
--- a/fs/cifs/smberr.h
+++ b/fs/cifs/smberr.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1 */
/*
- * fs/cifs/smberr.h
*
* Copyright (c) International Business Machines Corp., 2002,2004
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 75a95de320cf..b7379329b741 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/transport.c
*
* Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/cifs/winucase.c b/fs/cifs/winucase.c
index 59b6c577aa0a..2f075b5b50df 100644
--- a/fs/cifs/winucase.c
+++ b/fs/cifs/winucase.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * fs/cifs/winucase.c
*
* Copyright (c) Jeffrey Layton <jlayton@redhat.com>, 2013
*
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 9ed481e79ce0..7d8b72d67c80 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: LGPL-2.1
/*
- * fs/cifs/xattr.c
*
* Copyright (c) International Business Machines Corp., 2003, 2007
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 8129a430d789..2f117c57160d 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -528,7 +528,7 @@ void debugfs_create_file_size(const char *name, umode_t mode,
{
struct dentry *de = debugfs_create_file(name, mode, parent, data, fops);
- if (de)
+ if (!IS_ERR(de))
d_inode(de)->i_size = file_size;
}
EXPORT_SYMBOL_GPL(debugfs_create_file_size);
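
The debugfs fix above matters because debugfs_create_file() signals failure with an ERR_PTR-encoded pointer rather than NULL, so a plain "if (de)" accepts the error value. A simplified userspace re-creation of the encoding (the real macros live in include/linux/err.h; these are cut-down stand-ins):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long err) { return (void *)err; }

static int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *create_file(int fail)
{
        static int real_object;

        return fail ? ERR_PTR(-12 /* -ENOMEM */) : &real_object;
}

int main(void)
{
        void *de = create_file(1);

        printf("if (de):         %s\n", de ? "looks valid (wrong!)" : "caught the error");
        printf("if (!IS_ERR(de)): %s\n", !IS_ERR(de) ? "looks valid" : "caught the error");
        return 0;
}
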
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
index 31ac3a73b390..a552399e211d 100644
--- a/fs/erofs/inode.c
+++ b/fs/erofs/inode.c
@@ -176,7 +176,7 @@ static struct page *erofs_read_inode(struct inode *inode,
}
if (vi->datalayout == EROFS_INODE_CHUNK_BASED) {
- if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_ALL)) {
+ if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) {
erofs_err(inode->i_sb,
"unsupported chunk format %x of nid %llu",
vi->chunkformat, vi->nid);
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 9fb98d85a3ce..7a6df35fdc91 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -369,7 +369,8 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
if (compacted_4b_initial == 32 / 4)
compacted_4b_initial = 0;
- if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
+ if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
+ compacted_4b_initial < totalidx)
compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
else
compacted_2b = 0;
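
The extra "compacted_4b_initial < totalidx" condition above guards an unsigned subtraction: if the initial chunk already covers every index, totalidx - compacted_4b_initial would wrap to a huge value before being rounded down. A compact reproduction with plain unsigned ints:

#include <stdio.h>

static unsigned int rounddown16(unsigned int x) { return x & ~15u; }

int main(void)
{
        unsigned int totalidx = 3, initial = 8;

        printf("unguarded: %u\n", rounddown16(totalidx - initial));  /* wraps, huge */
        printf("guarded:   %u\n", initial < totalidx ?
               rounddown16(totalidx - initial) : 0);
        return 0;
}
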
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 1f3f4326bf3c..c17ccc19b938 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -48,10 +48,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
struct ext2_sb_info *sbi = EXT2_SB(sb);
if (block_group >= sbi->s_groups_count) {
- ext2_error (sb, "ext2_get_group_desc",
- "block_group >= groups_count - "
- "block_group = %d, groups_count = %lu",
- block_group, sbi->s_groups_count);
+ WARN(1, "block_group >= groups_count - "
+ "block_group = %d, groups_count = %lu",
+ block_group, sbi->s_groups_count);
return NULL;
}
@@ -59,10 +58,9 @@ struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
group_desc = block_group >> EXT2_DESC_PER_BLOCK_BITS(sb);
offset = block_group & (EXT2_DESC_PER_BLOCK(sb) - 1);
if (!sbi->s_group_desc[group_desc]) {
- ext2_error (sb, "ext2_get_group_desc",
- "Group descriptor not loaded - "
- "block_group = %d, group_desc = %lu, desc = %lu",
- block_group, group_desc, offset);
+ WARN(1, "Group descriptor not loaded - "
+ "block_group = %d, group_desc = %lu, desc = %lu",
+ block_group, group_desc, offset);
return NULL;
}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ffb295aa891c..74b172a4adda 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -551,7 +551,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
struct dir_private_info *info = file->private_data;
struct inode *inode = file_inode(file);
struct fname *fname;
- int ret;
+ int ret = 0;
if (!info) {
info = ext4_htree_create_dir_info(file, ctx->pos);
@@ -599,7 +599,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
info->curr_minor_hash,
&info->next_hash);
if (ret < 0)
- return ret;
+ goto finished;
if (ret == 0) {
ctx->pos = ext4_get_htree_eof(file);
break;
@@ -630,7 +630,7 @@ static int ext4_dx_readdir(struct file *file, struct dir_context *ctx)
}
finished:
info->last_pos = ctx->pos;
- return 0;
+ return ret < 0 ? ret : 0;
}
static int ext4_release_dir(struct inode *inode, struct file *filp)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 90ff5acaf11f..3825195539d7 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -3593,9 +3593,6 @@ extern int ext4_da_write_inline_data_begin(struct address_space *mapping,
unsigned flags,
struct page **pagep,
void **fsdata);
-extern int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
- unsigned len, unsigned copied,
- struct page *page);
extern int ext4_try_add_inline_entry(handle_t *handle,
struct ext4_filename *fname,
struct inode *dir, struct inode *inode);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index c0de30f25185..0e02571f2f82 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5916,7 +5916,7 @@ void ext4_ext_replay_shrink_inode(struct inode *inode, ext4_lblk_t end)
}
/* Check if *cur is a hole and if it is, skip it */
-static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
+static int skip_hole(struct inode *inode, ext4_lblk_t *cur)
{
int ret;
struct ext4_map_blocks map;
@@ -5925,9 +5925,12 @@ static void skip_hole(struct inode *inode, ext4_lblk_t *cur)
map.m_len = ((inode->i_size) >> inode->i_sb->s_blocksize_bits) - *cur;
ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret < 0)
+ return ret;
if (ret != 0)
- return;
+ return 0;
*cur = *cur + map.m_len;
+ return 0;
}
/* Count number of blocks used by this inode and update i_blocks */
@@ -5976,7 +5979,9 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
* iblocks by total number of differences found.
*/
cur = 0;
- skip_hole(inode, &cur);
+ ret = skip_hole(inode, &cur);
+ if (ret < 0)
+ goto out;
path = ext4_find_extent(inode, cur, NULL, 0);
if (IS_ERR(path))
goto out;
@@ -5995,8 +6000,12 @@ int ext4_ext_replay_set_iblocks(struct inode *inode)
}
cur = max(cur + 1, le32_to_cpu(ex->ee_block) +
ext4_ext_get_actual_len(ex));
- skip_hole(inode, &cur);
-
+ ret = skip_hole(inode, &cur);
+ if (ret < 0) {
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ break;
+ }
path2 = ext4_find_extent(inode, cur, NULL, 0);
if (IS_ERR(path2)) {
ext4_ext_drop_refs(path);
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 8e610a381862..8ea5a81e6554 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -892,6 +892,12 @@ static int ext4_fc_write_inode_data(struct inode *inode, u32 *crc)
sizeof(lrange), (u8 *)&lrange, crc))
return -ENOSPC;
} else {
+ unsigned int max = (map.m_flags & EXT4_MAP_UNWRITTEN) ?
+ EXT_UNWRITTEN_MAX_LEN : EXT_INIT_MAX_LEN;
+
+ /* Limit the number of blocks in one extent */
+ map.m_len = min(max, map.m_len);
+
fc_ext.fc_ino = cpu_to_le32(inode->i_ino);
ex = (struct ext4_extent *)&fc_ext.fc_ex;
ex->ee_block = cpu_to_le32(map.m_lblk);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 82bf4ff6be28..39a1ab129fdc 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -7,6 +7,7 @@
#include <linux/iomap.h>
#include <linux/fiemap.h>
#include <linux/iversion.h>
+#include <linux/backing-dev.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -733,45 +734,83 @@ convert:
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct page *page)
{
- int ret, no_expand;
+ handle_t *handle = ext4_journal_current_handle();
+ int no_expand;
void *kaddr;
struct ext4_iloc iloc;
+ int ret = 0, ret2;
+
+ if (unlikely(copied < len) && !PageUptodate(page))
+ copied = 0;
- if (unlikely(copied < len)) {
- if (!PageUptodate(page)) {
- copied = 0;
+ if (likely(copied)) {
+ ret = ext4_get_inode_loc(inode, &iloc);
+ if (ret) {
+ unlock_page(page);
+ put_page(page);
+ ext4_std_error(inode->i_sb, ret);
goto out;
}
- }
+ ext4_write_lock_xattr(inode, &no_expand);
+ BUG_ON(!ext4_has_inline_data(inode));
- ret = ext4_get_inode_loc(inode, &iloc);
- if (ret) {
- ext4_std_error(inode->i_sb, ret);
- copied = 0;
- goto out;
- }
+ /*
+ * ei->i_inline_off may have changed since
+ * ext4_write_begin() called
+ * ext4_try_to_write_inline_data()
+ */
+ (void) ext4_find_inline_data_nolock(inode);
- ext4_write_lock_xattr(inode, &no_expand);
- BUG_ON(!ext4_has_inline_data(inode));
+ kaddr = kmap_atomic(page);
+ ext4_write_inline_data(inode, &iloc, kaddr, pos, copied);
+ kunmap_atomic(kaddr);
+ SetPageUptodate(page);
+ /* clear page dirty so that writepages wouldn't work for us. */
+ ClearPageDirty(page);
- /*
- * ei->i_inline_off may have changed since ext4_write_begin()
- * called ext4_try_to_write_inline_data()
- */
- (void) ext4_find_inline_data_nolock(inode);
+ ext4_write_unlock_xattr(inode, &no_expand);
+ brelse(iloc.bh);
- kaddr = kmap_atomic(page);
- ext4_write_inline_data(inode, &iloc, kaddr, pos, len);
- kunmap_atomic(kaddr);
- SetPageUptodate(page);
- /* clear page dirty so that writepages wouldn't work for us. */
- ClearPageDirty(page);
+ /*
+ * It's important to update i_size while still holding page
+ * lock: page writeout could otherwise come in and zero
+ * beyond i_size.
+ */
+ ext4_update_inode_size(inode, pos + copied);
+ }
+ unlock_page(page);
+ put_page(page);
- ext4_write_unlock_xattr(inode, &no_expand);
- brelse(iloc.bh);
- mark_inode_dirty(inode);
+ /*
+ * Don't mark the inode dirty under page lock. First, it unnecessarily
+ * makes the holding time of page lock longer. Second, it forces lock
+ * ordering of page lock and transaction start for journaling
+ * filesystems.
+ */
+ if (likely(copied))
+ mark_inode_dirty(inode);
out:
- return copied;
+ /*
+ * If we didn't copy as much data as expected, we need to trim back
+ * size of xattr containing inline data.
+ */
+ if (pos + len > inode->i_size && ext4_can_truncate(inode))
+ ext4_orphan_add(handle, inode);
+
+ ret2 = ext4_journal_stop(handle);
+ if (!ret)
+ ret = ret2;
+ if (pos + len > inode->i_size) {
+ ext4_truncate_failed_write(inode);
+ /*
+ * If truncate failed early the inode might still be
+ * on the orphan list; we need to make sure the inode
+ * is removed from the orphan list in that case.
+ */
+ if (inode->i_nlink)
+ ext4_orphan_del(NULL, inode);
+ }
+ return ret ? ret : copied;
}
struct buffer_head *
@@ -953,43 +992,6 @@ out:
return ret;
}
-int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
- unsigned len, unsigned copied,
- struct page *page)
-{
- int ret;
-
- ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- return ret;
- }
- copied = ret;
-
- /*
- * No need to use i_size_read() here, the i_size
- * cannot change under us because we hold i_mutex.
- *
- * But it's important to update i_size while still holding page lock:
- * page writeout could otherwise come in and zero beyond i_size.
- */
- if (pos+copied > inode->i_size)
- i_size_write(inode, pos+copied);
- unlock_page(page);
- put_page(page);
-
- /*
- * Don't mark the inode dirty under page lock. First, it unnecessarily
- * makes the holding time of page lock longer. Second, it forces lock
- * ordering of page lock and transaction start for journaling
- * filesystems.
- */
- mark_inode_dirty(inode);
-
- return copied;
-}
-
#ifdef INLINE_DIR_DEBUG
void ext4_show_inline_dir(struct inode *dir, struct buffer_head *bh,
void *inline_start, int inline_size)
@@ -1917,6 +1919,24 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
EXT4_I(inode)->i_disksize = i_size;
if (i_size < inline_size) {
+ /*
+ * if there's inline data to truncate and this file was
+ * converted to extents after that inline data was written,
+ * the extent status cache must be cleared to avoid leaving
+ * behind stale delayed allocated extent entries
+ */
+ if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
+retry:
+ err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
+ if (err == -ENOMEM) {
+ cond_resched();
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+ if (err)
+ goto out_error;
+ }
+
/* Clear the content in the xattr space. */
if (inline_size > EXT4_MIN_INLINE_DATA_SIZE) {
if ((err = ext4_xattr_ibody_find(inode, &i, &is)) != 0)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index d18852d6029c..0f06305167d5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1284,22 +1284,14 @@ static int ext4_write_end(struct file *file,
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int i_size_changed = 0;
- int inline_data = ext4_has_inline_data(inode);
bool verity = ext4_verity_in_progress(inode);
trace_ext4_write_end(inode, pos, len, copied);
- if (inline_data) {
- ret = ext4_write_inline_data_end(inode, pos, len,
- copied, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- goto errout;
- }
- copied = ret;
- } else
- copied = block_write_end(file, mapping, pos,
- len, copied, page, fsdata);
+
+ if (ext4_has_inline_data(inode))
+ return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
/*
* it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
@@ -1320,7 +1312,7 @@ static int ext4_write_end(struct file *file,
* ordering of page lock and transaction start for journaling
* filesystems.
*/
- if (i_size_changed || inline_data)
+ if (i_size_changed)
ret = ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode))
@@ -1329,7 +1321,7 @@ static int ext4_write_end(struct file *file,
* inode->i_size. So truncate them
*/
ext4_orphan_add(handle, inode);
-errout:
+
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
@@ -1395,7 +1387,6 @@ static int ext4_journalled_write_end(struct file *file,
int partial = 0;
unsigned from, to;
int size_changed = 0;
- int inline_data = ext4_has_inline_data(inode);
bool verity = ext4_verity_in_progress(inode);
trace_ext4_journalled_write_end(inode, pos, len, copied);
@@ -1404,16 +1395,10 @@ static int ext4_journalled_write_end(struct file *file,
BUG_ON(!ext4_handle_valid(handle));
- if (inline_data) {
- ret = ext4_write_inline_data_end(inode, pos, len,
- copied, page);
- if (ret < 0) {
- unlock_page(page);
- put_page(page);
- goto errout;
- }
- copied = ret;
- } else if (unlikely(copied < len) && !PageUptodate(page)) {
+ if (ext4_has_inline_data(inode))
+ return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
+ if (unlikely(copied < len) && !PageUptodate(page)) {
copied = 0;
ext4_journalled_zero_new_buffers(handle, inode, page, from, to);
} else {
@@ -1436,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file,
if (old_size < pos && !verity)
pagecache_isize_extended(inode, old_size, pos);
- if (size_changed || inline_data) {
+ if (size_changed) {
ret2 = ext4_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
@@ -1449,7 +1434,6 @@ static int ext4_journalled_write_end(struct file *file,
*/
ext4_orphan_add(handle, inode);
-errout:
ret2 = ext4_journal_stop(handle);
if (!ret)
ret = ret2;
@@ -1644,6 +1628,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
int ret;
bool allocated = false;
+ bool reserved = false;
/*
* If the cluster containing lblk is shared with a delayed,
@@ -1660,6 +1645,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
ret = ext4_da_reserve_space(inode);
if (ret != 0) /* ENOSPC */
goto errout;
+ reserved = true;
} else { /* bigalloc */
if (!ext4_es_scan_clu(inode, &ext4_es_is_delonly, lblk)) {
if (!ext4_es_scan_clu(inode,
@@ -1672,6 +1658,7 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
ret = ext4_da_reserve_space(inode);
if (ret != 0) /* ENOSPC */
goto errout;
+ reserved = true;
} else {
allocated = true;
}
@@ -1682,6 +1669,8 @@ static int ext4_insert_delayed_block(struct inode *inode, ext4_lblk_t lblk)
}
ret = ext4_es_insert_delayed_block(inode, lblk, allocated);
+ if (ret && reserved)
+ ext4_da_release_space(inode, 1);
errout:
return ret;
@@ -1722,13 +1711,16 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
}
/*
- * Delayed extent could be allocated by fallocate.
- * So we need to check it.
+ * the buffer head associated with a delayed and not unwritten
+ * block found in the extent status cache must contain an
+ * invalid block number and have its BH_New and BH_Delay bits
+ * set, reflecting the state assigned when the block was
+ * initially delayed allocated
*/
- if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
- map_bh(bh, inode->i_sb, invalid_block);
- set_buffer_new(bh);
- set_buffer_delay(bh);
+ if (ext4_es_is_delonly(&es)) {
+ BUG_ON(bh->b_blocknr != invalid_block);
+ BUG_ON(!buffer_new(bh));
+ BUG_ON(!buffer_delay(bh));
return 0;
}
@@ -2932,19 +2924,6 @@ static int ext4_nonda_switch(struct super_block *sb)
return 0;
}
-/* We always reserve for an inode update; the superblock could be there too */
-static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
-{
- if (likely(ext4_has_feature_large_file(inode->i_sb)))
- return 1;
-
- if (pos + len <= 0x7fffffffULL)
- return 1;
-
- /* We might need to update the superblock to set LARGE_FILE */
- return 2;
-}
-
static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
@@ -2953,7 +2932,6 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
pgoff_t index;
struct inode *inode = mapping->host;
- handle_t *handle;
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
@@ -2979,41 +2957,11 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
return 0;
}
- /*
- * grab_cache_page_write_begin() can take a long time if the
- * system is thrashing due to memory pressure, or if the page
- * is being written back. So grab it first before we start
- * the transaction handle. This also allows us to allocate
- * the page (if needed) without using GFP_NOFS.
- */
-retry_grab:
+retry:
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
return -ENOMEM;
- unlock_page(page);
- /*
- * With delayed allocation, we don't log the i_disksize update
- * if there is delayed block allocation. But we still need
- * to journalling the i_disksize update if writes to the end
- * of file which has an already mapped buffer.
- */
-retry_journal:
- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
- ext4_da_write_credits(inode, pos, len));
- if (IS_ERR(handle)) {
- put_page(page);
- return PTR_ERR(handle);
- }
-
- lock_page(page);
- if (page->mapping != mapping) {
- /* The page got truncated from under us */
- unlock_page(page);
- put_page(page);
- ext4_journal_stop(handle);
- goto retry_grab;
- }
/* In case writeback began while the page was unlocked */
wait_for_stable_page(page);
@@ -3025,20 +2973,18 @@ retry_journal:
#endif
if (ret < 0) {
unlock_page(page);
- ext4_journal_stop(handle);
+ put_page(page);
/*
* block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
+ * i_size_read because we hold inode lock.
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
- goto retry_journal;
-
- put_page(page);
+ goto retry;
return ret;
}
@@ -3075,8 +3021,6 @@ static int ext4_da_write_end(struct file *file,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
- int ret = 0, ret2;
- handle_t *handle = ext4_journal_current_handle();
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
@@ -3086,44 +3030,36 @@ static int ext4_da_write_end(struct file *file,
len, copied, page, fsdata);
trace_ext4_da_write_end(inode, pos, len, copied);
+
+ if (write_mode != CONVERT_INLINE_DATA &&
+ ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
+ ext4_has_inline_data(inode))
+ return ext4_write_inline_data_end(inode, pos, len, copied, page);
+
start = pos & (PAGE_SIZE - 1);
end = start + copied - 1;
/*
- * generic_write_end() will run mark_inode_dirty() if i_size
- * changes. So let's piggyback the i_disksize mark_inode_dirty
- * into that.
+ * Since we are holding inode lock, we are sure i_disksize <=
+ * i_size. We also know that if i_disksize < i_size, there are
+ * delalloc writes pending in the range upto i_size. If the end of
+ * the current write is <= i_size, there's no need to touch
+ * i_disksize since writeback will push i_disksize upto i_size
+ * eventually. If the end of the current write is > i_size and
+ * inside an allocated block (ext4_da_should_update_i_disksize()
+ * check), we need to update i_disksize here as neither
+ * ext4_writepage() nor certain ext4_writepages() paths not
+ * allocating blocks update i_disksize.
+ *
+ * Note that we defer inode dirtying to generic_write_end() /
+ * ext4_da_write_inline_data_end().
*/
new_i_size = pos + copied;
- if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
- if (ext4_has_inline_data(inode) ||
- ext4_da_should_update_i_disksize(page, end)) {
- ext4_update_i_disksize(inode, new_i_size);
- /* We need to mark inode dirty even if
- * new_i_size is less that inode->i_size
- * bu greater than i_disksize.(hint delalloc)
- */
- ret = ext4_mark_inode_dirty(handle, inode);
- }
- }
+ if (copied && new_i_size > inode->i_size &&
+ ext4_da_should_update_i_disksize(page, end))
+ ext4_update_i_disksize(inode, new_i_size);
- if (write_mode != CONVERT_INLINE_DATA &&
- ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
- ext4_has_inline_data(inode))
- ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
- page);
- else
- ret2 = generic_write_end(file, mapping, pos, len, copied,
- page, fsdata);
-
- copied = ret2;
- if (ret2 < 0)
- ret = ret2;
- ret2 = ext4_journal_stop(handle);
- if (unlikely(ret2 && !ret))
- ret = ret2;
-
- return ret ? ret : copied;
+ return generic_write_end(file, mapping, pos, len, copied, page, fsdata);
}
/*
@@ -4340,6 +4276,12 @@ static int __ext4_get_inode_loc(struct super_block *sb, unsigned long ino,
goto has_buffer;
lock_buffer(bh);
+ if (ext4_buffer_uptodate(bh)) {
+ /* Someone brought it uptodate while we waited */
+ unlock_buffer(bh);
+ goto has_buffer;
+ }
+
/*
* If we have all information of the inode in memory and this
* is the only valid inode in the block, we need not read the
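
The __ext4_get_inode_loc() hunk above re-tests the buffer after lock_buffer() because another task may have completed the read while this one slept on the lock, so a second I/O would be wasted. The same check-again-after-taking-the-lock flow, sketched with a pthread mutex and an atomic flag standing in for the buffer lock and uptodate bit:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int uptodate;

static void read_block(void)
{
        if (atomic_load(&uptodate))     /* fast path, no lock taken */
                return;

        pthread_mutex_lock(&buf_lock);
        if (atomic_load(&uptodate)) {   /* finished by someone else while we waited */
                pthread_mutex_unlock(&buf_lock);
                return;
        }
        /* ... submit and wait for the expensive read ... */
        atomic_store(&uptodate, 1);
        pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
        read_block();
        read_block();       /* second call returns via the fast path */
        printf("uptodate = %d\n", atomic_load(&uptodate));
        return 0;
}
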
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0775950ee84e..88d5d274a868 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -658,7 +658,7 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
* constraints, it may not be safe to do it right here so we
* defer superblock flushing to a workqueue.
*/
- if (continue_fs)
+ if (continue_fs && journal)
schedule_work(&EXT4_SB(sb)->s_error_work);
else
ext4_commit_super(sb);
@@ -1350,6 +1350,12 @@ static void ext4_destroy_inode(struct inode *inode)
true);
dump_stack();
}
+
+ if (EXT4_I(inode)->i_reserved_data_blocks)
+ ext4_msg(inode->i_sb, KERN_ERR,
+ "Inode %lu (%p): i_reserved_data_blocks (%u) not cleared!",
+ inode->i_ino, EXT4_I(inode),
+ EXT4_I(inode)->i_reserved_data_blocks);
}
static void init_once(void *foo)
@@ -3021,17 +3027,17 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
*/
static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
{
- loff_t res = EXT4_NDIR_BLOCKS;
+ unsigned long long upper_limit, res = EXT4_NDIR_BLOCKS;
int meta_blocks;
- loff_t upper_limit;
- /* This is calculated to be the largest file size for a dense, block
+
+ /*
+ * This is calculated to be the largest file size for a dense, block
* mapped file such that the file's total number of 512-byte sectors,
* including data and all indirect blocks, does not exceed (2^48 - 1).
*
* __u32 i_blocks_lo and _u16 i_blocks_high represent the total
* number of 512-byte sectors of the file.
*/
-
if (!has_huge_files) {
/*
* !has_huge_files or implies that the inode i_block field
@@ -3074,7 +3080,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
if (res > MAX_LFS_FILESIZE)
res = MAX_LFS_FILESIZE;
- return res;
+ return (loff_t)res;
}
static ext4_fsblk_t descriptor_loc(struct super_block *sb,
@@ -5042,12 +5048,15 @@ failed_mount_wq:
sbi->s_ea_block_cache = NULL;
if (sbi->s_journal) {
+ /* flush s_error_work before journal destroy. */
+ flush_work(&sbi->s_error_work);
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
failed_mount3a:
ext4_es_unregister_shrinker(sbi);
failed_mount3:
+ /* flush s_error_work before sbi destroy */
flush_work(&sbi->s_error_work);
del_timer_sync(&sbi->s_err_report);
ext4_stop_mmpd(sbi);
diff --git a/fs/inode.c b/fs/inode.c
index 37710ca863b5..ed0cab8a32db 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -190,8 +190,10 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
mapping->private_data = NULL;
mapping->writeback_index = 0;
- __init_rwsem(&mapping->invalidate_lock, "mapping.invalidate_lock",
- &sb->s_type->invalidate_lock_key);
+ init_rwsem(&mapping->invalidate_lock);
+ lockdep_set_class_and_name(&mapping->invalidate_lock,
+ &sb->s_type->invalidate_lock_key,
+ "mapping.invalidate_lock");
inode->i_private = NULL;
inode->i_mapping = mapping;
INIT_HLIST_HEAD(&inode->i_dentry); /* buggered by rcu freeing */
diff --git a/fs/io-wq.c b/fs/io-wq.c
index c2e0e8e80949..5bf8aa81715e 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -584,9 +584,7 @@ loop:
if (!get_signal(&ksig))
continue;
- if (fatal_signal_pending(current))
- break;
- continue;
+ break;
}
last_timeout = !ret;
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e372d5b9f6dc..6b9e70208782 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -403,7 +403,6 @@ struct io_ring_ctx {
struct wait_queue_head cq_wait;
unsigned cq_extra;
atomic_t cq_timeouts;
- struct fasync_struct *cq_fasync;
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp;
@@ -502,6 +501,7 @@ struct io_poll_update {
struct io_close {
struct file *file;
int fd;
+ u32 file_slot;
};
struct io_timeout_data {
@@ -1098,6 +1098,8 @@ static int io_req_prep_async(struct io_kiocb *req);
static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
unsigned int issue_flags, u32 slot_index);
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
+
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer);
static struct kmem_cache *req_cachep;
@@ -1611,10 +1613,8 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
wake_up(&ctx->sq_data->wait);
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
- if (waitqueue_active(&ctx->poll_wait)) {
+ if (waitqueue_active(&ctx->poll_wait))
wake_up_interruptible(&ctx->poll_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
}
static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
@@ -1628,10 +1628,8 @@ static void io_cqring_ev_posted_iopoll(struct io_ring_ctx *ctx)
}
if (io_should_trigger_evfd(ctx))
eventfd_signal(ctx->cq_ev_fd, 1);
- if (waitqueue_active(&ctx->poll_wait)) {
+ if (waitqueue_active(&ctx->poll_wait))
wake_up_interruptible(&ctx->poll_wait);
- kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
- }
}
/* Returns true if there are no backlogged entries after the flush */
@@ -3605,7 +3603,6 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
iov_iter_save_state(iter, state);
}
req->result = iov_iter_count(iter);
- ret2 = 0;
/* Ensure we clear previously set non-block flag */
if (!force_nonblock)
@@ -3670,8 +3667,6 @@ done:
} else {
copy_iov:
iov_iter_restore(iter, state);
- if (ret2 > 0)
- iov_iter_advance(iter, ret2);
ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
return ret ?: -EAGAIN;
}
@@ -4387,7 +4382,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
int i, bid = pbuf->bid;
for (i = 0; i < pbuf->nbufs; i++) {
- buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
if (!buf)
break;
@@ -4594,12 +4589,16 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
return -EINVAL;
if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
- sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
+ sqe->rw_flags || sqe->buf_index)
return -EINVAL;
if (req->flags & REQ_F_FIXED_FILE)
return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
+ req->close.file_slot = READ_ONCE(sqe->file_index);
+ if (req->close.file_slot && req->close.fd)
+ return -EINVAL;
+
return 0;
}
@@ -4611,6 +4610,11 @@ static int io_close(struct io_kiocb *req, unsigned int issue_flags)
struct file *file = NULL;
int ret = -EBADF;
+ if (req->close.file_slot) {
+ ret = io_close_fixed(req, issue_flags);
+ goto err;
+ }
+
spin_lock(&files->file_lock);
fdt = files_fdtable(files);
if (close->fd >= fdt->max_fds) {
@@ -5338,7 +5342,7 @@ static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
if (req->poll.events & EPOLLONESHOT)
flags = 0;
if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
- req->poll.done = true;
+ req->poll.events |= EPOLLONESHOT;
flags = 0;
}
if (flags & IORING_CQE_F_MORE)
@@ -5367,10 +5371,15 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
} else {
bool done;
+ if (req->poll.done) {
+ spin_unlock(&ctx->completion_lock);
+ return;
+ }
done = __io_poll_complete(req, req->result);
if (done) {
io_poll_remove_double(req);
hash_del(&req->hash_node);
+ req->poll.done = true;
} else {
req->result = 0;
add_wait_queue(req->poll.head, &req->poll.wait);
@@ -5508,6 +5517,7 @@ static void io_async_task_func(struct io_kiocb *req, bool *locked)
hash_del(&req->hash_node);
io_poll_remove_double(req);
+ apoll->poll.done = true;
spin_unlock(&ctx->completion_lock);
if (!READ_ONCE(apoll->poll.canceled))
@@ -5828,6 +5838,7 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
struct io_ring_ctx *ctx = req->ctx;
struct io_poll_table ipt;
__poll_t mask;
+ bool done;
ipt.pt._qproc = io_poll_queue_proc;
@@ -5836,13 +5847,13 @@ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
if (mask) { /* no async, we'd stolen it */
ipt.error = 0;
- io_poll_complete(req, mask);
+ done = io_poll_complete(req, mask);
}
spin_unlock(&ctx->completion_lock);
if (mask) {
io_cqring_ev_posted(ctx);
- if (poll->events & EPOLLONESHOT)
+ if (done)
io_put_req(req);
}
return ipt.error;
@@ -6333,19 +6344,16 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
struct io_uring_rsrc_update2 up;
int ret;
- if (issue_flags & IO_URING_F_NONBLOCK)
- return -EAGAIN;
-
up.offset = req->rsrc_update.offset;
up.data = req->rsrc_update.arg;
up.nr = 0;
up.tags = 0;
up.resv = 0;
- mutex_lock(&ctx->uring_lock);
+ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
&up, req->rsrc_update.nr_args);
- mutex_unlock(&ctx->uring_lock);
+ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
if (ret < 0)
req_set_fail(req);
@@ -8400,6 +8408,44 @@ err:
return ret;
}
+static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
+{
+ unsigned int offset = req->close.file_slot - 1;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_fixed_file *file_slot;
+ struct file *file;
+ int ret, i;
+
+ io_ring_submit_lock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+ ret = -ENXIO;
+ if (unlikely(!ctx->file_data))
+ goto out;
+ ret = -EINVAL;
+ if (offset >= ctx->nr_user_files)
+ goto out;
+ ret = io_rsrc_node_switch_start(ctx);
+ if (ret)
+ goto out;
+
+ i = array_index_nospec(offset, ctx->nr_user_files);
+ file_slot = io_fixed_file_slot(&ctx->file_table, i);
+ ret = -EBADF;
+ if (!file_slot->file_ptr)
+ goto out;
+
+ file = (struct file *)(file_slot->file_ptr & FFS_MASK);
+ ret = io_queue_rsrc_removal(ctx->file_data, offset, ctx->rsrc_node, file);
+ if (ret)
+ goto out;
+
+ file_slot->file_ptr = 0;
+ io_rsrc_node_switch(ctx, ctx->file_data);
+ ret = 0;
+out:
+ io_ring_submit_unlock(ctx, !(issue_flags & IO_URING_F_NONBLOCK));
+ return ret;
+}
+
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
struct io_uring_rsrc_update2 *up,
unsigned nr_args)
@@ -9166,8 +9212,10 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
struct io_buffer *buf;
unsigned long index;
- xa_for_each(&ctx->io_buffers, index, buf)
+ xa_for_each(&ctx->io_buffers, index, buf) {
__io_remove_buffers(ctx, buf, index, -1U);
+ cond_resched();
+ }
}
static void io_req_cache_free(struct list_head *list)
@@ -9292,13 +9340,6 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
return mask;
}
-static int io_uring_fasync(int fd, struct file *file, int on)
-{
- struct io_ring_ctx *ctx = file->private_data;
-
- return fasync_helper(fd, file, on, &ctx->cq_fasync);
-}
-
static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
const struct cred *creds;
@@ -9665,8 +9706,10 @@ static void io_uring_clean_tctx(struct io_uring_task *tctx)
struct io_tctx_node *node;
unsigned long index;
- xa_for_each(&tctx->xa, index, node)
+ xa_for_each(&tctx->xa, index, node) {
io_uring_del_tctx_node(index);
+ cond_resched();
+ }
if (wq) {
/*
* Must be after io_uring_del_task_file() (removes nodes under
@@ -10090,7 +10133,6 @@ static const struct file_operations io_uring_fops = {
.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
.poll = io_uring_poll,
- .fasync = io_uring_fasync,
#ifdef CONFIG_PROC_FS
.show_fdinfo = io_uring_show_fdinfo,
#endif
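
[Editor's note: the io_files_update() hunk above replaces an unconditional -EAGAIN with the conditional-locking helpers io_ring_submit_lock()/io_ring_submit_unlock(), which take the ring lock only when the caller is allowed to block (nonblocking submission already holds it upstream). The fragment below is a rough userspace analogue of that idiom; the mutex, needs_lock flag and update_resources() are illustrative stand-ins, not io_uring API.]

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Lock only when the caller may block; mirrors the shape of io_ring_submit_lock(). */
    static void submit_lock(bool needs_lock)
    {
        if (needs_lock)
            pthread_mutex_lock(&ring_lock);
    }

    static void submit_unlock(bool needs_lock)
    {
        if (needs_lock)
            pthread_mutex_unlock(&ring_lock);
    }

    /* Illustrative update path: the nonblocking caller is assumed to hold the lock already. */
    static int update_resources(bool nonblock)
    {
        bool needs_lock = !nonblock;

        submit_lock(needs_lock);
        /* ... registered-resource table update would happen here ... */
        submit_unlock(needs_lock);
        return 0;
    }

    int main(void)
    {
        update_resources(false);    /* blocking submission: takes the lock here */
        update_resources(true);     /* nonblocking path: relies on the caller's lock */
        puts("both paths completed");
        return 0;
    }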
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index ba581429bf7b..cfc3ce8b815a 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -1116,8 +1116,13 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
if (!inode)
inode = ERR_PTR(-ENOMEM);
}
- /* Needed only for negative dentry validation */
- if (!inode)
+ /*
+ * Needed for negative dentry validation.
+ * The negative dentry can be created in kernfs_iop_lookup()
+ * or transforms from positive dentry in dentry_unlink_inode()
+ * called from vfs_rmdir().
+ */
+ if (!IS_ERR(inode))
kernfs_set_rev(parent, dentry);
up_read(&kernfs_rwsem);
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index de36f12070bf..71c989f1568d 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -68,125 +68,6 @@ void ksmbd_copy_gss_neg_header(void *buf)
memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
}
-static void
-str_to_key(unsigned char *str, unsigned char *key)
-{
- int i;
-
- key[0] = str[0] >> 1;
- key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
- key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
- key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
- key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
- key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
- key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
- key[7] = str[6] & 0x7F;
- for (i = 0; i < 8; i++)
- key[i] = (key[i] << 1);
-}
-
-static int
-smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
-{
- unsigned char key2[8];
- struct des_ctx ctx;
-
- if (fips_enabled) {
- ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");
- return -ENOENT;
- }
-
- str_to_key(key, key2);
- des_expand_key(&ctx, key2, DES_KEY_SIZE);
- des_encrypt(&ctx, out, in);
- memzero_explicit(&ctx, sizeof(ctx));
- return 0;
-}
-
-static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
-{
- int rc;
-
- rc = smbhash(p24, c8, p21);
- if (rc)
- return rc;
- rc = smbhash(p24 + 8, c8, p21 + 7);
- if (rc)
- return rc;
- return smbhash(p24 + 16, c8, p21 + 14);
-}
-
-/* produce a md4 message digest from data of length n bytes */
-static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,
- int link_len)
-{
- int rc;
- struct ksmbd_crypto_ctx *ctx;
-
- ctx = ksmbd_crypto_ctx_find_md4();
- if (!ctx) {
- ksmbd_debug(AUTH, "Crypto md4 allocation error\n");
- return -ENOMEM;
- }
-
- rc = crypto_shash_init(CRYPTO_MD4(ctx));
- if (rc) {
- ksmbd_debug(AUTH, "Could not init md4 shash\n");
- goto out;
- }
-
- rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);
- if (rc) {
- ksmbd_debug(AUTH, "Could not update with link_str\n");
- goto out;
- }
-
- rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);
- if (rc)
- ksmbd_debug(AUTH, "Could not generate md4 hash\n");
-out:
- ksmbd_release_crypto_ctx(ctx);
- return rc;
-}
-
-static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,
- char *server_challenge, int len)
-{
- int rc;
- struct ksmbd_crypto_ctx *ctx;
-
- ctx = ksmbd_crypto_ctx_find_md5();
- if (!ctx) {
- ksmbd_debug(AUTH, "Crypto md5 allocation error\n");
- return -ENOMEM;
- }
-
- rc = crypto_shash_init(CRYPTO_MD5(ctx));
- if (rc) {
- ksmbd_debug(AUTH, "Could not init md5 shash\n");
- goto out;
- }
-
- rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);
- if (rc) {
- ksmbd_debug(AUTH, "Could not update with challenge\n");
- goto out;
- }
-
- rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);
- if (rc) {
- ksmbd_debug(AUTH, "Could not update with nonce\n");
- goto out;
- }
-
- rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);
- if (rc)
- ksmbd_debug(AUTH, "Could not generate md5 hash\n");
-out:
- ksmbd_release_crypto_ctx(ctx);
- return rc;
-}
-
/**
* ksmbd_gen_sess_key() - function to generate session key
* @sess: session of connection
@@ -325,43 +206,6 @@ out:
}
/**
- * ksmbd_auth_ntlm() - NTLM authentication handler
- * @sess: session of connection
- * @pw_buf: NTLM challenge response
- * @passkey: user password
- *
- * Return: 0 on success, error number on error
- */
-int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf)
-{
- int rc;
- unsigned char p21[21];
- char key[CIFS_AUTH_RESP_SIZE];
-
- memset(p21, '\0', 21);
- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
- rc = ksmbd_enc_p24(p21, sess->ntlmssp.cryptkey, key);
- if (rc) {
- pr_err("password processing failed\n");
- return rc;
- }
-
- ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),
- CIFS_SMB1_SESSKEY_SIZE);
- memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,
- CIFS_AUTH_RESP_SIZE);
- sess->sequence_number = 1;
-
- if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {
- ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");
- return -EINVAL;
- }
-
- ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");
- return 0;
-}
-
-/**
* ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
* @sess: session of connection
* @ntlmv2: NTLMv2 challenge response
@@ -442,44 +286,6 @@ out:
}
/**
- * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler
- * @sess: session of connection
- * @client_nonce: client nonce from LM response.
- * @ntlm_resp: ntlm response data from client.
- *
- * Return: 0 on success, error number on error
- */
-static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess, char *client_nonce,
- char *ntlm_resp)
-{
- char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};
- int rc;
- unsigned char p21[21];
- char key[CIFS_AUTH_RESP_SIZE];
-
- rc = ksmbd_enc_update_sess_key(sess_key,
- client_nonce,
- (char *)sess->ntlmssp.cryptkey, 8);
- if (rc) {
- pr_err("password processing failed\n");
- goto out;
- }
-
- memset(p21, '\0', 21);
- memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
- rc = ksmbd_enc_p24(p21, sess_key, key);
- if (rc) {
- pr_err("password processing failed\n");
- goto out;
- }
-
- if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)
- rc = -EINVAL;
-out:
- return rc;
-}
-
-/**
* ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
* authenticate blob
* @authblob: authenticate blob source pointer
@@ -512,17 +318,6 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
nt_off = le32_to_cpu(authblob->NtChallengeResponse.BufferOffset);
nt_len = le16_to_cpu(authblob->NtChallengeResponse.Length);
- /* process NTLM authentication */
- if (nt_len == CIFS_AUTH_RESP_SIZE) {
- if (le32_to_cpu(authblob->NegotiateFlags) &
- NTLMSSP_NEGOTIATE_EXTENDED_SEC)
- return __ksmbd_auth_ntlmv2(sess, (char *)authblob +
- lm_off, (char *)authblob + nt_off);
- else
- return ksmbd_auth_ntlm(sess, (char *)authblob +
- nt_off);
- }
-
/* TODO : use domain name that imported from configuration file */
domain_name = smb_strndup_from_utf16((const char *)authblob +
le32_to_cpu(authblob->DomainName.BufferOffset),
diff --git a/fs/ksmbd/crypto_ctx.c b/fs/ksmbd/crypto_ctx.c
index 5f4b1008d17e..81488d04199d 100644
--- a/fs/ksmbd/crypto_ctx.c
+++ b/fs/ksmbd/crypto_ctx.c
@@ -81,12 +81,6 @@ static struct shash_desc *alloc_shash_desc(int id)
case CRYPTO_SHASH_SHA512:
tfm = crypto_alloc_shash("sha512", 0, 0);
break;
- case CRYPTO_SHASH_MD4:
- tfm = crypto_alloc_shash("md4", 0, 0);
- break;
- case CRYPTO_SHASH_MD5:
- tfm = crypto_alloc_shash("md5", 0, 0);
- break;
default:
return NULL;
}
@@ -214,16 +208,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
}
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void)
-{
- return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4);
-}
-
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void)
-{
- return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5);
-}
-
static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
{
struct ksmbd_crypto_ctx *ctx;
diff --git a/fs/ksmbd/crypto_ctx.h b/fs/ksmbd/crypto_ctx.h
index ef11154b43df..4a367c62f653 100644
--- a/fs/ksmbd/crypto_ctx.h
+++ b/fs/ksmbd/crypto_ctx.h
@@ -15,8 +15,6 @@ enum {
CRYPTO_SHASH_CMACAES,
CRYPTO_SHASH_SHA256,
CRYPTO_SHASH_SHA512,
- CRYPTO_SHASH_MD4,
- CRYPTO_SHASH_MD5,
CRYPTO_SHASH_MAX,
};
@@ -43,8 +41,6 @@ struct ksmbd_crypto_ctx {
#define CRYPTO_CMACAES(c) ((c)->desc[CRYPTO_SHASH_CMACAES])
#define CRYPTO_SHA256(c) ((c)->desc[CRYPTO_SHASH_SHA256])
#define CRYPTO_SHA512(c) ((c)->desc[CRYPTO_SHASH_SHA512])
-#define CRYPTO_MD4(c) ((c)->desc[CRYPTO_SHASH_MD4])
-#define CRYPTO_MD5(c) ((c)->desc[CRYPTO_SHASH_MD5])
#define CRYPTO_HMACMD5_TFM(c) ((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
#define CRYPTO_HMACSHA256_TFM(c)\
@@ -52,8 +48,6 @@ struct ksmbd_crypto_ctx {
#define CRYPTO_CMACAES_TFM(c) ((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
#define CRYPTO_SHA256_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
#define CRYPTO_SHA512_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
-#define CRYPTO_MD4_TFM(c) ((c)->desc[CRYPTO_SHASH_MD4]->tfm)
-#define CRYPTO_MD5_TFM(c) ((c)->desc[CRYPTO_SHASH_MD5]->tfm)
#define CRYPTO_GCM(c) ((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
#define CRYPTO_CCM(c) ((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
@@ -64,8 +58,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
void ksmbd_crypto_destroy(void);
diff --git a/fs/ksmbd/misc.c b/fs/ksmbd/misc.c
index 0b307ca28a19..60e7ac62c917 100644
--- a/fs/ksmbd/misc.c
+++ b/fs/ksmbd/misc.c
@@ -158,25 +158,18 @@ out:
* Return : windows path string or error
*/
-char *convert_to_nt_pathname(char *filename, char *sharepath)
+char *convert_to_nt_pathname(char *filename)
{
char *ab_pathname;
- int len, name_len;
- name_len = strlen(filename);
- ab_pathname = kmalloc(name_len, GFP_KERNEL);
+ if (strlen(filename) == 0)
+ filename = "\\";
+
+ ab_pathname = kstrdup(filename, GFP_KERNEL);
if (!ab_pathname)
return NULL;
- ab_pathname[0] = '\\';
- ab_pathname[1] = '\0';
-
- len = strlen(sharepath);
- if (!strncmp(filename, sharepath, len) && name_len != len) {
- strscpy(ab_pathname, &filename[len], name_len);
- ksmbd_conv_path_to_windows(ab_pathname);
- }
-
+ ksmbd_conv_path_to_windows(ab_pathname);
return ab_pathname;
}
@@ -240,7 +233,7 @@ char *ksmbd_extract_sharename(char *treename)
*
* Return: converted name on success, otherwise NULL
*/
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name)
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name)
{
int no_slash = 0, name_len, path_len;
char *new_name;
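
[Editor's note: with share-relative names, convert_to_nt_pathname() above only has to duplicate the string (mapping the empty name to the share root "\") and flip the separators. A standalone sketch of that reduced logic follows; the helper name and the inline separator loop are illustrative — the driver uses kstrdup() and ksmbd_conv_path_to_windows().]

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Share-relative unix path -> windows-style path ("" maps to the share root). */
    static char *to_nt_pathname(const char *filename)
    {
        char *out, *p;

        if (filename[0] == '\0')
            filename = "\\";

        out = strdup(filename);          /* kstrdup(filename, GFP_KERNEL) in the driver */
        if (!out)
            return NULL;

        for (p = out; *p; p++)           /* ksmbd_conv_path_to_windows() equivalent */
            if (*p == '/')
                *p = '\\';
        return out;
    }

    int main(void)
    {
        char *a = to_nt_pathname("dir/sub/file.txt");
        char *b = to_nt_pathname("");

        printf("%s\n%s\n", a, b);        /* dir\sub\file.txt and \ */
        free(a);
        free(b);
        return 0;
    }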
diff --git a/fs/ksmbd/misc.h b/fs/ksmbd/misc.h
index af8717d4d85b..253366bd0951 100644
--- a/fs/ksmbd/misc.h
+++ b/fs/ksmbd/misc.h
@@ -14,13 +14,13 @@ struct ksmbd_file;
int match_pattern(const char *str, size_t len, const char *pattern);
int ksmbd_validate_filename(char *filename);
int parse_stream_name(char *filename, char **stream_name, int *s_type);
-char *convert_to_nt_pathname(char *filename, char *sharepath);
+char *convert_to_nt_pathname(char *filename);
int get_nlink(struct kstat *st);
void ksmbd_conv_path_to_unix(char *path);
void ksmbd_strip_last_slash(char *path);
void ksmbd_conv_path_to_windows(char *path);
char *ksmbd_extract_sharename(char *treename);
-char *convert_to_unix_name(struct ksmbd_share_config *share, char *name);
+char *convert_to_unix_name(struct ksmbd_share_config *share, const char *name);
#define KSMBD_DIR_INFO_ALIGNMENT 8
struct ksmbd_dir_info;
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index 16b6236d1bd2..f9dae6ef2115 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -1451,26 +1451,47 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
*/
struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
{
- char *data_offset;
struct create_context *cc;
unsigned int next = 0;
char *name;
struct smb2_create_req *req = (struct smb2_create_req *)open_req;
+ unsigned int remain_len, name_off, name_len, value_off, value_len,
+ cc_len;
- data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
- cc = (struct create_context *)data_offset;
+ /*
+ * CreateContextsOffset and CreateContextsLength are guaranteed to
+ * be valid because of ksmbd_smb2_check_message().
+ */
+ cc = (struct create_context *)((char *)req + 4 +
+ le32_to_cpu(req->CreateContextsOffset));
+ remain_len = le32_to_cpu(req->CreateContextsLength);
do {
- int val;
-
cc = (struct create_context *)((char *)cc + next);
- name = le16_to_cpu(cc->NameOffset) + (char *)cc;
- val = le16_to_cpu(cc->NameLength);
- if (val < 4)
+ if (remain_len < offsetof(struct create_context, Buffer))
return ERR_PTR(-EINVAL);
- if (memcmp(name, tag, val) == 0)
- return cc;
next = le32_to_cpu(cc->Next);
+ name_off = le16_to_cpu(cc->NameOffset);
+ name_len = le16_to_cpu(cc->NameLength);
+ value_off = le16_to_cpu(cc->DataOffset);
+ value_len = le32_to_cpu(cc->DataLength);
+ cc_len = next ? next : remain_len;
+
+ if ((next & 0x7) != 0 ||
+ next > remain_len ||
+ name_off != offsetof(struct create_context, Buffer) ||
+ name_len < 4 ||
+ name_off + name_len > cc_len ||
+ (value_off & 0x7) != 0 ||
+ (value_off && (value_off < name_off + name_len)) ||
+ ((u64)value_off + value_len > cc_len))
+ return ERR_PTR(-EINVAL);
+
+ name = (char *)cc + name_off;
+ if (memcmp(name, tag, name_len) == 0)
+ return cc;
+
+ remain_len -= next;
} while (next != 0);
return NULL;
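
[Editor's note: the reworked smb2_find_context_vals() above treats every create context as untrusted — the chain offset, name offset/length and data offset/length are all checked against the bytes actually remaining before any of them are dereferenced. The standalone sketch below walks a comparable self-describing record chain with the same style of checks; struct record and its fields are simplified illustrations, not the SMB2 create-context wire layout.]

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct record {                  /* simplified chained record, not the wire format */
        uint32_t next;               /* offset of the next record, 0 = last */
        uint16_t name_off;           /* offset of the name, relative to the record */
        uint16_t name_len;
        char     buffer[];
    };

    /* Find a record whose name matches tag; every offset/length is validated
     * against the remaining bytes before use. Returns NULL if absent or malformed. */
    static const struct record *find_record(const void *buf, size_t remain,
                                            const char *tag, size_t tag_len)
    {
        const struct record *rec = buf;
        uint32_t next = 0;

        do {
            rec = (const struct record *)((const char *)rec + next);
            if (remain < offsetof(struct record, buffer))
                return NULL;                          /* header would overrun */

            next = rec->next;
            size_t rec_len = next ? next : remain;    /* the last record owns the tail */

            if (next > remain ||
                rec->name_off < offsetof(struct record, buffer) ||
                rec->name_len == 0 ||
                (size_t)rec->name_off + rec->name_len > rec_len)
                return NULL;                          /* malformed chain */

            if (rec->name_len == tag_len &&
                memcmp((const char *)rec + rec->name_off, tag, tag_len) == 0)
                return rec;

            remain -= next;
        } while (next != 0);

        return NULL;
    }

    int main(void)
    {
        uint32_t blob[3] = { 0 };                     /* 12 bytes, suitably aligned */
        struct record *rec = (struct record *)blob;

        rec->next = 0;
        rec->name_off = offsetof(struct record, buffer);
        rec->name_len = 4;
        memcpy(rec->buffer, "DHnQ", 4);

        printf("found: %s\n",
               find_record(blob, sizeof(blob), "DHnQ", 4) ? "yes" : "no");
        return 0;
    }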
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index e6a9f6aa47eb..2a2b2135bfde 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -584,6 +584,9 @@ static int __init ksmbd_server_init(void)
ret = ksmbd_workqueue_init();
if (ret)
goto err_crypto_destroy;
+
+ pr_warn_once("The ksmbd server is experimental, use at your own risk.\n");
+
return 0;
err_crypto_destroy:
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index c86164dc70bb..dcf907738610 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -433,7 +433,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
work->compound_pfid = KSMBD_NO_FID;
}
memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
+ rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->Command = rcv_hdr->Command;
@@ -459,13 +459,22 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
bool is_chained_smb2_message(struct ksmbd_work *work)
{
struct smb2_hdr *hdr = work->request_buf;
- unsigned int len;
+ unsigned int len, next_cmd;
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
return false;
hdr = ksmbd_req_buf_next(work);
- if (le32_to_cpu(hdr->NextCommand) > 0) {
+ next_cmd = le32_to_cpu(hdr->NextCommand);
+ if (next_cmd > 0) {
+ if ((u64)work->next_smb2_rcv_hdr_off + next_cmd +
+ __SMB2_HEADER_STRUCTURE_SIZE >
+ get_rfc1002_len(work->request_buf)) {
+ pr_err("next command(%u) offset exceeds smb msg size\n",
+ next_cmd);
+ return false;
+ }
+
ksmbd_debug(SMB, "got SMB2 chained command\n");
init_chained_smb2_rsp(work);
return true;
@@ -634,7 +643,7 @@ static char *
smb2_get_name(struct ksmbd_share_config *share, const char *src,
const int maxlen, struct nls_table *local_nls)
{
- char *name, *unixname;
+ char *name;
name = smb_strndup_from_utf16(src, maxlen, 1, local_nls);
if (IS_ERR(name)) {
@@ -642,19 +651,9 @@ smb2_get_name(struct ksmbd_share_config *share, const char *src,
return name;
}
- /* change it to absolute unix name */
ksmbd_conv_path_to_unix(name);
ksmbd_strip_last_slash(name);
-
- unixname = convert_to_unix_name(share, name);
- kfree(name);
- if (!unixname) {
- pr_err("can not convert absolute name\n");
- return ERR_PTR(-ENOMEM);
- }
-
- ksmbd_debug(SMB, "absolute name = %s\n", unixname);
- return unixname;
+ return name;
}
int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
@@ -1068,6 +1067,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
struct smb2_negotiate_req *req = work->request_buf;
struct smb2_negotiate_rsp *rsp = work->response_buf;
int rc = 0;
+ unsigned int smb2_buf_len, smb2_neg_size;
__le32 status;
ksmbd_debug(SMB, "Received negotiate request\n");
@@ -1085,6 +1085,44 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
goto err_out;
}
+ smb2_buf_len = get_rfc1002_len(work->request_buf);
+ smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;
+ if (smb2_neg_size > smb2_buf_len) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ if (conn->dialect == SMB311_PROT_ID) {
+ unsigned int nego_ctxt_off = le32_to_cpu(req->NegotiateContextOffset);
+
+ if (smb2_buf_len < nego_ctxt_off) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ if (smb2_neg_size > nego_ctxt_off) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+ nego_ctxt_off) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+ } else {
+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+ smb2_buf_len) {
+ rsp->hdr.Status = STATUS_INVALID_PARAMETER;
+ rc = -EINVAL;
+ goto err_out;
+ }
+ }
+
conn->cli_cap = le32_to_cpu(req->Capabilities);
switch (conn->dialect) {
case SMB311_PROT_ID:
@@ -2103,16 +2141,22 @@ out:
* smb2_set_ea() - handler for setting extended attributes using set
* info command
* @eabuf: set info command buffer
+ * @buf_len: set info command buffer length
* @path: dentry path for get ea
*
* Return: 0 on success, otherwise error
*/
-static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
+static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len,
+ struct path *path)
{
struct user_namespace *user_ns = mnt_user_ns(path->mnt);
char *attr_name = NULL, *value;
int rc = 0;
- int next = 0;
+ unsigned int next = 0;
+
+ if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength +
+ le16_to_cpu(eabuf->EaValueLength))
+ return -EINVAL;
attr_name = kmalloc(XATTR_NAME_MAX + 1, GFP_KERNEL);
if (!attr_name)
@@ -2177,7 +2221,13 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, struct path *path)
next:
next = le32_to_cpu(eabuf->NextEntryOffset);
+ if (next == 0 || buf_len < next)
+ break;
+ buf_len -= next;
eabuf = (struct smb2_ea_info *)((char *)eabuf + next);
+ if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength))
+ break;
+
} while (next != 0);
kfree(attr_name);
@@ -2348,7 +2398,7 @@ static int smb2_creat(struct ksmbd_work *work, struct path *path, char *name,
return rc;
}
- rc = ksmbd_vfs_kern_path(name, 0, path, 0);
+ rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
if (rc) {
pr_err("cannot get linux path (%s), err = %d\n",
name, rc);
@@ -2377,6 +2427,10 @@ static int smb2_create_sd_buffer(struct ksmbd_work *work,
ksmbd_debug(SMB,
"Set ACLs using SMB2_CREATE_SD_BUFFER context\n");
sd_buf = (struct create_sd_buf_req *)context;
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_sd_buf_req))
+ return -EINVAL;
return set_info_sec(work->conn, work->tcon, path, &sd_buf->ntsd,
le32_to_cpu(sd_buf->ccontext.DataLength), true);
}
@@ -2423,7 +2477,7 @@ int smb2_open(struct ksmbd_work *work)
struct oplock_info *opinfo;
__le32 *next_ptr = NULL;
int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
- int rc = 0, len = 0;
+ int rc = 0;
int contxt_cnt = 0, query_disk_id = 0;
int maximal_access_ctxt = 0, posix_ctxt = 0;
int s_type = 0;
@@ -2495,17 +2549,11 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
} else {
- len = strlen(share->path);
- ksmbd_debug(SMB, "share path len %d\n", len);
- name = kmalloc(len + 1, GFP_KERNEL);
+ name = kstrdup("", GFP_KERNEL);
if (!name) {
- rsp->hdr.Status = STATUS_NO_MEMORY;
rc = -ENOMEM;
goto err_out1;
}
-
- memcpy(name, share->path, len);
- *(name + len) = '\0';
}
req_op_level = req->RequestedOplockLevel;
@@ -2577,6 +2625,12 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
} else if (context) {
ea_buf = (struct create_ea_buf_req *)context;
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_ea_buf_req)) {
+ rc = -EINVAL;
+ goto err_out1;
+ }
if (req->CreateOptions & FILE_NO_EA_KNOWLEDGE_LE) {
rsp->hdr.Status = STATUS_ACCESS_DENIED;
rc = -EACCES;
@@ -2615,6 +2669,12 @@ int smb2_open(struct ksmbd_work *work)
} else if (context) {
struct create_posix *posix =
(struct create_posix *)context;
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_posix)) {
+ rc = -EINVAL;
+ goto err_out1;
+ }
ksmbd_debug(SMB, "get posix context\n");
posix_mode = le32_to_cpu(posix->Mode);
@@ -2628,13 +2688,9 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
- if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
- /*
- * On delete request, instead of following up, need to
- * look the current entity
- */
- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
- if (!rc) {
+ rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, 1);
+ if (!rc) {
+ if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
/*
* If file exists with under flags, return access
* denied error.
@@ -2653,34 +2709,16 @@ int smb2_open(struct ksmbd_work *work)
path_put(&path);
goto err_out;
}
- }
- } else {
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS)) {
- /*
- * Use LOOKUP_FOLLOW to follow the path of
- * symlink in path buildup
- */
- rc = ksmbd_vfs_kern_path(name, LOOKUP_FOLLOW, &path, 1);
- if (rc) { /* Case for broken link ?*/
- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
- }
- } else {
- rc = ksmbd_vfs_kern_path(name, 0, &path, 1);
- if (!rc && d_is_symlink(path.dentry)) {
- rc = -EACCES;
- path_put(&path);
- goto err_out;
- }
+ } else if (d_is_symlink(path.dentry)) {
+ rc = -EACCES;
+ path_put(&path);
+ goto err_out;
}
}
if (rc) {
- if (rc == -EACCES) {
- ksmbd_debug(SMB,
- "User does not have right permission\n");
+ if (rc != -ENOENT)
goto err_out;
- }
ksmbd_debug(SMB, "can not get linux path for %s, rc = %d\n",
name, rc);
rc = 0;
@@ -2786,7 +2824,15 @@ int smb2_open(struct ksmbd_work *work)
created = true;
user_ns = mnt_user_ns(path.mnt);
if (ea_buf) {
- rc = smb2_set_ea(&ea_buf->ea, &path);
+ if (le32_to_cpu(ea_buf->ccontext.DataLength) <
+ sizeof(struct smb2_ea_info)) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+
+ rc = smb2_set_ea(&ea_buf->ea,
+ le32_to_cpu(ea_buf->ccontext.DataLength),
+ &path);
if (rc == -EOPNOTSUPP)
rc = 0;
else if (rc)
@@ -3019,9 +3065,16 @@ int smb2_open(struct ksmbd_work *work)
rc = PTR_ERR(az_req);
goto err_out;
} else if (az_req) {
- loff_t alloc_size = le64_to_cpu(az_req->AllocationSize);
+ loff_t alloc_size;
int err;
+ if (le16_to_cpu(az_req->ccontext.DataOffset) +
+ le32_to_cpu(az_req->ccontext.DataLength) <
+ sizeof(struct create_alloc_size_req)) {
+ rc = -EINVAL;
+ goto err_out;
+ }
+ alloc_size = le64_to_cpu(az_req->AllocationSize);
ksmbd_debug(SMB,
"request smb2 create allocate size : %llu\n",
alloc_size);
@@ -3176,7 +3229,7 @@ err_out1:
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
else if (rc == -EOPNOTSUPP)
rsp->hdr.Status = STATUS_NOT_SUPPORTED;
- else if (rc == -EACCES || rc == -ESTALE)
+ else if (rc == -EACCES || rc == -ESTALE || rc == -EXDEV)
rsp->hdr.Status = STATUS_ACCESS_DENIED;
else if (rc == -ENOENT)
rsp->hdr.Status = STATUS_OBJECT_NAME_INVALID;
@@ -4041,6 +4094,10 @@ static int smb2_get_ea(struct ksmbd_work *work, struct ksmbd_file *fp,
path = &fp->filp->f_path;
/* single EA entry is requested with given user.* name */
if (req->InputBufferLength) {
+ if (le32_to_cpu(req->InputBufferLength) <
+ sizeof(struct smb2_ea_info_req))
+ return -EINVAL;
+
ea_req = (struct smb2_ea_info_req *)req->Buffer;
} else {
/* need to send all EAs, if no specific EA is requested*/
@@ -4186,7 +4243,7 @@ static void get_file_access_info(struct smb2_query_info_rsp *rsp,
static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
struct ksmbd_file *fp, void *rsp_org)
{
- struct smb2_file_all_info *basic_info;
+ struct smb2_file_basic_info *basic_info;
struct kstat stat;
u64 time;
@@ -4196,7 +4253,7 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
return -EACCES;
}
- basic_info = (struct smb2_file_all_info *)rsp->Buffer;
+ basic_info = (struct smb2_file_basic_info *)rsp->Buffer;
generic_fillattr(file_mnt_user_ns(fp->filp), file_inode(fp->filp),
&stat);
basic_info->CreationTime = cpu_to_le64(fp->create_time);
@@ -4209,9 +4266,8 @@ static int get_file_basic_info(struct smb2_query_info_rsp *rsp,
basic_info->Attributes = fp->f_ci->m_fattr;
basic_info->Pad1 = 0;
rsp->OutputBufferLength =
- cpu_to_le32(offsetof(struct smb2_file_all_info, AllocationSize));
- inc_rfc1001_len(rsp_org, offsetof(struct smb2_file_all_info,
- AllocationSize));
+ cpu_to_le32(sizeof(struct smb2_file_basic_info));
+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_basic_info));
return 0;
}
@@ -4288,8 +4344,7 @@ static int get_file_all_info(struct ksmbd_work *work,
return -EACCES;
}
- filename = convert_to_nt_pathname(fp->filename,
- work->tcon->share_conf->path);
+ filename = convert_to_nt_pathname(fp->filename);
if (!filename)
return -ENOMEM;
@@ -4420,17 +4475,15 @@ static void get_file_stream_info(struct ksmbd_work *work,
file_info->NextEntryOffset = cpu_to_le32(next);
}
- if (nbytes) {
+ if (!S_ISDIR(stat.mode)) {
file_info = (struct smb2_file_stream_info *)
&rsp->Buffer[nbytes];
streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
"::$DATA", 7, conn->local_nls, 0);
streamlen *= 2;
file_info->StreamNameLength = cpu_to_le32(streamlen);
- file_info->StreamSize = S_ISDIR(stat.mode) ? 0 :
- cpu_to_le64(stat.size);
- file_info->StreamAllocationSize = S_ISDIR(stat.mode) ? 0 :
- cpu_to_le64(stat.size);
+ file_info->StreamSize = 0;
+ file_info->StreamAllocationSize = 0;
nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
}
@@ -4745,12 +4798,8 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
struct path path;
int rc = 0, len;
int fs_infoclass_size = 0;
- int lookup_flags = 0;
- if (test_share_config_flag(share, KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- lookup_flags = LOOKUP_FOLLOW;
-
- rc = ksmbd_vfs_kern_path(share->path, lookup_flags, &path, 0);
+ rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
if (rc) {
pr_err("cannot create vfs path\n");
return -EIO;
@@ -5299,7 +5348,7 @@ static int smb2_rename(struct ksmbd_work *work,
goto out;
len = strlen(new_name);
- if (new_name[len - 1] != '/') {
+ if (len > 0 && new_name[len - 1] != '/') {
pr_err("not allow base filename in rename\n");
rc = -ESHARE;
goto out;
@@ -5327,11 +5376,14 @@ static int smb2_rename(struct ksmbd_work *work,
}
ksmbd_debug(SMB, "new name %s\n", new_name);
- rc = ksmbd_vfs_kern_path(new_name, 0, &path, 1);
- if (rc)
+ rc = ksmbd_vfs_kern_path(work, new_name, LOOKUP_NO_SYMLINKS, &path, 1);
+ if (rc) {
+ if (rc != -ENOENT)
+ goto out;
file_present = false;
- else
+ } else {
path_put(&path);
+ }
if (ksmbd_share_veto_filename(share, new_name)) {
rc = -ENOENT;
@@ -5371,7 +5423,7 @@ out:
static int smb2_create_link(struct ksmbd_work *work,
struct ksmbd_share_config *share,
struct smb2_file_link_info *file_info,
- struct file *filp,
+ unsigned int buf_len, struct file *filp,
struct nls_table *local_nls)
{
char *link_name = NULL, *target_name = NULL, *pathname = NULL;
@@ -5379,6 +5431,10 @@ static int smb2_create_link(struct ksmbd_work *work,
bool file_present = true;
int rc;
+ if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
+ le32_to_cpu(file_info->FileNameLength))
+ return -EINVAL;
+
ksmbd_debug(SMB, "setting FILE_LINK_INFORMATION\n");
pathname = kmalloc(PATH_MAX, GFP_KERNEL);
if (!pathname)
@@ -5401,11 +5457,14 @@ static int smb2_create_link(struct ksmbd_work *work,
}
ksmbd_debug(SMB, "target name is %s\n", target_name);
- rc = ksmbd_vfs_kern_path(link_name, 0, &path, 0);
- if (rc)
+ rc = ksmbd_vfs_kern_path(work, link_name, LOOKUP_NO_SYMLINKS, &path, 0);
+ if (rc) {
+ if (rc != -ENOENT)
+ goto out;
file_present = false;
- else
+ } else {
path_put(&path);
+ }
if (file_info->ReplaceIfExists) {
if (file_present) {
@@ -5435,10 +5494,10 @@ out:
return rc;
}
-static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
+static int set_file_basic_info(struct ksmbd_file *fp,
+ struct smb2_file_basic_info *file_info,
struct ksmbd_share_config *share)
{
- struct smb2_file_all_info *file_info;
struct iattr attrs;
struct timespec64 ctime;
struct file *filp;
@@ -5449,7 +5508,6 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
if (!(fp->daccess & FILE_WRITE_ATTRIBUTES_LE))
return -EACCES;
- file_info = (struct smb2_file_all_info *)buf;
attrs.ia_valid = 0;
filp = fp->filp;
inode = file_inode(filp);
@@ -5526,7 +5584,8 @@ static int set_file_basic_info(struct ksmbd_file *fp, char *buf,
}
static int set_file_allocation_info(struct ksmbd_work *work,
- struct ksmbd_file *fp, char *buf)
+ struct ksmbd_file *fp,
+ struct smb2_file_alloc_info *file_alloc_info)
{
/*
* TODO : It's working fine only when store dos attributes
@@ -5534,7 +5593,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
* properly with any smb.conf option
*/
- struct smb2_file_alloc_info *file_alloc_info;
loff_t alloc_blks;
struct inode *inode;
int rc;
@@ -5542,7 +5600,6 @@ static int set_file_allocation_info(struct ksmbd_work *work,
if (!(fp->daccess & FILE_WRITE_DATA_LE))
return -EACCES;
- file_alloc_info = (struct smb2_file_alloc_info *)buf;
alloc_blks = (le64_to_cpu(file_alloc_info->AllocationSize) + 511) >> 9;
inode = file_inode(fp->filp);
@@ -5565,7 +5622,7 @@ static int set_file_allocation_info(struct ksmbd_work *work,
* inode size is retained by backup inode size.
*/
size = i_size_read(inode);
- rc = ksmbd_vfs_truncate(work, NULL, fp, alloc_blks * 512);
+ rc = ksmbd_vfs_truncate(work, fp, alloc_blks * 512);
if (rc) {
pr_err("truncate failed! filename : %s, err %d\n",
fp->filename, rc);
@@ -5578,9 +5635,8 @@ static int set_file_allocation_info(struct ksmbd_work *work,
}
static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
- char *buf)
+ struct smb2_file_eof_info *file_eof_info)
{
- struct smb2_file_eof_info *file_eof_info;
loff_t newsize;
struct inode *inode;
int rc;
@@ -5588,7 +5644,6 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
if (!(fp->daccess & FILE_WRITE_DATA_LE))
return -EACCES;
- file_eof_info = (struct smb2_file_eof_info *)buf;
newsize = le64_to_cpu(file_eof_info->EndOfFile);
inode = file_inode(fp->filp);
@@ -5602,7 +5657,7 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
ksmbd_debug(SMB, "filename : %s truncated to newsize %lld\n",
fp->filename, newsize);
- rc = ksmbd_vfs_truncate(work, NULL, fp, newsize);
+ rc = ksmbd_vfs_truncate(work, fp, newsize);
if (rc) {
ksmbd_debug(SMB, "truncate failed! filename : %s err %d\n",
fp->filename, rc);
@@ -5615,7 +5670,8 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
}
static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
- char *buf)
+ struct smb2_file_rename_info *rename_info,
+ unsigned int buf_len)
{
struct user_namespace *user_ns;
struct ksmbd_file *parent_fp;
@@ -5628,6 +5684,10 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EACCES;
}
+ if (buf_len < (u64)sizeof(struct smb2_file_rename_info) +
+ le32_to_cpu(rename_info->FileNameLength))
+ return -EINVAL;
+
user_ns = file_mnt_user_ns(fp->filp);
if (ksmbd_stream_fd(fp))
goto next;
@@ -5650,14 +5710,13 @@ static int set_rename_info(struct ksmbd_work *work, struct ksmbd_file *fp,
}
}
next:
- return smb2_rename(work, fp, user_ns,
- (struct smb2_file_rename_info *)buf,
+ return smb2_rename(work, fp, user_ns, rename_info,
work->sess->conn->local_nls);
}
-static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
+static int set_file_disposition_info(struct ksmbd_file *fp,
+ struct smb2_file_disposition_info *file_info)
{
- struct smb2_file_disposition_info *file_info;
struct inode *inode;
if (!(fp->daccess & FILE_DELETE_LE)) {
@@ -5666,7 +5725,6 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
}
inode = file_inode(fp->filp);
- file_info = (struct smb2_file_disposition_info *)buf;
if (file_info->DeletePending) {
if (S_ISDIR(inode->i_mode) &&
ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY)
@@ -5678,15 +5736,14 @@ static int set_file_disposition_info(struct ksmbd_file *fp, char *buf)
return 0;
}
-static int set_file_position_info(struct ksmbd_file *fp, char *buf)
+static int set_file_position_info(struct ksmbd_file *fp,
+ struct smb2_file_pos_info *file_info)
{
- struct smb2_file_pos_info *file_info;
loff_t current_byte_offset;
unsigned long sector_size;
struct inode *inode;
inode = file_inode(fp->filp);
- file_info = (struct smb2_file_pos_info *)buf;
current_byte_offset = le64_to_cpu(file_info->CurrentByteOffset);
sector_size = inode->i_sb->s_blocksize;
@@ -5702,12 +5759,11 @@ static int set_file_position_info(struct ksmbd_file *fp, char *buf)
return 0;
}
-static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
+static int set_file_mode_info(struct ksmbd_file *fp,
+ struct smb2_file_mode_info *file_info)
{
- struct smb2_file_mode_info *file_info;
__le32 mode;
- file_info = (struct smb2_file_mode_info *)buf;
mode = file_info->Mode;
if ((mode & ~FILE_MODE_INFO_MASK) ||
@@ -5737,40 +5793,74 @@ static int set_file_mode_info(struct ksmbd_file *fp, char *buf)
* TODO: need to implement an error handling for STATUS_INFO_LENGTH_MISMATCH
*/
static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
- int info_class, char *buf,
+ struct smb2_set_info_req *req,
struct ksmbd_share_config *share)
{
- switch (info_class) {
+ unsigned int buf_len = le32_to_cpu(req->BufferLength);
+
+ switch (req->FileInfoClass) {
case FILE_BASIC_INFORMATION:
- return set_file_basic_info(fp, buf, share);
+ {
+ if (buf_len < sizeof(struct smb2_file_basic_info))
+ return -EINVAL;
+ return set_file_basic_info(fp, (struct smb2_file_basic_info *)req->Buffer, share);
+ }
case FILE_ALLOCATION_INFORMATION:
- return set_file_allocation_info(work, fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_alloc_info))
+ return -EINVAL;
+ return set_file_allocation_info(work, fp,
+ (struct smb2_file_alloc_info *)req->Buffer);
+ }
case FILE_END_OF_FILE_INFORMATION:
- return set_end_of_file_info(work, fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_eof_info))
+ return -EINVAL;
+ return set_end_of_file_info(work, fp,
+ (struct smb2_file_eof_info *)req->Buffer);
+ }
case FILE_RENAME_INFORMATION:
+ {
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
ksmbd_debug(SMB,
"User does not have write permission\n");
return -EACCES;
}
- return set_rename_info(work, fp, buf);
+ if (buf_len < sizeof(struct smb2_file_rename_info))
+ return -EINVAL;
+
+ return set_rename_info(work, fp,
+ (struct smb2_file_rename_info *)req->Buffer,
+ buf_len);
+ }
case FILE_LINK_INFORMATION:
+ {
+ if (buf_len < sizeof(struct smb2_file_link_info))
+ return -EINVAL;
+
return smb2_create_link(work, work->tcon->share_conf,
- (struct smb2_file_link_info *)buf, fp->filp,
+ (struct smb2_file_link_info *)req->Buffer,
+ buf_len, fp->filp,
work->sess->conn->local_nls);
-
+ }
case FILE_DISPOSITION_INFORMATION:
+ {
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
ksmbd_debug(SMB,
"User does not have write permission\n");
return -EACCES;
}
- return set_file_disposition_info(fp, buf);
+ if (buf_len < sizeof(struct smb2_file_disposition_info))
+ return -EINVAL;
+
+ return set_file_disposition_info(fp,
+ (struct smb2_file_disposition_info *)req->Buffer);
+ }
case FILE_FULL_EA_INFORMATION:
{
if (!(fp->daccess & FILE_WRITE_EA_LE)) {
@@ -5779,18 +5869,29 @@ static int smb2_set_info_file(struct ksmbd_work *work, struct ksmbd_file *fp,
return -EACCES;
}
- return smb2_set_ea((struct smb2_ea_info *)buf,
- &fp->filp->f_path);
- }
+ if (buf_len < sizeof(struct smb2_ea_info))
+ return -EINVAL;
+ return smb2_set_ea((struct smb2_ea_info *)req->Buffer,
+ buf_len, &fp->filp->f_path);
+ }
case FILE_POSITION_INFORMATION:
- return set_file_position_info(fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_pos_info))
+ return -EINVAL;
+ return set_file_position_info(fp, (struct smb2_file_pos_info *)req->Buffer);
+ }
case FILE_MODE_INFORMATION:
- return set_file_mode_info(fp, buf);
+ {
+ if (buf_len < sizeof(struct smb2_file_mode_info))
+ return -EINVAL;
+
+ return set_file_mode_info(fp, (struct smb2_file_mode_info *)req->Buffer);
+ }
}
- pr_err("Unimplemented Fileinfoclass :%d\n", info_class);
+ pr_err("Unimplemented Fileinfoclass :%d\n", req->FileInfoClass);
return -EOPNOTSUPP;
}
@@ -5851,8 +5952,7 @@ int smb2_set_info(struct ksmbd_work *work)
switch (req->InfoType) {
case SMB2_O_INFO_FILE:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
- rc = smb2_set_info_file(work, fp, req->FileInfoClass,
- req->Buffer, work->tcon->share_conf);
+ rc = smb2_set_info_file(work, fp, req, work->tcon->share_conf);
break;
case SMB2_O_INFO_SECURITY:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
@@ -5879,7 +5979,7 @@ int smb2_set_info(struct ksmbd_work *work)
return 0;
err_out:
- if (rc == -EACCES || rc == -EPERM)
+ if (rc == -EACCES || rc == -EPERM || rc == -EXDEV)
rsp->hdr.Status = STATUS_ACCESS_DENIED;
else if (rc == -EINVAL)
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
@@ -8206,7 +8306,8 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
WORK_BUFFERS(work, req, rsp);
- if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE)
+ if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
+ conn->preauth_info)
ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
conn->preauth_info->Preauth_HashValue);
@@ -8313,16 +8414,8 @@ int smb3_decrypt_req(struct ksmbd_work *work)
unsigned int buf_data_size = pdu_length + 4 -
sizeof(struct smb2_transform_hdr);
struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
- unsigned int orig_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
int rc = 0;
- sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
- if (!sess) {
- pr_err("invalid session id(%llx) in transform header\n",
- le64_to_cpu(tr_hdr->SessionId));
- return -ECONNABORTED;
- }
-
if (pdu_length + 4 <
sizeof(struct smb2_transform_hdr) + sizeof(struct smb2_hdr)) {
pr_err("Transform message is too small (%u)\n",
@@ -8330,11 +8423,19 @@ int smb3_decrypt_req(struct ksmbd_work *work)
return -ECONNABORTED;
}
- if (pdu_length + 4 < orig_len + sizeof(struct smb2_transform_hdr)) {
+ if (pdu_length + 4 <
+ le32_to_cpu(tr_hdr->OriginalMessageSize) + sizeof(struct smb2_transform_hdr)) {
pr_err("Transform message is broken\n");
return -ECONNABORTED;
}
+ sess = ksmbd_session_lookup_all(conn, le64_to_cpu(tr_hdr->SessionId));
+ if (!sess) {
+ pr_err("invalid session id(%llx) in transform header\n",
+ le64_to_cpu(tr_hdr->SessionId));
+ return -ECONNABORTED;
+ }
+
iov[0].iov_base = buf;
iov[0].iov_len = sizeof(struct smb2_transform_hdr);
iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
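
[Editor's note: most of the smb2pdu.c hunks above add the same defensive shape — before a client-supplied (offset, length) pair inside a PDU is honoured, the sum is computed in a type wide enough not to wrap and compared against the bytes actually received. The helper below captures that shape in isolation; range_ok() and the sample values are illustrative only.]

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * True when [off, off + len) lies inside a buffer of buf_len bytes.
     * The sum is done in 64 bits so a hostile 32-bit off + len cannot wrap
     * back into range (the same reason several checks above cast to u64).
     */
    static bool range_ok(uint32_t buf_len, uint32_t off, uint32_t len)
    {
        return (uint64_t)off + len <= buf_len;
    }

    int main(void)
    {
        /* A well-formed field: 16-byte value at offset 64 of a 128-byte PDU. */
        printf("valid field:    %s\n", range_ok(128, 64, 16) ? "ok" : "rejected");

        /* Values that would slip past a naive 32-bit "off + len <= buf_len" test. */
        printf("wrapping field: %s\n",
               range_ok(128, UINT32_MAX - 7, 16) ? "ok" : "rejected");
        return 0;
    }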
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index bcec845b03f3..261825d06391 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -1464,6 +1464,15 @@ struct smb2_file_all_info { /* data block encoding of response to level 18 */
char FileName[1];
} __packed; /* level 18 Query */
+struct smb2_file_basic_info { /* data block encoding of response to level 18 */
+ __le64 CreationTime; /* Beginning of FILE_BASIC_INFO equivalent */
+ __le64 LastAccessTime;
+ __le64 LastWriteTime;
+ __le64 ChangeTime;
+ __le32 Attributes;
+ __u32 Pad1; /* End of FILE_BASIC_INFO_INFO equivalent */
+} __packed;
+
struct smb2_file_alt_name_info {
__le32 FileNameLength;
char FileName[0];
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 43d3123d8b62..db8042a173d0 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -129,16 +129,22 @@ int ksmbd_lookup_protocol_idx(char *str)
*
* check for valid smb signature and packet direction(request/response)
*
- * Return: 0 on success, otherwise 1
+ * Return: 0 on success, otherwise -EINVAL
*/
int ksmbd_verify_smb_message(struct ksmbd_work *work)
{
- struct smb2_hdr *smb2_hdr = work->request_buf;
+ struct smb2_hdr *smb2_hdr = work->request_buf + work->next_smb2_rcv_hdr_off;
+ struct smb_hdr *hdr;
if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
return ksmbd_smb2_check_message(work);
- return 0;
+ hdr = work->request_buf;
+ if (*(__le32 *)hdr->Protocol == SMB1_PROTO_NUMBER &&
+ hdr->Command == SMB_COM_NEGOTIATE)
+ return 0;
+
+ return -EINVAL;
}
/**
@@ -149,20 +155,7 @@ int ksmbd_verify_smb_message(struct ksmbd_work *work)
*/
bool ksmbd_smb_request(struct ksmbd_conn *conn)
{
- int type = *(char *)conn->request_buf;
-
- switch (type) {
- case RFC1002_SESSION_MESSAGE:
- /* Regular SMB request */
- return true;
- case RFC1002_SESSION_KEEP_ALIVE:
- ksmbd_debug(SMB, "RFC 1002 session keep alive\n");
- break;
- default:
- ksmbd_debug(SMB, "RFC 1002 unknown request type 0x%x\n", type);
- }
-
- return false;
+ return conn->request_buf[0] == 0;
}
static bool supported_protocol(int idx)
@@ -176,10 +169,12 @@ static bool supported_protocol(int idx)
idx <= server_conf.max_protocol);
}
-static char *next_dialect(char *dialect, int *next_off)
+static char *next_dialect(char *dialect, int *next_off, int bcount)
{
dialect = dialect + *next_off;
- *next_off = strlen(dialect);
+ *next_off = strnlen(dialect, bcount);
+ if (dialect[*next_off] != '\0')
+ return NULL;
return dialect;
}
@@ -194,7 +189,9 @@ static int ksmbd_lookup_dialect_by_name(char *cli_dialects, __le16 byte_count)
dialect = cli_dialects;
bcount = le16_to_cpu(byte_count);
do {
- dialect = next_dialect(dialect, &next);
+ dialect = next_dialect(dialect, &next, bcount);
+ if (!dialect)
+ break;
ksmbd_debug(SMB, "client requested dialect %s\n",
dialect);
if (!strcmp(dialect, smb1_protos[i].name)) {
@@ -242,13 +239,22 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
static int ksmbd_negotiate_smb_dialect(void *buf)
{
- __le32 proto;
+ int smb_buf_length = get_rfc1002_len(buf);
+ __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId;
- proto = ((struct smb2_hdr *)buf)->ProtocolId;
if (proto == SMB2_PROTO_NUMBER) {
struct smb2_negotiate_req *req;
+ int smb2_neg_size =
+ offsetof(struct smb2_negotiate_req, Dialects) - 4;
req = (struct smb2_negotiate_req *)buf;
+ if (smb2_neg_size > smb_buf_length)
+ goto err_out;
+
+ if (smb2_neg_size + le16_to_cpu(req->DialectCount) * sizeof(__le16) >
+ smb_buf_length)
+ goto err_out;
+
return ksmbd_lookup_dialect_by_id(req->Dialects,
req->DialectCount);
}
@@ -258,14 +264,22 @@ static int ksmbd_negotiate_smb_dialect(void *buf)
struct smb_negotiate_req *req;
req = (struct smb_negotiate_req *)buf;
+ if (le16_to_cpu(req->ByteCount) < 2)
+ goto err_out;
+
+ if (offsetof(struct smb_negotiate_req, DialectsArray) - 4 +
+ le16_to_cpu(req->ByteCount) > smb_buf_length) {
+ goto err_out;
+ }
+
return ksmbd_lookup_dialect_by_name(req->DialectsArray,
req->ByteCount);
}
+err_out:
return BAD_PROT_ID;
}
-#define SMB_COM_NEGOTIATE 0x72
int ksmbd_init_smb_server(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
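
[Editor's note: next_dialect() above now scans with strnlen() so an unterminated dialect string at the end of the negotiate blob is detected rather than read past. The sketch below applies the same bounded walk to a generic NUL-separated string list; the list contents and function names are made up for the example.]

    #include <stdio.h>
    #include <string.h>

    /*
     * Walk a buffer of consecutive NUL-terminated strings without ever reading
     * beyond remain bytes. Stops cleanly if the final entry lacks its NUL.
     */
    static void walk_string_list(const char *buf, size_t remain)
    {
        while (remain > 0) {
            size_t len = strnlen(buf, remain);

            if (len == remain) {            /* no terminator inside the buffer */
                puts("unterminated entry, stopping");
                return;
            }
            printf("entry: %s\n", buf);
            buf += len + 1;                 /* skip the string and its NUL */
            remain -= len + 1;
        }
    }

    int main(void)
    {
        /* Two complete entries followed by a truncated one. */
        const char blob[] = "NT LM 0.12\0SMB 2.???\0SMB 2.1";

        walk_string_list(blob, sizeof(blob) - 1);   /* drop the implicit final NUL */
        return 0;
    }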
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index 57c667c1be06..994abede27e9 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -48,14 +48,6 @@
#define CIFS_DEFAULT_IOSIZE (64 * 1024)
#define MAX_CIFS_SMALL_BUFFER_SIZE 448 /* big enough for most */
-/* RFC 1002 session packet types */
-#define RFC1002_SESSION_MESSAGE 0x00
-#define RFC1002_SESSION_REQUEST 0x81
-#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
-#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
-#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
-#define RFC1002_SESSION_KEEP_ALIVE 0x85
-
/* Responses when opening a file. */
#define F_SUPERSEDED 0
#define F_OPENED 1
@@ -210,6 +202,7 @@
FILE_READ_ATTRIBUTES | FILE_WRITE_ATTRIBUTES)
#define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff)
+#define SMB_COM_NEGOTIATE 0x72
#define SMB1_CLIENT_GUID_SIZE (16)
struct smb_hdr {
diff --git a/fs/ksmbd/smbacl.c b/fs/ksmbd/smbacl.c
index 0a95cdec8c80..bd792db32623 100644
--- a/fs/ksmbd/smbacl.c
+++ b/fs/ksmbd/smbacl.c
@@ -380,7 +380,7 @@ static void parse_dacl(struct user_namespace *user_ns,
{
int i, ret;
int num_aces = 0;
- int acl_size;
+ unsigned int acl_size;
char *acl_base;
struct smb_ace **ppace;
struct posix_acl_entry *cf_pace, *cf_pdace;
@@ -392,7 +392,7 @@ static void parse_dacl(struct user_namespace *user_ns,
return;
/* validate that we do not go past end of acl */
- if (end_of_acl <= (char *)pdacl ||
+ if (end_of_acl < (char *)pdacl + sizeof(struct smb_acl) ||
end_of_acl < (char *)pdacl + le16_to_cpu(pdacl->size)) {
pr_err("ACL too small to parse DACL\n");
return;
@@ -431,8 +431,22 @@ static void parse_dacl(struct user_namespace *user_ns,
* user/group/other have no permissions
*/
for (i = 0; i < num_aces; ++i) {
+ if (end_of_acl - acl_base < acl_size)
+ break;
+
ppace[i] = (struct smb_ace *)(acl_base + acl_size);
acl_base = (char *)ppace[i];
+ acl_size = offsetof(struct smb_ace, sid) +
+ offsetof(struct smb_sid, sub_auth);
+
+ if (end_of_acl - acl_base < acl_size ||
+ ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+ (end_of_acl - acl_base <
+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+ (le16_to_cpu(ppace[i]->size) <
+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+ break;
+
acl_size = le16_to_cpu(ppace[i]->size);
ppace[i]->access_req =
smb_map_generic_desired_access(ppace[i]->access_req);
@@ -807,6 +821,9 @@ int parse_sec_desc(struct user_namespace *user_ns, struct smb_ntsd *pntsd,
if (!pntsd)
return -EIO;
+ if (acl_len < sizeof(struct smb_ntsd))
+ return -EINVAL;
+
owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
group_sid_ptr = (struct smb_sid *)((char *)pntsd +
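
[Editor's note: parse_dacl() above now cross-checks every ACE twice — the remaining buffer must hold at least the minimal ACE header, and the size the ACE declares for itself must be large enough for the sub-authorities it claims. Below is a compact standalone sketch of iterating self-sized entries with those checks; struct entry is a stand-in, not struct smb_ace.]

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry {                      /* stand-in for an ACE: self-declared size */
        uint16_t size;                  /* total bytes of this entry, per the sender */
        uint8_t  nsub;                  /* number of 32-bit sub-fields that follow */
        uint8_t  pad;
        uint32_t sub[];
    };

    static void parse_entries(const uint8_t *base, size_t total, unsigned count)
    {
        const uint8_t *end = base + total;

        for (unsigned i = 0; i < count; i++) {
            const struct entry *e = (const struct entry *)base;
            size_t min = sizeof(struct entry);

            if ((size_t)(end - base) < min)
                break;                                  /* header would overrun */

            size_t need = min + (size_t)e->nsub * sizeof(uint32_t);

            if ((size_t)(end - base) < need ||          /* claimed fields overrun */
                e->size < need ||                       /* declared size too small */
                (size_t)e->size > (size_t)(end - base)) /* declared size overruns */
                break;

            printf("entry %u: %u sub-fields\n", i, e->nsub);
            base += e->size;                            /* advance by declared size */
        }
    }

    int main(void)
    {
        uint32_t buf[4] = { 0 };        /* 16 bytes, suitably aligned */
        struct entry *e = (struct entry *)buf;

        e->size = 12;                   /* header (4) + two sub-fields (8) */
        e->nsub = 2;
        parse_entries((const uint8_t *)buf, sizeof(buf), 1);
        return 0;
    }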
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 52b2556e76b1..3a7fa23ba850 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -20,7 +20,6 @@
#define SUBMOD_NAME "smb_direct"
#include <linux/kthread.h>
-#include <linux/rwlock.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/highmem.h>
diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
index dc15a5ecd2e0..c14320e03b69 100644
--- a/fs/ksmbd/transport_tcp.c
+++ b/fs/ksmbd/transport_tcp.c
@@ -215,7 +215,7 @@ out_error:
* ksmbd_kthread_fn() - listen to new SMB connections and callback server
* @p: arguments to forker thread
*
- * Return: Returns a task_struct or ERR_PTR
+ * Return: 0 on success, error number otherwise
*/
static int ksmbd_kthread_fn(void *p)
{
@@ -387,7 +387,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
/**
* create_socket - create socket for ksmbd/0
*
- * Return: Returns a task_struct or ERR_PTR
+ * Return: 0 on success, error number otherwise
*/
static int create_socket(struct interface *iface)
{
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index b047f2980d96..b41954294d38 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -19,6 +19,8 @@
#include <linux/sched/xacct.h>
#include <linux/crc32c.h>
+#include "../internal.h" /* for vfs_path_lookup */
+
#include "glob.h"
#include "oplock.h"
#include "connection.h"
@@ -44,7 +46,6 @@ static char *extract_last_component(char *path)
p++;
} else {
p = NULL;
- pr_err("Invalid path %s\n", path);
}
return p;
}
@@ -155,7 +156,7 @@ int ksmbd_vfs_query_maximal_access(struct user_namespace *user_ns,
/**
* ksmbd_vfs_create() - vfs helper for smb create file
* @work: work
- * @name: file name
+ * @name: file name that is relative to share
* @mode: file create mode
*
* Return: 0 on success, otherwise error
@@ -166,7 +167,8 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
struct dentry *dentry;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, 0);
+ dentry = ksmbd_vfs_kern_path_create(work, name,
+ LOOKUP_NO_SYMLINKS, &path);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
if (err != -ENOENT)
@@ -191,7 +193,7 @@ int ksmbd_vfs_create(struct ksmbd_work *work, const char *name, umode_t mode)
/**
* ksmbd_vfs_mkdir() - vfs helper for smb create directory
* @work: work
- * @name: directory name
+ * @name: directory name that is relative to share
* @mode: directory create mode
*
* Return: 0 on success, otherwise error
@@ -203,7 +205,9 @@ int ksmbd_vfs_mkdir(struct ksmbd_work *work, const char *name, umode_t mode)
struct dentry *dentry;
int err;
- dentry = kern_path_create(AT_FDCWD, name, &path, LOOKUP_DIRECTORY);
+ dentry = ksmbd_vfs_kern_path_create(work, name,
+ LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+ &path);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
if (err != -EEXIST)
@@ -578,7 +582,7 @@ int ksmbd_vfs_fsync(struct ksmbd_work *work, u64 fid, u64 p_id)
/**
* ksmbd_vfs_remove_file() - vfs helper for smb rmdir or unlink
- * @name: absolute directory or file name
+ * @name: directory or file name that is relative to share
*
* Return: 0 on success, otherwise error
*/
@@ -588,16 +592,11 @@ int ksmbd_vfs_remove_file(struct ksmbd_work *work, char *name)
struct path path;
struct dentry *parent;
int err;
- int flags = 0;
if (ksmbd_override_fsids(work))
return -ENOMEM;
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- flags = LOOKUP_FOLLOW;
-
- err = kern_path(name, flags, &path);
+ err = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS, &path, false);
if (err) {
ksmbd_debug(VFS, "can't get %s, err %d\n", name, err);
ksmbd_revert_fsids(work);
@@ -642,7 +641,7 @@ out_err:
/**
* ksmbd_vfs_link() - vfs helper for creating smb hardlink
* @oldname: source file name
- * @newname: hardlink name
+ * @newname: hardlink name that is relative to share
*
* Return: 0 on success, otherwise error
*/
@@ -652,24 +651,20 @@ int ksmbd_vfs_link(struct ksmbd_work *work, const char *oldname,
struct path oldpath, newpath;
struct dentry *dentry;
int err;
- int flags = 0;
if (ksmbd_override_fsids(work))
return -ENOMEM;
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- flags = LOOKUP_FOLLOW;
-
- err = kern_path(oldname, flags, &oldpath);
+ err = kern_path(oldname, LOOKUP_NO_SYMLINKS, &oldpath);
if (err) {
pr_err("cannot get linux path for %s, err = %d\n",
oldname, err);
goto out1;
}
- dentry = kern_path_create(AT_FDCWD, newname, &newpath,
- flags | LOOKUP_REVAL);
+ dentry = ksmbd_vfs_kern_path_create(work, newname,
+ LOOKUP_NO_SYMLINKS | LOOKUP_REVAL,
+ &newpath);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
pr_err("path create err for %s, err %d\n", newname, err);
@@ -788,21 +783,19 @@ int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
struct dentry *src_dent, *trap_dent, *src_child;
char *dst_name;
int err;
- int flags;
dst_name = extract_last_component(newname);
- if (!dst_name)
- return -EINVAL;
+ if (!dst_name) {
+ dst_name = newname;
+ newname = "";
+ }
src_dent_parent = dget_parent(fp->filp->f_path.dentry);
src_dent = fp->filp->f_path.dentry;
- flags = LOOKUP_DIRECTORY;
- if (test_share_config_flag(work->tcon->share_conf,
- KSMBD_SHARE_FLAG_FOLLOW_SYMLINKS))
- flags |= LOOKUP_FOLLOW;
-
- err = kern_path(newname, flags, &dst_path);
+ err = ksmbd_vfs_kern_path(work, newname,
+ LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+ &dst_path, false);
if (err) {
ksmbd_debug(VFS, "Cannot get path for %s [%d]\n", newname, err);
goto out;
@@ -848,61 +841,43 @@ out:
/**
* ksmbd_vfs_truncate() - vfs helper for smb file truncate
* @work: work
- * @name: old filename
* @fid: file id of old file
* @size: truncate to given size
*
* Return: 0 on success, otherwise error
*/
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
struct ksmbd_file *fp, loff_t size)
{
- struct path path;
int err = 0;
+ struct file *filp;
- if (name) {
- err = kern_path(name, 0, &path);
- if (err) {
- pr_err("cannot get linux path for %s, err %d\n",
- name, err);
- return err;
- }
- err = vfs_truncate(&path, size);
- if (err)
- pr_err("truncate failed for %s err %d\n",
- name, err);
- path_put(&path);
- } else {
- struct file *filp;
-
- filp = fp->filp;
-
- /* Do we need to break any of a levelII oplock? */
- smb_break_all_levII_oplock(work, fp, 1);
+ filp = fp->filp;
- if (!work->tcon->posix_extensions) {
- struct inode *inode = file_inode(filp);
+ /* Do we need to break any of a levelII oplock? */
+ smb_break_all_levII_oplock(work, fp, 1);
- if (size < inode->i_size) {
- err = check_lock_range(filp, size,
- inode->i_size - 1, WRITE);
- } else {
- err = check_lock_range(filp, inode->i_size,
- size - 1, WRITE);
- }
+ if (!work->tcon->posix_extensions) {
+ struct inode *inode = file_inode(filp);
- if (err) {
- pr_err("failed due to lock\n");
- return -EAGAIN;
- }
+ if (size < inode->i_size) {
+ err = check_lock_range(filp, size,
+ inode->i_size - 1, WRITE);
+ } else {
+ err = check_lock_range(filp, inode->i_size,
+ size - 1, WRITE);
}
- err = vfs_truncate(&filp->f_path, size);
- if (err)
- pr_err("truncate failed for filename : %s err %d\n",
- fp->filename, err);
+ if (err) {
+ pr_err("failed due to lock\n");
+ return -EAGAIN;
+ }
}
+ err = vfs_truncate(&filp->f_path, size);
+ if (err)
+ pr_err("truncate failed for filename : %s err %d\n",
+ fp->filename, err);
return err;
}
@@ -1220,22 +1195,25 @@ static int ksmbd_vfs_lookup_in_dir(struct path *dir, char *name, size_t namelen)
/**
* ksmbd_vfs_kern_path() - lookup a file and get path info
- * @name: name of file for lookup
+ * @name: file path that is relative to share
* @flags: lookup flags
* @path: if lookup succeed, return path info
* @caseless: caseless filename lookup
*
* Return: 0 on success, otherwise error
*/
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
- bool caseless)
+int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+ unsigned int flags, struct path *path, bool caseless)
{
+ struct ksmbd_share_config *share_conf = work->tcon->share_conf;
int err;
- if (name[0] != '/')
- return -EINVAL;
-
- err = kern_path(name, flags, path);
+ flags |= LOOKUP_BENEATH;
+ err = vfs_path_lookup(share_conf->vfs_path.dentry,
+ share_conf->vfs_path.mnt,
+ name,
+ flags,
+ path);
if (!err)
return 0;
@@ -1249,11 +1227,10 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
return -ENOMEM;
path_len = strlen(filepath);
- remain_len = path_len - 1;
+ remain_len = path_len;
- err = kern_path("/", flags, &parent);
- if (err)
- goto out;
+ parent = share_conf->vfs_path;
+ path_get(&parent);
while (d_can_lookup(parent.dentry)) {
char *filename = filepath + path_len - remain_len;
@@ -1266,21 +1243,21 @@ int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
err = ksmbd_vfs_lookup_in_dir(&parent, filename,
filename_len);
- if (err) {
- path_put(&parent);
+ path_put(&parent);
+ if (err)
goto out;
- }
- path_put(&parent);
next[0] = '\0';
- err = kern_path(filepath, flags, &parent);
+ err = vfs_path_lookup(share_conf->vfs_path.dentry,
+ share_conf->vfs_path.mnt,
+ filepath,
+ flags,
+ &parent);
if (err)
goto out;
-
- if (is_last) {
- path->mnt = parent.mnt;
- path->dentry = parent.dentry;
+ else if (is_last) {
+ *path = parent;
goto out;
}
@@ -1296,6 +1273,23 @@ out:
return err;
}
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+ struct path *path)
+{
+ char *abs_name;
+ struct dentry *dent;
+
+ abs_name = convert_to_unix_name(work->tcon->share_conf, name);
+ if (!abs_name)
+ return ERR_PTR(-ENOMEM);
+
+ dent = kern_path_create(AT_FDCWD, abs_name, path, flags);
+ kfree(abs_name);
+ return dent;
+}
+
int ksmbd_vfs_remove_acl_xattrs(struct user_namespace *user_ns,
struct dentry *dentry)
{
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
index 85db50abdb24..7b1dcaa3fbdc 100644
--- a/fs/ksmbd/vfs.h
+++ b/fs/ksmbd/vfs.h
@@ -126,7 +126,7 @@ int ksmbd_vfs_link(struct ksmbd_work *work,
int ksmbd_vfs_getattr(struct path *path, struct kstat *stat);
int ksmbd_vfs_fp_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
char *newname);
-int ksmbd_vfs_truncate(struct ksmbd_work *work, const char *name,
+int ksmbd_vfs_truncate(struct ksmbd_work *work,
struct ksmbd_file *fp, loff_t size);
struct srv_copychunk;
int ksmbd_vfs_copy_file_ranges(struct ksmbd_work *work,
@@ -152,8 +152,13 @@ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
size_t *xattr_stream_name_size, int s_type);
int ksmbd_vfs_remove_xattr(struct user_namespace *user_ns,
struct dentry *dentry, char *attr_name);
-int ksmbd_vfs_kern_path(char *name, unsigned int flags, struct path *path,
+int ksmbd_vfs_kern_path(struct ksmbd_work *work,
+ char *name, unsigned int flags, struct path *path,
bool caseless);
+struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
+ const char *name,
+ unsigned int flags,
+ struct path *path);
int ksmbd_vfs_empty_dir(struct ksmbd_file *fp);
void ksmbd_vfs_set_fadvise(struct file *filp, __le32 option);
int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
diff --git a/fs/lockd/svcxdr.h b/fs/lockd/svcxdr.h
index c69a0bb76c94..4f1a451da5ba 100644
--- a/fs/lockd/svcxdr.h
+++ b/fs/lockd/svcxdr.h
@@ -134,18 +134,9 @@ svcxdr_decode_owner(struct xdr_stream *xdr, struct xdr_netobj *obj)
static inline bool
svcxdr_encode_owner(struct xdr_stream *xdr, const struct xdr_netobj *obj)
{
- unsigned int quadlen = XDR_QUADLEN(obj->len);
- __be32 *p;
-
- if (xdr_stream_encode_u32(xdr, obj->len) < 0)
- return false;
- p = xdr_reserve_space(xdr, obj->len);
- if (!p)
+ if (obj->len > XDR_MAX_NETOBJ)
return false;
- p[quadlen - 1] = 0; /* XDR pad */
- memcpy(p, obj->data, obj->len);
-
- return true;
+ return xdr_stream_encode_opaque(xdr, obj->data, obj->len) > 0;
}
#endif /* _LOCKD_SVCXDR_H_ */
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 42356416f0a0..3f4027a5de88 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3570,7 +3570,7 @@ static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_s
}
static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
- struct nfsd4_session *session, u32 req)
+ struct nfsd4_session *session, u32 req, struct nfsd4_conn **conn)
{
struct nfs4_client *clp = session->se_client;
struct svc_xprt *xpt = rqst->rq_xprt;
@@ -3593,6 +3593,8 @@ static __be32 nfsd4_match_existing_connection(struct svc_rqst *rqst,
else
status = nfserr_inval;
spin_unlock(&clp->cl_lock);
+ if (status == nfs_ok && conn)
+ *conn = c;
return status;
}
@@ -3617,8 +3619,16 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
status = nfserr_wrong_cred;
if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
- status = nfsd4_match_existing_connection(rqstp, session, bcts->dir);
- if (status == nfs_ok || status == nfserr_inval)
+ status = nfsd4_match_existing_connection(rqstp, session,
+ bcts->dir, &conn);
+ if (status == nfs_ok) {
+ if (bcts->dir == NFS4_CDFC4_FORE_OR_BOTH ||
+ bcts->dir == NFS4_CDFC4_BACK)
+ conn->cn_flags |= NFS4_CDFC4_BACK;
+ nfsd4_probe_callback(session->se_client);
+ goto out;
+ }
+ if (status == nfserr_inval)
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 359524b7341f..801e60bab955 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3951,7 +3951,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
oi = OCFS2_I(inode);
oi->ip_dir_lock_gen++;
mlog(0, "generation: %u\n", oi->ip_dir_lock_gen);
- goto out;
+ goto out_forget;
}
if (!S_ISREG(inode->i_mode))
@@ -3982,6 +3982,7 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres,
filemap_fdatawait(mapping);
}
+out_forget:
forget_all_cached_acls(inode);
out:
diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c
index 2a66844b7ff8..66645a5a35f3 100644
--- a/fs/qnx4/dir.c
+++ b/fs/qnx4/dir.c
@@ -20,12 +20,33 @@
* depending on the status field in the last byte. The
* first byte is where the name start either way, and a
* zero means it's empty.
+ *
+ * Also, due to a bug in gcc, we don't want to use the
+ * real (differently sized) name arrays in the inode and
+ * link entries, but always the 'de_name[]' one in the
+ * fake struct entry.
+ *
+ * See
+ *
+ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=99578#c6
+ *
+ * for details, but basically gcc will take the size of the
+ * 'name' array from one of the used union entries randomly.
+ *
+ * This use of 'de_name[]' (48 bytes) avoids the false positive
+ * warnings that would happen if gcc decides to use 'inode.di_name'
+ * (16 bytes) even when the pointer and size were to come from
+ * 'link.dl_name' (48 bytes).
+ *
+ * In all cases the actual name pointer itself is the same, it's
+ * only the gcc internal 'what is the size of this field' logic
+ * that can get confused.
*/
union qnx4_directory_entry {
struct {
- char de_name;
- char de_pad[62];
- char de_status;
+ const char de_name[48];
+ u8 de_pad[15];
+ u8 de_status;
};
struct qnx4_inode_entry inode;
struct qnx4_link_info link;
@@ -53,29 +74,26 @@ static int qnx4_readdir(struct file *file, struct dir_context *ctx)
ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK;
for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) {
union qnx4_directory_entry *de;
- const char *name;
offset = ix * QNX4_DIR_ENTRY_SIZE;
de = (union qnx4_directory_entry *) (bh->b_data + offset);
- if (!de->de_name)
+ if (!de->de_name[0])
continue;
if (!(de->de_status & (QNX4_FILE_USED|QNX4_FILE_LINK)))
continue;
if (!(de->de_status & QNX4_FILE_LINK)) {
size = sizeof(de->inode.di_fname);
- name = de->inode.di_fname;
ino = blknum * QNX4_INODES_PER_BLOCK + ix - 1;
} else {
size = sizeof(de->link.dl_fname);
- name = de->link.dl_fname;
ino = ( le32_to_cpu(de->link.dl_inode_blk) - 1 ) *
QNX4_INODES_PER_BLOCK +
de->link.dl_inode_ndx;
}
- size = strnlen(name, size);
+ size = strnlen(de->de_name, size);
QNX4DEBUG((KERN_INFO "qnx4_readdir:%.*s\n", size, name));
- if (!dir_emit(ctx, name, size, ino, DT_UNKNOWN)) {
+ if (!dir_emit(ctx, de->de_name, size, ino, DT_UNKNOWN)) {
brelse(bh);
return 0;
}
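
As an aside, the gcc behaviour described in the comment block above can be reproduced outside the kernel. The following is a minimal, hypothetical userspace reduction (the type and field names merely mirror the qnx4 union and are not part of this patch); it shows why reading through the always-48-byte de_name member keeps the pointer and the size argument consistent:

#include <stdio.h>
#include <string.h>

struct short_entry { char fname[16]; };	/* like qnx4_inode_entry's name */
struct long_entry  { char fname[48]; };	/* like qnx4_link_info's name */

union entry {
	struct {
		const char de_name[48];		/* as large as the largest name */
	};
	struct short_entry inode;
	struct long_entry  link;
};

int main(void)
{
	union entry e = { .link.fname = "a-name-longer-than-sixteen-chars" };

	/* gcc may size a direct strnlen(e.inode.fname, 48) from the 16-byte
	 * member; going through de_name avoids that mismatch. */
	size_t size = strnlen(e.de_name, sizeof(e.link.fname));

	printf("%.*s (%zu bytes)\n", (int)size, e.de_name, size);
	return 0;
}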
diff --git a/fs/smbfs_common/smbfsctl.h b/fs/smbfs_common/smbfsctl.h
index d01e8c9d7a31..926f87cd6af0 100644
--- a/fs/smbfs_common/smbfsctl.h
+++ b/fs/smbfs_common/smbfsctl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1+ */
/*
- * fs/cifs/smbfsctl.h: SMB, CIFS, SMB2 FSCTL definitions
+ * SMB, CIFS, SMB2 FSCTL definitions
*
* Copyright (c) International Business Machines Corp., 2002,2013
* Author(s): Steve French (sfrench@us.ibm.com)
diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c
index 4f5e59f06284..37dd3fe5b1e9 100644
--- a/fs/vboxsf/super.c
+++ b/fs/vboxsf/super.c
@@ -21,10 +21,7 @@
#define VBOXSF_SUPER_MAGIC 0x786f4256 /* 'VBox' little endian */
-#define VBSF_MOUNT_SIGNATURE_BYTE_0 ('\000')
-#define VBSF_MOUNT_SIGNATURE_BYTE_1 ('\377')
-#define VBSF_MOUNT_SIGNATURE_BYTE_2 ('\376')
-#define VBSF_MOUNT_SIGNATURE_BYTE_3 ('\375')
+static const unsigned char VBSF_MOUNT_SIGNATURE[4] = "\000\377\376\375";
static int follow_symlinks;
module_param(follow_symlinks, int, 0444);
@@ -386,12 +383,7 @@ fail_nomem:
static int vboxsf_parse_monolithic(struct fs_context *fc, void *data)
{
- unsigned char *options = data;
-
- if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 &&
- options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 &&
- options[2] == VBSF_MOUNT_SIGNATURE_BYTE_2 &&
- options[3] == VBSF_MOUNT_SIGNATURE_BYTE_3) {
+ if (data && !memcmp(data, VBSF_MOUNT_SIGNATURE, 4)) {
vbg_err("vboxsf: Old binary mount data not supported, remove obsolete mount.vboxsf and/or update your VBoxService.\n");
return -EINVAL;
}
diff --git a/fs/verity/enable.c b/fs/verity/enable.c
index 77e159a0346b..60a4372aa4d7 100644
--- a/fs/verity/enable.c
+++ b/fs/verity/enable.c
@@ -177,7 +177,7 @@ static int build_merkle_tree(struct file *filp,
* (level 0) and ascending to the root node (level 'num_levels - 1').
* Then at the end (level 'num_levels'), calculate the root hash.
*/
- blocks = (inode->i_size + params->block_size - 1) >>
+ blocks = ((u64)inode->i_size + params->block_size - 1) >>
params->log_blocksize;
for (level = 0; level <= params->num_levels; level++) {
err = build_merkle_tree_level(filp, level, blocks, params,
diff --git a/fs/verity/open.c b/fs/verity/open.c
index 60ff8af7219f..92df87f5fa38 100644
--- a/fs/verity/open.c
+++ b/fs/verity/open.c
@@ -89,7 +89,7 @@ int fsverity_init_merkle_tree_params(struct merkle_tree_params *params,
*/
/* Compute number of levels and the number of blocks in each level */
- blocks = (inode->i_size + params->block_size - 1) >> log_blocksize;
+ blocks = ((u64)inode->i_size + params->block_size - 1) >> log_blocksize;
pr_debug("Data is %lld bytes (%llu blocks)\n", inode->i_size, blocks);
while (blocks > 1) {
if (params->num_levels >= FS_VERITY_MAX_LEVELS) {
diff --git a/include/acpi/acpi_io.h b/include/acpi/acpi_io.h
index a0212e67d6f4..027faa8883aa 100644
--- a/include/acpi/acpi_io.h
+++ b/include/acpi/acpi_io.h
@@ -14,14 +14,6 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
}
#endif
-#ifndef acpi_os_memmap
-static inline void __iomem *acpi_os_memmap(acpi_physical_address phys,
- acpi_size size)
-{
- return ioremap_cache(phys, size);
-}
-#endif
-
extern bool acpi_permanent_mmap;
void __iomem __ref
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index f4c16f19f83e..020a7d5bf470 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -578,11 +578,12 @@ struct btf_func_model {
* programs only. Should not be used with normal calls and indirect calls.
*/
#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
-
/* Store IP address of the caller on the trampoline stack,
* so it's available for trampoline's programs.
*/
#define BPF_TRAMP_F_IP_ARG BIT(3)
+/* Return the return value of fentry prog. Only used by bpf_struct_ops. */
+#define BPF_TRAMP_F_RET_FENTRY_RET BIT(4)
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86. Pick a number to fit into BPF_IMAGE_SIZE / 2
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 6486d3c19463..36f33685c8c0 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -194,7 +194,7 @@ void __breadahead_gfp(struct block_device *, sector_t block, unsigned int size,
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(int cpu);
+void invalidate_bh_lrus_cpu(void);
bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
@@ -408,7 +408,7 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
-static inline void invalidate_bh_lrus_cpu(int cpu) {}
+static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 5d4d07a9e1ed..1e7399fc69c0 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -996,14 +996,15 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* cpumask; Typically used by bin_attribute to export cpumask bitmask
* ABI.
*
- * Returns the length of how many bytes have been copied.
+ * Returns the number of bytes that have been copied, excluding the
+ * terminating '\0'.
*/
static inline ssize_t
cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask),
- nr_cpu_ids, off, count);
+ nr_cpu_ids, off, count) - 1;
}
/**
@@ -1018,7 +1019,7 @@ cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
loff_t off, size_t count)
{
return bitmap_print_list_to_buf(buf, cpumask_bits(mask),
- nr_cpu_ids, off, count);
+ nr_cpu_ids, off, count) - 1;
}
#if NR_CPUS <= BITS_PER_LONG
diff --git a/include/linux/dsa/ocelot.h b/include/linux/dsa/ocelot.h
index c6bc45ae5e03..435777a0073c 100644
--- a/include/linux/dsa/ocelot.h
+++ b/include/linux/dsa/ocelot.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
- * Copyright 2019-2021 NXP Semiconductors
+ * Copyright 2019-2021 NXP
*/
#ifndef _NET_DSA_TAG_OCELOT_H
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 59828516ebaf..9f4ad719bfe3 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -22,10 +22,15 @@ struct device;
* LINKS_ADDED: The fwnode has already be parsed to add fwnode links.
* NOT_DEVICE: The fwnode will never be populated as a struct device.
* INITIALIZED: The hardware corresponding to fwnode has been initialized.
+ * NEEDS_CHILD_BOUND_ON_ADD: For this fwnode/device to probe successfully, its
+ * driver needs its child devices to be bound with
+ * their respective drivers as soon as they are
+ * added.
*/
-#define FWNODE_FLAG_LINKS_ADDED BIT(0)
-#define FWNODE_FLAG_NOT_DEVICE BIT(1)
-#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_LINKS_ADDED BIT(0)
+#define FWNODE_FLAG_NOT_DEVICE BIT(1)
+#define FWNODE_FLAG_INITIALIZED BIT(2)
+#define FWNODE_FLAG_NEEDS_CHILD_BOUND_ON_ADD BIT(3)
struct fwnode_handle {
struct fwnode_handle *secondary;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 23e4ee523576..9ee238ad29ce 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -251,7 +251,7 @@ static inline struct fwnode_handle *irq_domain_alloc_fwnode(phys_addr_t *pa)
}
void irq_domain_free_fwnode(struct fwnode_handle *fwnode);
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data);
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index ffb787d5ebde..5e6dc38f418e 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -80,6 +80,9 @@ struct mdio_driver {
/* Clears up any memory if needed */
void (*remove)(struct mdio_device *mdiodev);
+
+ /* Quiesces the device on system shutdown, turns off interrupts etc */
+ void (*shutdown)(struct mdio_device *mdiodev);
};
static inline struct mdio_driver *
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 326250996b4e..c8077e936691 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -19,6 +19,11 @@ struct migration_target_control;
*/
#define MIGRATEPAGE_SUCCESS 0
+/*
+ * Keep in sync with:
+ * - macro MIGRATE_REASON in include/trace/events/migrate.h
+ * - migrate_reason_names[MR_TYPES] in mm/debug.c
+ */
enum migrate_reason {
MR_COMPACTION,
MR_MEMORY_FAILURE,
@@ -32,7 +37,6 @@ enum migrate_reason {
MR_TYPES
};
-/* In mm/debug.c; also keep sync with include/trace/events/migrate.h */
extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 923dada24eb4..c0c0cefc3b92 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -150,6 +150,20 @@ static inline int nvmem_cell_read_u64(struct device *dev,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_variable_le_u32(struct device *dev,
+ const char *cell_id,
+ u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_cell_read_variable_le_u64(struct device *dev,
+ const char *cell_id,
+ u64 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
diff --git a/include/linux/packing.h b/include/linux/packing.h
index 54667735cc67..8d6571feb95d 100644
--- a/include/linux/packing.h
+++ b/include/linux/packing.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+ * Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#ifndef _LINUX_PACKING_H
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index fe156a8170aa..9b60bb89d86a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -683,7 +683,9 @@ struct perf_event {
/*
* timestamp shadows the actual context timing but it can
* be safely used in NMI interrupt context. It reflects the
- * context time as it was when the event was last scheduled in.
+ * context time as it was when the event was last scheduled in,
+ * or when ctx_sched_in failed to schedule the event because we
+ * ran out of PMCs.
*
* ctx_time already accounts for ctx->timestamp. Therefore to
* compute ctx_time for a sample, simply add perf_clock().
diff --git a/include/linux/pkeys.h b/include/linux/pkeys.h
index 6beb26b7151d..86be8bf27b41 100644
--- a/include/linux/pkeys.h
+++ b/include/linux/pkeys.h
@@ -4,6 +4,8 @@
#include <linux/mm.h>
+#define ARCH_DEFAULT_PKEY 0
+
#ifdef CONFIG_ARCH_HAS_PKEYS
#include <asm/pkeys.h>
#else /* ! CONFIG_ARCH_HAS_PKEYS */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 39039ce8ac4c..c1a927ddec64 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1720,7 +1720,7 @@ extern struct pid *cad_pid;
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
-static inline bool is_percpu_thread(void)
+static __always_inline bool is_percpu_thread(void)
{
#ifdef CONFIG_SMP
return (current->flags & PF_NO_SETAFFINITY) &&
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 548a028f2dab..2c1fc9212cf2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -124,6 +124,7 @@ struct usb_hcd {
#define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */
#define HCD_FLAG_DEAD 6 /* controller has died? */
#define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */
+#define HCD_FLAG_DEFER_RH_REGISTER 8 /* Defer roothub registration */
/* The flags can be tested using these macros; they are likely to
* be slightly faster than test_bit().
@@ -134,6 +135,7 @@ struct usb_hcd {
#define HCD_WAKEUP_PENDING(hcd) ((hcd)->flags & (1U << HCD_FLAG_WAKEUP_PENDING))
#define HCD_RH_RUNNING(hcd) ((hcd)->flags & (1U << HCD_FLAG_RH_RUNNING))
#define HCD_DEAD(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEAD))
+#define HCD_DEFER_RH_REGISTER(hcd) ((hcd)->flags & (1U << HCD_FLAG_DEFER_RH_REGISTER))
/*
* Specifies if interfaces are authorized by default
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 258867eff230..d784e76113b8 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -585,8 +585,16 @@ struct dsa_switch_ops {
int (*change_tag_protocol)(struct dsa_switch *ds, int port,
enum dsa_tag_protocol proto);
+ /* Optional switch-wide initialization and destruction methods */
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
+
+ /* Per-port initialization and destruction methods. Mandatory if the
+ * driver registers devlink port regions, optional otherwise.
+ */
+ int (*port_setup)(struct dsa_switch *ds, int port);
+ void (*port_teardown)(struct dsa_switch *ds, int port);
+
u32 (*get_phy_flags)(struct dsa_switch *ds, int port);
/*
@@ -1046,6 +1054,7 @@ static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
+void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 21c5386d4a6d..ab5348e57db1 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -597,5 +597,5 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
u8 rt_family, unsigned char *flags, bool skip_oif);
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
- int nh_weight, u8 rt_family);
+ int nh_weight, u8 rt_family, u32 nh_tclassid);
#endif /* _NET_FIB_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index af0fc13cea34..618d1f427cb2 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -2818,13 +2818,13 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
* Mac80211 drivers should set the @NL80211_EXT_FEATURE_CAN_REPLACE_PTK0 flag
* when they are able to replace in-use PTK keys according to the following
* requirements:
- * 1) They do not hand over frames decrypted with the old key to
- mac80211 once the call to set_key() with command %DISABLE_KEY has been
- completed when also setting @IEEE80211_KEY_FLAG_GENERATE_IV for any key,
+ * 1) They do not hand over frames decrypted with the old key to mac80211
+ once the call to set_key() with command %DISABLE_KEY has been completed,
2) either drop or continue to use the old key for any outgoing frames queued
at the time of the key deletion (including re-transmits),
3) never send out a frame queued prior to the set_key() %SET_KEY command
- encrypted with the new key and
+ encrypted with the new key when also needing
+ @IEEE80211_KEY_FLAG_GENERATE_IV and
4) never send out a frame unencrypted when it should be encrypted.
Mac80211 will not queue any new frames for a deleted key to the driver.
*/
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 10e1777877e6..28085b995ddc 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -325,7 +325,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
struct fib_nh_common *nhc = &nhi->fib_nhc;
int weight = nhg->nh_entries[i].weight;
- if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
+ if (fib_add_nexthop(skb, nhc, weight, rt_family, 0) < 0)
return -EMSGSIZE;
}
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 6d7b12cba015..bf79f3a890af 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -11,6 +11,7 @@
#include <uapi/linux/pkt_sched.h>
#define DEFAULT_TX_QUEUE_LEN 1000
+#define STAB_SIZE_LOG_MAX 30
struct qdisc_walker {
int stop;
diff --git a/include/net/sock.h b/include/net/sock.h
index 66a9a90f9558..ae929e21a376 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -488,8 +488,10 @@ struct sock {
u8 sk_prefer_busy_poll;
u16 sk_busy_poll_budget;
#endif
+ spinlock_t sk_peer_lock;
struct pid *sk_peer_pid;
const struct cred *sk_peer_cred;
+
long sk_rcvtimeo;
ktime_t sk_stamp;
#if BITS_PER_LONG==32
@@ -1623,7 +1625,36 @@ void release_sock(struct sock *sk);
SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock);
+
+/**
+ * lock_sock_fast - fast version of lock_sock
+ * @sk: socket
+ *
+ * This version should be used for very small section, where process wont block
+ * return false if fast path is taken:
+ *
+ * sk_lock.slock locked, owned = 0, BH disabled
+ *
+ * return true if slow path is taken:
+ *
+ * sk_lock.slock unlocked, owned = 1, BH enabled
+ */
+static inline bool lock_sock_fast(struct sock *sk)
+{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
+
+/* fast socket lock variant for caller already holding a [different] socket lock */
+static inline bool lock_sock_fast_nested(struct sock *sk)
+{
+ mutex_acquire(&sk->sk_lock.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+
+ return __lock_sock_fast(sk);
+}
/**
* unlock_sock_fast - complement of lock_sock_fast
@@ -1640,6 +1671,7 @@ static inline void unlock_sock_fast(struct sock *sk, bool slow)
release_sock(sk);
__release(&sk->sk_lock.slock);
} else {
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
spin_unlock_bh(&sk->sk_lock.slock);
}
}
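
For reference, a minimal sketch of how this fast-lock pair is typically used by callers; the wrapper function below is hypothetical, only lock_sock_fast() and unlock_sock_fast() come from the header changed above:

#include <net/sock.h>

/* Hypothetical caller: a short, non-sleeping peek at socket state. */
static void example_peek(struct sock *sk)
{
	bool slow;

	slow = lock_sock_fast(sk);	/* false: slock held, owned = 0, BH off */
	/* ... very small critical section that must not block ... */
	unlock_sock_fast(sk, slow);	/* releases whichever path was taken */
}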
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 09a17f6e93a7..b97e142a7ca9 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -146,7 +146,6 @@ struct scsi_device {
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
struct scsi_vpd __rcu *vpd_pg89;
- unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target;
blist_flags_t sdev_bflags; /* black/white flags as also found in
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 989e1517332d..7a08ed2acd60 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -98,6 +98,7 @@ struct snd_rawmidi_file {
struct snd_rawmidi *rmidi;
struct snd_rawmidi_substream *input;
struct snd_rawmidi_substream *output;
+ unsigned int user_pversion; /* supported protocol version */
};
struct snd_rawmidi_str {
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 9f73ed2cf061..bca73e8c8cde 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -306,11 +306,13 @@ enum afs_flock_operation {
enum afs_cb_break_reason {
afs_cb_break_no_break,
+ afs_cb_break_no_promise,
afs_cb_break_for_callback,
afs_cb_break_for_deleted,
afs_cb_break_for_lapsed,
+ afs_cb_break_for_s_reinit,
afs_cb_break_for_unlink,
- afs_cb_break_for_vsbreak,
+ afs_cb_break_for_v_break,
afs_cb_break_for_volume_callback,
afs_cb_break_for_zap,
};
@@ -602,11 +604,13 @@ enum afs_cb_break_reason {
#define afs_cb_break_reasons \
EM(afs_cb_break_no_break, "no-break") \
+ EM(afs_cb_break_no_promise, "no-promise") \
EM(afs_cb_break_for_callback, "break-cb") \
EM(afs_cb_break_for_deleted, "break-del") \
EM(afs_cb_break_for_lapsed, "break-lapsed") \
+ EM(afs_cb_break_for_s_reinit, "s-reinit") \
EM(afs_cb_break_for_unlink, "break-unlink") \
- EM(afs_cb_break_for_vsbreak, "break-vs") \
+ EM(afs_cb_break_for_v_break, "break-v") \
EM(afs_cb_break_for_volume_callback, "break-v-cb") \
E_(afs_cb_break_for_zap, "break-zap")
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index 9a448fe9355d..695bfdbfdcad 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -305,7 +305,7 @@ TRACE_EVENT(cachefiles_mark_buried,
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
+ __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX;
__entry->de = de;
__entry->why = why;
),
diff --git a/include/trace/events/erofs.h b/include/trace/events/erofs.h
index bf9806fd1306..db4f2cec8360 100644
--- a/include/trace/events/erofs.h
+++ b/include/trace/events/erofs.h
@@ -35,20 +35,20 @@ TRACE_EVENT(erofs_lookup,
TP_STRUCT__entry(
__field(dev_t, dev )
__field(erofs_nid_t, nid )
- __field(const char *, name )
+ __string(name, dentry->d_name.name )
__field(unsigned int, flags )
),
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->nid = EROFS_I(dir)->nid;
- __entry->name = dentry->d_name.name;
+ __assign_str(name, dentry->d_name.name);
__entry->flags = flags;
),
TP_printk("dev = (%d,%d), pnid = %llu, name:%s, flags:%x",
show_dev_nid(__entry),
- __entry->name,
+ __get_str(name),
__entry->flags)
);
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 20e435fe657a..3246f2c74696 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -225,7 +225,14 @@ struct binder_freeze_info {
struct binder_frozen_status_info {
__u32 pid;
+
+ /* process received sync transactions since last frozen
+ * bit 0: received sync transaction after being frozen
+ * bit 1: new pending sync transaction during freezing
+ */
__u32 sync_recv;
+
+ /* process received async transactions since last frozen */
__u32 async_recv;
};
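
A hedged illustration of how the documented bits might be consumed from userspace once the struct has been filled in (for example by the binder freeze-status ioctl); the function name is made up, only the struct and its fields come from the header above:

#include <stdio.h>
#include <linux/android/binder.h>	/* assumes installed UAPI headers */

/* Illustrative decode of binder_frozen_status_info. */
static void report_frozen_status(const struct binder_frozen_status_info *info)
{
	printf("pid %u: sync after freeze %u, sync pending at freeze %u, async after freeze %u\n",
	       info->pid,
	       info->sync_recv & 0x1,		/* bit 0 */
	       (info->sync_recv >> 1) & 0x1,	/* bit 1 */
	       info->async_recv ? 1u : 0u);
}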
diff --git a/include/uapi/linux/cifs/cifs_mount.h b/include/uapi/linux/cifs/cifs_mount.h
index 69829205fdb5..8e87d27b0951 100644
--- a/include/uapi/linux/cifs/cifs_mount.h
+++ b/include/uapi/linux/cifs/cifs_mount.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: LGPL-2.1+ WITH Linux-syscall-note */
/*
- * include/uapi/linux/cifs/cifs_mount.h
*
* Author(s): Scott Lovenberg (scott.lovenberg@gmail.com)
*
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 1d84ec9db93b..5859ca0a1439 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -784,6 +784,7 @@ struct snd_rawmidi_status {
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
+#define SNDRV_RAWMIDI_IOCTL_USER_PVERSION _IOW('W', 0x02, int)
#define SNDRV_RAWMIDI_IOCTL_PARAMS _IOWR('W', 0x10, struct snd_rawmidi_params)
#define SNDRV_RAWMIDI_IOCTL_STATUS _IOWR('W', 0x20, struct snd_rawmidi_status)
#define SNDRV_RAWMIDI_IOCTL_DROP _IOW('W', 0x30, int)
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 39a5580f8feb..db28e79b77ee 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -46,19 +46,7 @@ extern unsigned long *xen_contiguous_bitmap;
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
-
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-#else
-static inline int xen_create_contiguous_region(phys_addr_t pstart,
- unsigned int order,
- unsigned int address_bits,
- dma_addr_t *dma_handle)
-{
- return 0;
-}
-
-static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
- unsigned int order) { }
#endif
#if defined(CONFIG_XEN_PV)
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 2ed30ff6c906..762b534978d9 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -338,20 +338,19 @@ __setup("rootflags=", root_data_setup);
__setup("rootfstype=", fs_names_setup);
__setup("rootdelay=", root_delay_setup);
-static int __init split_fs_names(char *page, char *names)
+/* This can return zero length strings. Caller should check */
+static int __init split_fs_names(char *page, size_t size, char *names)
{
- int count = 0;
+ int count = 1;
char *p = page;
- strcpy(p, root_fs_names);
+ strlcpy(p, root_fs_names, size);
while (*p++) {
- if (p[-1] == ',')
+ if (p[-1] == ',') {
p[-1] = '\0';
+ count++;
+ }
}
- *p = '\0';
-
- for (p = page; *p; p += strlen(p)+1)
- count++;
return count;
}
@@ -404,12 +403,16 @@ void __init mount_block_root(char *name, int flags)
scnprintf(b, BDEVNAME_SIZE, "unknown-block(%u,%u)",
MAJOR(ROOT_DEV), MINOR(ROOT_DEV));
if (root_fs_names)
- num_fs = split_fs_names(fs_names, root_fs_names);
+ num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
else
num_fs = list_bdev_fs_names(fs_names, PAGE_SIZE);
retry:
for (i = 0, p = fs_names; i < num_fs; i++, p += strlen(p)+1) {
- int err = do_mount_root(name, p, flags, root_mount_data);
+ int err;
+
+ if (!*p)
+ continue;
+ err = do_mount_root(name, p, flags, root_mount_data);
switch (err) {
case 0:
goto out;
@@ -543,19 +546,18 @@ static int __init mount_nodev_root(void)
fs_names = (void *)__get_free_page(GFP_KERNEL);
if (!fs_names)
return -EINVAL;
- num_fs = split_fs_names(fs_names, root_fs_names);
+ num_fs = split_fs_names(fs_names, PAGE_SIZE, root_fs_names);
for (i = 0, fstype = fs_names; i < num_fs;
i++, fstype += strlen(fstype) + 1) {
+ if (!*fstype)
+ continue;
if (!fs_is_nodev(fstype))
continue;
err = do_mount_root(root_device_name, fstype, root_mountflags,
root_mount_data);
if (!err)
break;
- if (err != -EACCES && err != -EINVAL)
- panic("VFS: Unable to mount root \"%s\" (%s), err=%d\n",
- root_device_name, fstype, err);
}
free_page((unsigned long)fs_names);
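
The reworked split_fs_names() above is easy to model outside the kernel. This hypothetical userspace sketch (strncpy stands in for the kernel's strlcpy) shows why the count now starts at 1 and why callers must skip zero-length entries:

#include <stdio.h>
#include <string.h>

static int split_fs_names(char *buf, size_t size, const char *names)
{
	int count = 1;
	char *p = buf;

	strncpy(buf, names, size - 1);
	buf[size - 1] = '\0';
	while (*p++) {
		if (p[-1] == ',') {
			p[-1] = '\0';
			count++;
		}
	}
	return count;
}

int main(void)
{
	char page[64];
	char *p = page;
	int i, n = split_fs_names(page, sizeof(page), "ext4,,xfs");

	for (i = 0; i < n; i++, p += strlen(p) + 1)
		printf("entry %d: \"%s\"\n", i, p);	/* entry 1 is "" */
	return 0;
}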
diff --git a/init/main.c b/init/main.c
index 3f7216934441..81a79a77db46 100644
--- a/init/main.c
+++ b/init/main.c
@@ -1242,7 +1242,7 @@ trace_initcall_start_cb(void *data, initcall_t fn)
{
ktime_t *calltime = (ktime_t *)data;
- printk(KERN_DEBUG "calling %pS @ %i irqs_disabled() %d\n", fn, task_pid_nr(current), irqs_disabled());
+ printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
*calltime = ktime_get();
}
@@ -1256,8 +1256,8 @@ trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
rettime = ktime_get();
delta = ktime_sub(rettime, *calltime);
duration = (unsigned long long) ktime_to_ns(delta) >> 10;
- printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs, irqs_disabled() %d\n",
- fn, ret, duration, irqs_disabled());
+ printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
+ fn, ret, duration);
}
static ktime_t initcall_calltime;
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index d6731c32864e..9abcc33f02cf 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -368,6 +368,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
const struct btf_type *mtype, *ptype;
struct bpf_prog *prog;
u32 moff;
+ u32 flags;
moff = btf_member_bit_offset(t, member) / 8;
ptype = btf_type_resolve_ptr(btf_vmlinux, member->type, NULL);
@@ -431,10 +432,12 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
tprogs[BPF_TRAMP_FENTRY].progs[0] = prog;
tprogs[BPF_TRAMP_FENTRY].nr_progs = 1;
+ flags = st_ops->func_models[i].ret_size > 0 ?
+ BPF_TRAMP_F_RET_FENTRY_RET : 0;
err = arch_prepare_bpf_trampoline(NULL, image,
st_map->image + PAGE_SIZE,
- &st_ops->func_models[i], 0,
- tprogs, NULL);
+ &st_ops->func_models[i],
+ flags, tprogs, NULL);
if (err < 0)
goto reset_unlock;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 9f4636d021b1..d6b7dfdd8066 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -827,7 +827,7 @@ int bpf_jit_charge_modmem(u32 pages)
{
if (atomic_long_add_return(pages, &bpf_jit_current) >
(bpf_jit_limit >> PAGE_SHIFT)) {
- if (!capable(CAP_SYS_ADMIN)) {
+ if (!bpf_capable()) {
atomic_long_sub(pages, &bpf_jit_current);
return -EPERM;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 8afa8690d288..570b0c97392a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6574,22 +6574,29 @@ int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v)
void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
- /* Don't associate the sock with unrelated interrupted task's cgroup. */
- if (in_interrupt())
- return;
+ struct cgroup *cgroup;
rcu_read_lock();
+ /* Don't associate the sock with unrelated interrupted task's cgroup. */
+ if (in_interrupt()) {
+ cgroup = &cgrp_dfl_root.cgrp;
+ cgroup_get(cgroup);
+ goto out;
+ }
+
while (true) {
struct css_set *cset;
cset = task_css_set(current);
if (likely(cgroup_tryget(cset->dfl_cgrp))) {
- skcd->cgroup = cset->dfl_cgrp;
- cgroup_bpf_get(cset->dfl_cgrp);
+ cgroup = cset->dfl_cgrp;
break;
}
cpu_relax();
}
+out:
+ skcd->cgroup = cgroup;
+ cgroup_bpf_get(cgroup);
rcu_read_unlock();
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0c000cb01eeb..f23ca260307f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3707,6 +3707,29 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
return 0;
}
+static inline bool event_update_userpage(struct perf_event *event)
+{
+ if (likely(!atomic_read(&event->mmap_count)))
+ return false;
+
+ perf_event_update_time(event);
+ perf_set_shadow_time(event, event->ctx);
+ perf_event_update_userpage(event);
+
+ return true;
+}
+
+static inline void group_update_userpage(struct perf_event *group_event)
+{
+ struct perf_event *event;
+
+ if (!event_update_userpage(group_event))
+ return;
+
+ for_each_sibling_event(event, group_event)
+ event_update_userpage(event);
+}
+
static int merge_sched_in(struct perf_event *event, void *data)
{
struct perf_event_context *ctx = event->ctx;
@@ -3725,14 +3748,15 @@ static int merge_sched_in(struct perf_event *event, void *data)
}
if (event->state == PERF_EVENT_STATE_INACTIVE) {
+ *can_add_hw = 0;
if (event->attr.pinned) {
perf_cgroup_event_disable(event, ctx);
perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+ } else {
+ ctx->rotate_necessary = 1;
+ perf_mux_hrtimer_restart(cpuctx);
+ group_update_userpage(event);
}
-
- *can_add_hw = 0;
- ctx->rotate_necessary = 1;
- perf_mux_hrtimer_restart(cpuctx);
}
return 0;
@@ -6324,6 +6348,8 @@ accounting:
ring_buffer_attach(event, rb);
+ perf_event_update_time(event);
+ perf_set_shadow_time(event, event->ctx);
perf_event_init_userpage(event);
perf_event_update_userpage(event);
} else {
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 19e83e9b723c..4d8fc65cf38f 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
* Allocates and initializes an irq_domain structure.
* Returns pointer to IRQ domain, or NULL on failure.
*/
-struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, unsigned int size,
irq_hw_number_t hwirq_max, int direct_max,
const struct irq_domain_ops *ops,
void *host_data)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 49716228efb4..17a653b67006 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -173,16 +173,22 @@ static ssize_t sched_scaling_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
char buf[16];
+ unsigned int scaling;
if (cnt > 15)
cnt = 15;
if (copy_from_user(&buf, ubuf, cnt))
return -EFAULT;
+ buf[cnt] = '\0';
- if (kstrtouint(buf, 10, &sysctl_sched_tunable_scaling))
+ if (kstrtouint(buf, 10, &scaling))
return -EINVAL;
+ if (scaling >= SCHED_TUNABLESCALING_END)
+ return -EINVAL;
+
+ sysctl_sched_tunable_scaling = scaling;
if (sched_update_scaling())
return -EINVAL;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ff69f245b939..f6a05d9b5443 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4936,8 +4936,12 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
/* update hierarchical throttle state */
walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
- if (!cfs_rq->load.weight)
+ /* Nothing to run but something to decay (on_list)? Complete the branch */
+ if (!cfs_rq->load.weight) {
+ if (cfs_rq->on_list)
+ goto unthrottle_throttle;
return;
+ }
task_delta = cfs_rq->h_nr_running;
idle_task_delta = cfs_rq->idle_h_nr_running;
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index ee736861b18f..643d412ac623 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -1404,7 +1404,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
}
}
- *newval += now;
+ if (*newval)
+ *newval += now;
}
/*
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c221e4c3f625..fa91f398f28b 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1605,6 +1605,14 @@ static int blk_trace_remove_queue(struct request_queue *q)
if (bt == NULL)
return -EINVAL;
+ if (bt->trace_state == Blktrace_running) {
+ bt->trace_state = Blktrace_stopped;
+ spin_lock_irq(&running_trace_lock);
+ list_del_init(&bt->running_list);
+ spin_unlock_irq(&running_trace_lock);
+ relay_flush(bt->rchan);
+ }
+
put_probe_ref();
synchronize_rcu();
blk_trace_free(bt);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index d566f601780f..2a9b6dcdac4f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -346,7 +346,7 @@ config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
- default 1536 if (!64BIT && PARISC)
+ default 1536 if (!64BIT && (PARISC || XTENSA))
default 1024 if (!64BIT && !PARISC)
default 2048 if 64BIT
help
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 1e2d10f86011..cdc842d090db 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -66,6 +66,7 @@ choice
config KASAN_GENERIC
bool "Generic mode"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
@@ -86,6 +87,7 @@ config KASAN_GENERIC
config KASAN_SW_TAGS
bool "Software tag-based mode"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
+ depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
select SLUB_DEBUG if SLUB
select CONSTRUCTORS
help
diff --git a/lib/packing.c b/lib/packing.c
index 6ed72dccfdb5..9a72f4bbf0e2 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
-/* Copyright (c) 2016-2018, NXP Semiconductors
+/* Copyright 2016-2018 NXP
* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
*/
#include <linux/packing.h>
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index f19c4fbe1be7..2843f9bb42ac 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -253,13 +253,12 @@ void inflate_fast(z_streamp strm, unsigned start)
sfrom = (unsigned short *)(from);
loops = len >> 1;
- do
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
- *sout++ = *sfrom++;
-#else
- *sout++ = get_unaligned16(sfrom++);
-#endif
- while (--loops);
+ do {
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ *sout++ = *sfrom++;
+ else
+ *sout++ = get_unaligned16(sfrom++);
+ } while (--loops);
out = (unsigned char *)sout;
from = (unsigned char *)sfrom;
} else { /* dist == 1 or dist == 2 */
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h
index 930e83bceef0..4eddcfa73996 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/dbgfs-test.h
@@ -20,27 +20,27 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
ssize_t nr_integers = 0, i;
question = "123";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
kfree(answers);
question = "123abc";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
kfree(answers);
question = "a123";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
kfree(answers);
question = "12 35";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
for (i = 0; i < nr_integers; i++)
@@ -48,7 +48,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
kfree(answers);
question = "12 35 46";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
for (i = 0; i < nr_integers; i++)
@@ -56,7 +56,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
kfree(answers);
question = "12 35 abc 46";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
for (i = 0; i < 2; i++)
@@ -64,13 +64,13 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
kfree(answers);
question = "";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
kfree(answers);
question = "\n";
- answers = str_to_target_ids(question, strnlen(question, 128),
+ answers = str_to_target_ids(question, strlen(question),
&nr_integers);
KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
kfree(answers);
diff --git a/mm/debug.c b/mm/debug.c
index e73fe0a8ec3d..fae0f81ad831 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -24,7 +24,9 @@ const char *migrate_reason_names[MR_TYPES] = {
"syscall_or_cpuset",
"mempolicy_mbind",
"numa_misplaced",
- "cma",
+ "contig_range",
+ "longterm_pin",
+ "demotion",
};
const struct trace_print_flags pageflag_names[] = {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b762215d73eb..6da5020a8656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -106,9 +106,6 @@ static bool do_memsw_account(void)
/* memcg and lruvec stats flushing */
static void flush_memcg_stats_dwork(struct work_struct *w);
static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
static DEFINE_SPINLOCK(stats_flush_lock);
#define THRESHOLDS_EVENTS_TARGET 128
@@ -682,8 +679,6 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
/* Update lruvec */
__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
- if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
- queue_work(system_unbound_wq, &stats_flush_work);
}
/**
@@ -5361,11 +5356,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
}
-static void flush_memcg_stats_work(struct work_struct *w)
-{
- mem_cgroup_flush_stats();
-}
-
static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 54879c339024..3e6449f2102a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -306,6 +306,7 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
struct vm_area_struct *vma)
{
unsigned long address = vma_address(page, vma);
+ unsigned long ret = 0;
pgd_t *pgd;
p4d_t *p4d;
pud_t *pud;
@@ -329,11 +330,10 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
if (pmd_devmap(*pmd))
return PMD_SHIFT;
pte = pte_offset_map(pmd, address);
- if (!pte_present(*pte))
- return 0;
- if (pte_devmap(*pte))
- return PAGE_SHIFT;
- return 0;
+ if (pte_present(*pte) && pte_devmap(*pte))
+ ret = PAGE_SHIFT;
+ pte_unmap(pte);
+ return ret;
}
/*
@@ -1126,7 +1126,7 @@ static int page_action(struct page_state *ps, struct page *p,
*/
static inline bool HWPoisonHandlable(struct page *page)
{
- return PageLRU(page) || __PageMovable(page);
+ return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
}
static int __get_hwpoison_page(struct page *page)
diff --git a/mm/memory.c b/mm/memory.c
index 25fc46e87214..adf9b9ef8277 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
unmap_mapping_range_tree(&mapping->i_mmap, &details);
i_mmap_unlock_write(mapping);
}
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
/**
* unmap_mapping_range - unmap the portion of all mmaps in the specified
diff --git a/mm/shmem.c b/mm/shmem.c
index 88742953532c..b5860f4a2738 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -490,9 +490,9 @@ bool shmem_is_huge(struct vm_area_struct *vma,
case SHMEM_HUGE_ALWAYS:
return true;
case SHMEM_HUGE_WITHIN_SIZE:
- index = round_up(index, HPAGE_PMD_NR);
+ index = round_up(index + 1, HPAGE_PMD_NR);
i_size = round_up(i_size_read(inode), PAGE_SIZE);
- if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+ if (i_size >> PAGE_SHIFT >= index)
return true;
fallthrough;
case SHMEM_HUGE_ADVISE:
diff --git a/mm/swap.c b/mm/swap.c
index 897200d27dd0..af3cad4e5378 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -620,7 +620,6 @@ void lru_add_drain_cpu(int cpu)
pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
activate_page_drain(cpu);
- invalidate_bh_lrus_cpu(cpu);
}
/**
@@ -703,6 +702,20 @@ void lru_add_drain(void)
local_unlock(&lru_pvecs.lock);
}
+/*
+ * It's called from per-cpu workqueue context in the SMP case, so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu run on the same CPU.
+ * That isn't a problem in the !SMP case either, since there is only
+ * one core and the locks disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+ local_lock(&lru_pvecs.lock);
+ lru_add_drain_cpu(smp_processor_id());
+ local_unlock(&lru_pvecs.lock);
+ invalidate_bh_lrus_cpu();
+}
+
void lru_add_drain_cpu_zone(struct zone *zone)
{
local_lock(&lru_pvecs.lock);
@@ -717,7 +730,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
- lru_add_drain();
+ lru_add_and_bh_lrus_drain();
}
/*
@@ -858,7 +871,7 @@ void lru_cache_disable(void)
*/
__lru_add_drain_all(true);
#else
- lru_add_drain();
+ lru_add_and_bh_lrus_drain();
#endif
}
diff --git a/mm/util.c b/mm/util.c
index 499b6b5767ed..bacabe446906 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -787,7 +787,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
size_t *lenp, loff_t *ppos)
{
struct ctl_table t;
- int new_policy;
+ int new_policy = -1;
int ret;
/*
@@ -805,7 +805,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
t = *table;
t.data = &new_policy;
ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
- if (ret)
+ if (ret || new_policy == -1)
return ret;
mm_compute_batch(new_policy);
diff --git a/mm/workingset.c b/mm/workingset.c
index d4268d8e9a82..d5b81e4f4cbe 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -352,6 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
+ mem_cgroup_flush_stats();
/*
* Compare the distance to the existing workingset size. We
* don't activate pages that couldn't stay resident even if
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 2eb0e55ef54d..b5f4ef35357c 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -552,6 +552,12 @@ static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}
+static struct proto bpf_dummy_proto = {
+ .name = "bpf_dummy",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct sock),
+};
+
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
@@ -596,20 +602,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
break;
}
- sk = kzalloc(sizeof(struct sock), GFP_USER);
+ sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
if (!sk) {
kfree(data);
kfree(ctx);
return -ENOMEM;
}
- sock_net_set(sk, net);
sock_init_data(NULL, sk);
skb = build_skb(data, 0);
if (!skb) {
kfree(data);
kfree(ctx);
- kfree(sk);
+ sk_free(sk);
return -ENOMEM;
}
skb->sk = sk;
@@ -682,8 +687,7 @@ out:
if (dev && dev != net->loopback_dev)
dev_put(dev);
kfree_skb(skb);
- bpf_sk_storage_free(sk);
- kfree(sk);
+ sk_free(sk);
kfree(ctx);
return ret;
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3523c8c7068f..f3d751105343 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1677,8 +1677,6 @@ static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
int ifindex,
struct br_ip *saddr)
{
- lockdep_assert_held_once(&brmctx->br->multicast_lock);
-
write_seqcount_begin(&querier->seq);
querier->port_ifidx = ifindex;
memcpy(&querier->addr, saddr, sizeof(*saddr));
@@ -3867,13 +3865,13 @@ void br_multicast_ctx_init(struct net_bridge *br,
brmctx->ip4_other_query.delay_time = 0;
brmctx->ip4_querier.port_ifidx = 0;
- seqcount_init(&brmctx->ip4_querier.seq);
+ seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
brmctx->multicast_mld_version = 1;
brmctx->ip6_other_query.delay_time = 0;
brmctx->ip6_querier.port_ifidx = 0;
- seqcount_init(&brmctx->ip6_querier.seq);
+ seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif
timer_setup(&brmctx->ip4_mc_router_timer,
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index b4cef3a97f12..e8136db44462 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,7 +82,7 @@ struct bridge_mcast_other_query {
struct bridge_mcast_querier {
struct br_ip addr;
int port_ifidx;
- seqcount_t seq;
+ seqcount_spinlock_t seq;
};
/* IGMP/MLD statistics */
diff --git a/net/core/dev.c b/net/core/dev.c
index 74fd402d26dd..7ee9fecd3aff 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6923,12 +6923,16 @@ EXPORT_SYMBOL(napi_disable);
*/
void napi_enable(struct napi_struct *n)
{
- BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
- smp_mb__before_atomic();
- clear_bit(NAPI_STATE_SCHED, &n->state);
- clear_bit(NAPI_STATE_NPSVC, &n->state);
- if (n->dev->threaded && n->thread)
- set_bit(NAPI_STATE_THREADED, &n->state);
+ unsigned long val, new;
+
+ do {
+ val = READ_ONCE(n->state);
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &val));
+
+ new = val & ~(NAPIF_STATE_SCHED | NAPIF_STATE_NPSVC);
+ if (n->dev->threaded && n->thread)
+ new |= NAPIF_STATE_THREADED;
+ } while (cmpxchg(&n->state, val, new) != val);
}
EXPORT_SYMBOL(napi_enable);
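
The napi_enable() hunk above replaces the separate clear_bit()/set_bit() calls with a single cmpxchg() loop, so the whole state word is updated in one atomic step. Below is a minimal userspace sketch of that read-modify-write pattern using C11 atomics; the flag names and enable_flags() are invented for illustration and are not kernel API.

#include <stdatomic.h>
#include <stdio.h>

#define F_SCHED    (1UL << 0)
#define F_NPSVC    (1UL << 1)
#define F_THREADED (1UL << 2)

/* Clear SCHED/NPSVC and optionally set THREADED in one atomic update. */
static void enable_flags(_Atomic unsigned long *state, int threaded)
{
    unsigned long old = atomic_load(state);
    unsigned long new;

    do {
        new = old & ~(F_SCHED | F_NPSVC);
        if (threaded)
            new |= F_THREADED;
        /* On failure, 'old' is reloaded with the current value. */
    } while (!atomic_compare_exchange_weak(state, &old, new));
}

int main(void)
{
    _Atomic unsigned long state = F_SCHED | F_NPSVC;

    enable_flags(&state, 1);
    printf("state = %#lx\n", atomic_load(&state)); /* prints 0x4 */
    return 0;
}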
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 8c39283c26ae..f0cb38344126 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -50,6 +50,11 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
if (addr_len > MAX_ADDR_LEN)
return -EINVAL;
+ ha = list_first_entry(&list->list, struct netdev_hw_addr, list);
+ if (ha && !memcmp(addr, ha->addr, addr_len) &&
+ (!addr_type || addr_type == ha->type))
+ goto found_it;
+
while (*ins_point) {
int diff;
@@ -64,6 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
} else if (diff > 0) {
ins_point = &parent->rb_right;
} else {
+found_it:
if (exclusive)
return -EEXIST;
if (global) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 62627e868e03..c1601f75ec4b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1376,6 +1376,16 @@ set_sndbuf:
}
EXPORT_SYMBOL(sock_setsockopt);
+static const struct cred *sk_get_peer_cred(struct sock *sk)
+{
+ const struct cred *cred;
+
+ spin_lock(&sk->sk_peer_lock);
+ cred = get_cred(sk->sk_peer_cred);
+ spin_unlock(&sk->sk_peer_lock);
+
+ return cred;
+}
static void cred_to_ucred(struct pid *pid, const struct cred *cred,
struct ucred *ucred)
@@ -1552,7 +1562,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
struct ucred peercred;
if (len > sizeof(peercred))
len = sizeof(peercred);
+
+ spin_lock(&sk->sk_peer_lock);
cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred);
+ spin_unlock(&sk->sk_peer_lock);
+
if (copy_to_user(optval, &peercred, len))
return -EFAULT;
goto lenout;
@@ -1560,20 +1574,23 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
case SO_PEERGROUPS:
{
+ const struct cred *cred;
int ret, n;
- if (!sk->sk_peer_cred)
+ cred = sk_get_peer_cred(sk);
+ if (!cred)
return -ENODATA;
- n = sk->sk_peer_cred->group_info->ngroups;
+ n = cred->group_info->ngroups;
if (len < n * sizeof(gid_t)) {
len = n * sizeof(gid_t);
+ put_cred(cred);
return put_user(len, optlen) ? -EFAULT : -ERANGE;
}
len = n * sizeof(gid_t);
- ret = groups_to_user((gid_t __user *)optval,
- sk->sk_peer_cred->group_info);
+ ret = groups_to_user((gid_t __user *)optval, cred->group_info);
+ put_cred(cred);
if (ret)
return ret;
goto lenout;
@@ -1935,9 +1952,10 @@ static void __sk_destruct(struct rcu_head *head)
sk->sk_frag.page = NULL;
}
- if (sk->sk_peer_cred)
- put_cred(sk->sk_peer_cred);
+ /* We do not need to acquire sk->sk_peer_lock, we are the last user. */
+ put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
+
if (likely(sk->sk_net_refcnt))
put_net(sock_net(sk));
sk_prot_free(sk->sk_prot_creator, sk);
@@ -3145,6 +3163,8 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_peer_pid = NULL;
sk->sk_peer_cred = NULL;
+ spin_lock_init(&sk->sk_peer_lock);
+
sk->sk_write_pending = 0;
sk->sk_rcvlowat = 1;
sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
@@ -3179,17 +3199,15 @@ EXPORT_SYMBOL(sock_init_data);
void lock_sock_nested(struct sock *sk, int subclass)
{
+ /* The sk_lock has mutex_lock() semantics here. */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
- local_bh_enable();
+ spin_unlock_bh(&sk->sk_lock.slock);
}
EXPORT_SYMBOL(lock_sock_nested);
@@ -3212,42 +3230,37 @@ void release_sock(struct sock *sk)
}
EXPORT_SYMBOL(release_sock);
-/**
- * lock_sock_fast - fast version of lock_sock
- * @sk: socket
- *
- * This version should be used for very small section, where process wont block
- * return false if fast path is taken:
- *
- * sk_lock.slock locked, owned = 0, BH disabled
- *
- * return true if slow path is taken:
- *
- * sk_lock.slock unlocked, owned = 1, BH enabled
- */
-bool lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
+bool __lock_sock_fast(struct sock *sk) __acquires(&sk->sk_lock.slock)
{
might_sleep();
spin_lock_bh(&sk->sk_lock.slock);
- if (!sk->sk_lock.owned)
+ if (!sk->sk_lock.owned) {
/*
- * Note : We must disable BH
+ * Fast path return with bottom halves disabled and
+ * sock::sk_lock.slock held.
+ *
+ * The 'mutex' is not contended and holding
+ * sock::sk_lock.slock prevents all other lockers to
+ * proceed so the corresponding unlock_sock_fast() can
+ * avoid the slow path of release_sock() completely and
+ * just release slock.
+ *
+ * From a semantical POV this is equivalent to 'acquiring'
+ * the 'mutex', hence the corresponding lockdep
+ * mutex_release() has to happen in the fast path of
+ * unlock_sock_fast().
*/
return false;
+ }
__lock_sock(sk);
sk->sk_lock.owned = 1;
- spin_unlock(&sk->sk_lock.slock);
- /*
- * The sk_lock has mutex_lock() semantics here:
- */
- mutex_acquire(&sk->sk_lock.dep_map, 0, 0, _RET_IP_);
__acquire(&sk->sk_lock.slock);
- local_bh_enable();
+ spin_unlock_bh(&sk->sk_lock.slock);
return true;
}
-EXPORT_SYMBOL(lock_sock_fast);
+EXPORT_SYMBOL(__lock_sock_fast);
int sock_gettstamp(struct socket *sock, void __user *userstamp,
bool timeval, bool time32)
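
The SO_PEERCRED/SO_PEERGROUPS hunks above hold the new sk_peer_lock only long enough to take a reference on the cred, then use that reference with the lock dropped. The sketch below shows the same shape in userspace terms with a mutex plus an atomic refcount; struct peer, peer_get() and peer_put() are made-up names, not the kernel cred API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct peer {
    atomic_int refcnt;
    int ngroups;
};

static pthread_mutex_t peer_lock = PTHREAD_MUTEX_INITIALIZER;
static struct peer *current_peer;   /* protected by peer_lock */

static struct peer *peer_get(void)
{
    struct peer *p;

    pthread_mutex_lock(&peer_lock);
    p = current_peer;
    if (p)
        atomic_fetch_add(&p->refcnt, 1);   /* reference taken under the lock */
    pthread_mutex_unlock(&peer_lock);
    return p;
}

static void peer_put(struct peer *p)
{
    if (p && atomic_fetch_sub(&p->refcnt, 1) == 1)
        free(p);
}

int main(void)
{
    struct peer *p;

    current_peer = calloc(1, sizeof(*current_peer));
    if (!current_peer)
        return 1;
    atomic_init(&current_peer->refcnt, 1);
    current_peer->ngroups = 3;

    p = peer_get();                 /* short critical section ... */
    if (p) {
        printf("ngroups = %d\n", p->ngroups);  /* ... longer use outside it */
        peer_put(p);
    }
    peer_put(current_peer);
    return 0;
}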
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index eef13cd20f19..b29262eee00b 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -429,6 +429,7 @@ static int dsa_port_setup(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
bool dsa_port_link_registered = false;
+ struct dsa_switch *ds = dp->ds;
bool dsa_port_enabled = false;
int err = 0;
@@ -438,6 +439,12 @@ static int dsa_port_setup(struct dsa_port *dp)
INIT_LIST_HEAD(&dp->fdbs);
INIT_LIST_HEAD(&dp->mdbs);
+ if (ds->ops->port_setup) {
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
switch (dp->type) {
case DSA_PORT_TYPE_UNUSED:
dsa_port_disable(dp);
@@ -480,8 +487,11 @@ static int dsa_port_setup(struct dsa_port *dp)
dsa_port_disable(dp);
if (err && dsa_port_link_registered)
dsa_port_link_unregister_of(dp);
- if (err)
+ if (err) {
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
return err;
+ }
dp->setup = true;
@@ -533,11 +543,15 @@ static int dsa_port_devlink_setup(struct dsa_port *dp)
static void dsa_port_teardown(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch *ds = dp->ds;
struct dsa_mac_addr *a, *tmp;
if (!dp->setup)
return;
+ if (ds->ops->port_teardown)
+ ds->ops->port_teardown(ds, dp->index);
+
devlink_port_type_clear(dlp);
switch (dp->type) {
@@ -581,6 +595,36 @@ static void dsa_port_devlink_teardown(struct dsa_port *dp)
dp->devlink_port_setup = false;
}
+/* Destroy the current devlink port, and create a new one which has the UNUSED
+ * flavour. At this point, any call to ds->ops->port_setup has been already
+ * balanced out by a call to ds->ops->port_teardown, so we know that any
+ * devlink port regions the driver had are now unregistered. We then call its
+ * ds->ops->port_setup again, in order for the driver to re-create them on the
+ * new devlink port.
+ */
+static int dsa_port_reinit_as_unused(struct dsa_port *dp)
+{
+ struct dsa_switch *ds = dp->ds;
+ int err;
+
+ dsa_port_devlink_teardown(dp);
+ dp->type = DSA_PORT_TYPE_UNUSED;
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ return err;
+
+ if (ds->ops->port_setup) {
+ /* On error, leave the devlink port registered,
+ * dsa_switch_teardown will clean it up later.
+ */
+ err = ds->ops->port_setup(ds, dp->index);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int dsa_devlink_info_get(struct devlink *dl,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -836,7 +880,7 @@ static int dsa_switch_setup(struct dsa_switch *ds)
devlink_params_publish(ds->devlink);
if (!ds->slave_mii_bus && ds->ops->phy_read) {
- ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus) {
err = -ENOMEM;
goto teardown;
@@ -846,13 +890,16 @@ static int dsa_switch_setup(struct dsa_switch *ds)
err = mdiobus_register(ds->slave_mii_bus);
if (err < 0)
- goto teardown;
+ goto free_slave_mii_bus;
}
ds->setup = true;
return 0;
+free_slave_mii_bus:
+ if (ds->slave_mii_bus && ds->ops->phy_read)
+ mdiobus_free(ds->slave_mii_bus);
teardown:
if (ds->ops->teardown)
ds->ops->teardown(ds);
@@ -877,8 +924,11 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
if (!ds->setup)
return;
- if (ds->slave_mii_bus && ds->ops->phy_read)
+ if (ds->slave_mii_bus && ds->ops->phy_read) {
mdiobus_unregister(ds->slave_mii_bus);
+ mdiobus_free(ds->slave_mii_bus);
+ ds->slave_mii_bus = NULL;
+ }
dsa_switch_unregister_notifier(ds);
@@ -938,12 +988,9 @@ static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
list_for_each_entry(dp, &dst->ports, list) {
err = dsa_port_setup(dp);
if (err) {
- dsa_port_devlink_teardown(dp);
- dp->type = DSA_PORT_TYPE_UNUSED;
- err = dsa_port_devlink_setup(dp);
+ err = dsa_port_reinit_as_unused(dp);
if (err)
goto teardown;
- continue;
}
}
@@ -1048,6 +1095,7 @@ static int dsa_tree_setup(struct dsa_switch_tree *dst)
teardown_master:
dsa_tree_teardown_master(dst);
teardown_switches:
+ dsa_tree_teardown_ports(dst);
dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
dsa_tree_teardown_cpu_ports(dst);
@@ -1562,3 +1610,53 @@ void dsa_unregister_switch(struct dsa_switch *ds)
mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
+
+/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
+ * blocking that operation from completion, due to the dev_hold taken inside
+ * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
+ * the DSA master, so that the system can reboot successfully.
+ */
+void dsa_switch_shutdown(struct dsa_switch *ds)
+{
+ struct net_device *master, *slave_dev;
+ LIST_HEAD(unregister_list);
+ struct dsa_port *dp;
+
+ mutex_lock(&dsa2_mutex);
+ rtnl_lock();
+
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds != ds)
+ continue;
+
+ if (!dsa_port_is_user(dp))
+ continue;
+
+ master = dp->cpu_dp->master;
+ slave_dev = dp->slave;
+
+ netdev_upper_dev_unlink(master, slave_dev);
+ /* Just unlinking ourselves as uppers of the master is not
+ * sufficient. When the master net device unregisters, that will
+ * also call dev_close, which we will catch as NETDEV_GOING_DOWN
+ * and trigger a dev_close on our own devices (dsa_slave_close).
+ * In turn, that will call dev_mc_unsync on the master's net
+ * device. If the master is also a DSA switch port, this will
+ * trigger dsa_slave_set_rx_mode which will call dev_mc_sync on
+ * its own master. Lockdep will complain about the fact that
+ * all cascaded masters have the same dsa_master_addr_list_lock_key,
+ * which it normally would not do if the cascaded masters would
+ * be in a proper upper/lower relationship, which we've just
+ * destroyed.
+ * To suppress the lockdep warnings, let's actually unregister
+ * the DSA slave interfaces too, to avoid the nonsensical
+ * multicast address list synchronization on shutdown.
+ */
+ unregister_netdevice_queue(slave_dev, &unregister_list);
+ }
+ unregister_netdevice_many(&unregister_list);
+
+ rtnl_unlock();
+ mutex_unlock(&dsa2_mutex);
+}
+EXPORT_SYMBOL_GPL(dsa_switch_shutdown);
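
dsa_switch_setup() above gains a free_slave_mii_bus label so a failed mdiobus_register() frees the bus, which is now allocated with mdiobus_alloc() rather than devm. Purely for illustration, here is the general goto-unwind idiom with two invented stand-ins (bus_alloc(), bus_register(), setup_device() are not real APIs):

#include <stdlib.h>

struct bus { int dummy; };

/* Stand-ins for setup steps that can fail. */
static struct bus *bus_alloc(void) { return calloc(1, sizeof(struct bus)); }
static int bus_register(struct bus *b) { (void)b; return 0; /* imagine it can fail */ }
static void bus_unregister(struct bus *b) { (void)b; }

static int setup_device(struct bus **out)
{
    struct bus *b;
    int err;

    b = bus_alloc();
    if (!b)
        return -1;

    err = bus_register(b);
    if (err)
        goto free_bus;          /* undo the allocation ... */

    *out = b;
    return 0;

free_bus:                       /* ... in reverse order of setup */
    free(b);
    return err;
}

int main(void)
{
    struct bus *b = NULL;

    if (setup_device(&b) == 0) {
        bus_unregister(b);
        free(b);
    }
    return 0;
}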
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index d37ab98e7fe1..8025ed778d33 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2019 NXP Semiconductors
+/* Copyright 2019 NXP
*/
#include <linux/dsa/ocelot.h>
#include <soc/mscc/ocelot.h>
diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c
index 3038a257ba05..59072930cb02 100644
--- a/net/dsa/tag_ocelot_8021q.c
+++ b/net/dsa/tag_ocelot_8021q.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright 2020-2021 NXP Semiconductors
+/* Copyright 2020-2021 NXP
*
* An implementation of the software-defined tag_8021q.c tagger format, which
* also preserves full functionality under a vlan_filtering bridge. It does
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index b42c429cebbe..3364cb9c67e0 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1661,7 +1661,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
- int nh_weight, u8 rt_family)
+ int nh_weight, u8 rt_family, u32 nh_tclassid)
{
const struct net_device *dev = nhc->nhc_dev;
struct rtnexthop *rtnh;
@@ -1679,6 +1679,9 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
rtnh->rtnh_flags = flags;
+ if (nh_tclassid && nla_put_u32(skb, RTA_FLOW, nh_tclassid))
+ goto nla_put_failure;
+
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
@@ -1706,14 +1709,13 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
}
for_nexthops(fi) {
- if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
- AF_INET) < 0)
- goto nla_put_failure;
+ u32 nh_tclassid = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
- if (nh->nh_tclassid &&
- nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
- goto nla_put_failure;
+ nh_tclassid = nh->nh_tclassid;
#endif
+ if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
+ AF_INET, nh_tclassid) < 0)
+ goto nla_put_failure;
} endfor_nexthops(fi);
mp_end:
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index b88e0f36cd05..8265c6765705 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -42,7 +42,7 @@ iptable_raw_hook(void *priv, struct sk_buff *skb,
static struct nf_hook_ops *rawtable_ops __read_mostly;
-static int __net_init iptable_raw_table_init(struct net *net)
+static int iptable_raw_table_init(struct net *net)
{
struct ipt_replace *repl;
const struct xt_table *table = &packet_raw;
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 75ca4b6e484f..9e8100728d46 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -1982,6 +1982,8 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
rcu_assign_pointer(old->nh_grp, newg);
if (newg->resilient) {
+ /* Make sure concurrent readers are not using 'oldg' anymore. */
+ synchronize_net();
rcu_assign_pointer(oldg->res_table, tmp_table);
rcu_assign_pointer(oldg->spare->res_table, tmp_table);
}
@@ -3565,6 +3567,7 @@ static struct notifier_block nh_netdev_notifier = {
};
static int nexthops_dump(struct net *net, struct notifier_block *nb,
+ enum nexthop_event_type event_type,
struct netlink_ext_ack *extack)
{
struct rb_root *root = &net->nexthop.rb_root;
@@ -3575,8 +3578,7 @@ static int nexthops_dump(struct net *net, struct notifier_block *nb,
struct nexthop *nh;
nh = rb_entry(node, struct nexthop, rb_node);
- err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
- extack);
+ err = call_nexthop_notifier(nb, net, event_type, nh, extack);
if (err)
break;
}
@@ -3590,7 +3592,7 @@ int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
int err;
rtnl_lock();
- err = nexthops_dump(net, nb, extack);
+ err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
if (err)
goto unlock;
err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
@@ -3603,8 +3605,17 @@ EXPORT_SYMBOL(register_nexthop_notifier);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
- return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
- nb);
+ int err;
+
+ rtnl_lock();
+ err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+ nb);
+ if (err)
+ goto unlock;
+ nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
+unlock:
+ rtnl_unlock();
+ return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8851c9463b4b..2a7825a5b842 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1053,7 +1053,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
__be16 dport;
u8 tos;
int err, is_udplite = IS_UDPLITE(sk);
- int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+ int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
struct sk_buff *skb;
struct ip_options_data opt_copy;
@@ -1361,7 +1361,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset,
}
up->len += size;
- if (!(up->corkflag || (flags&MSG_MORE)))
+ if (!(READ_ONCE(up->corkflag) || (flags&MSG_MORE)))
ret = udp_push_pending_frames(sk);
if (!ret)
ret = size;
@@ -2662,9 +2662,9 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case UDP_CORK:
if (val != 0) {
- up->corkflag = 1;
+ WRITE_ONCE(up->corkflag, 1);
} else {
- up->corkflag = 0;
+ WRITE_ONCE(up->corkflag, 0);
lock_sock(sk);
push_pending_frames(sk);
release_sock(sk);
@@ -2787,7 +2787,7 @@ int udp_lib_getsockopt(struct sock *sk, int level, int optname,
switch (optname) {
case UDP_CORK:
- val = up->corkflag;
+ val = READ_ONCE(up->corkflag);
break;
case UDP_ENCAP:
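
The udp.c hunks wrap every access to up->corkflag in READ_ONCE()/WRITE_ONCE() because the flag can be read and written without the socket lock held. In userspace C the closest analogue is a relaxed C11 atomic, as in this small sketch (cork_enabled and the function names are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic bool cork_enabled;   /* written by one thread, read by another */

static void set_cork(bool on)
{
    /* A plain-looking store, but the atomic type guarantees no tearing and
     * stops the compiler from caching the value across reads. */
    atomic_store_explicit(&cork_enabled, on, memory_order_relaxed);
}

static bool corking(void)
{
    return atomic_load_explicit(&cork_enabled, memory_order_relaxed);
}

int main(void)
{
    set_cork(true);
    printf("corking: %d\n", corking());
    set_cork(false);
    printf("corking: %d\n", corking());
    return 0;
}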
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index de2cf3943b91..a579ea14a69b 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb,
* things we don't know, ie. tcp syn flag or ports). If the
* rule is also a fragment-specific rule, non-fragments won't
* match it. */
+ acpar.fragoff = 0;
acpar.hotdrop = false;
acpar.state = state;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index dbc224023977..9b9ef09382ab 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5681,14 +5681,15 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
goto nla_put_failure;
if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
- rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
+ rt->fib6_nh->fib_nh_weight, AF_INET6,
+ 0) < 0)
goto nla_put_failure;
list_for_each_entry_safe(sibling, next_sibling,
&rt->fib6_siblings, fib6_siblings) {
if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
sibling->fib6_nh->fib_nh_weight,
- AF_INET6) < 0)
+ AF_INET6, 0) < 0)
goto nla_put_failure;
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index ea53847b5b7e..e505bb007e9f 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1303,7 +1303,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
int addr_len = msg->msg_namelen;
bool connected = false;
int ulen = len;
- int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
+ int corkreq = READ_ONCE(up->corkflag) || msg->msg_flags&MSG_MORE;
int err;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index efbefcbac3ac..7cab1cf09bf1 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void)
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
spin_lock_init(&newtbl->walk_lock);
- rhashtable_init(&newtbl->rhead, &mesh_rht_params);
+ if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) {
+ kfree(newtbl);
+ return NULL;
+ }
return newtbl;
}
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 204830a55240..3fbd0b9ff913 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -2,6 +2,7 @@
/*
* Copyright 2012-2013, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
* Copyright 2012-2013, cozybit Inc.
+ * Copyright (C) 2021 Intel Corporation
*/
#include "mesh.h"
@@ -588,7 +589,7 @@ void ieee80211_mps_frame_release(struct sta_info *sta,
/* only transmit to PS STA with announced, non-zero awake window */
if (test_sta_flag(sta, WLAN_STA_PS_STA) &&
- (!elems->awake_window || !le16_to_cpu(*elems->awake_window)))
+ (!elems->awake_window || !get_unaligned_le16(elems->awake_window)))
return;
if (!test_sta_flag(sta, WLAN_STA_MPSP_OWNER))
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index e5935e3d7a07..8c6416129d5b 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -392,10 +392,6 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
int mcast_rate;
bool use_basicrate = false;
- if (ieee80211_is_tx_data(txrc->skb) &&
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return false;
-
if (!pubsta || rc_no_data_or_no_ack_use_min(txrc)) {
__rate_control_send_low(txrc->hw, sband, pubsta, info,
txrc->rate_idx_mask);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 99ed68f7dc36..c4071b015c18 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4131,7 +4131,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
if (!bssid)
return false;
if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
- ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
+ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) ||
+ !is_valid_ether_addr(hdr->addr2))
return false;
if (ieee80211_is_beacon(hdr->frame_control))
return true;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 2d1193ed3eb5..8921088a5df6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2209,7 +2209,11 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
}
vht_mcs = iterator.this_arg[4] >> 4;
+ if (vht_mcs > 11)
+ vht_mcs = 0;
vht_nss = iterator.this_arg[4] & 0xF;
+ if (!vht_nss || vht_nss > 8)
+ vht_nss = 1;
break;
/*
@@ -3380,6 +3384,14 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
if (!ieee80211_amsdu_prepare_head(sdata, fast_tx, head))
goto out;
+ /* If n == 2, the "while (*frag_tail)" loop above didn't execute
+ * and frag_tail should be &skb_shinfo(head)->frag_list.
+ * However, ieee80211_amsdu_prepare_head() can reallocate it.
+ * Reload frag_tail to have it pointing to the correct place.
+ */
+ if (n == 2)
+ frag_tail = &skb_shinfo(head)->frag_list;
+
/*
* Pad out the previous subframe to a multiple of 4 by adding the
* padding to the next one, that's being added. Note that head->len
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index bca47fad5a16..4eed23e27610 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -520,6 +520,9 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
return RX_DROP_UNUSABLE;
}
+ /* reload hdr - skb might have been reallocated */
+ hdr = (void *)rx->skb->data;
+
data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
@@ -749,6 +752,9 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
}
+ /* reload hdr - skb might have been reallocated */
+ hdr = (void *)rx->skb->data;
+
data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
if (!rx->sta || data_len < 0)
return RX_DROP_UNUSABLE;
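
Both wpa.c hunks re-read hdr from rx->skb->data because the decryption helpers may have reallocated the skb, and the tx.c A-MSDU hunk reloads frag_tail for the same reason. The hazard is generic: any pointer computed into a buffer is stale once the buffer is reallocated. A tiny userspace illustration with realloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    size_t hdr_off = 0;
    char *buf = malloc(16);
    char *hdr, *tmp;

    if (!buf)
        return 1;
    strcpy(buf, "hdr:payload");
    hdr = buf + hdr_off;            /* pointer into the buffer */

    tmp = realloc(buf, 4096);       /* the buffer may move ... */
    if (!tmp) {
        free(buf);
        return 1;
    }
    buf = tmp;

    hdr = buf + hdr_off;            /* ... so recompute interior pointers */
    printf("%s\n", hdr);
    free(buf);
    return 0;
}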
diff --git a/net/mptcp/mptcp_diag.c b/net/mptcp/mptcp_diag.c
index f48eb6315bbb..292374fb0779 100644
--- a/net/mptcp/mptcp_diag.c
+++ b/net/mptcp/mptcp_diag.c
@@ -36,7 +36,7 @@ static int mptcp_diag_dump_one(struct netlink_callback *cb,
struct sock *sk;
net = sock_net(in_skb->sk);
- msk = mptcp_token_get_sock(req->id.idiag_cookie[0]);
+ msk = mptcp_token_get_sock(net, req->id.idiag_cookie[0]);
if (!msk)
goto out_nosk;
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index c4f9a5ce3815..050eea231528 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -1718,9 +1718,7 @@ static int mptcp_nl_cmd_set_flags(struct sk_buff *skb, struct genl_info *info)
list_for_each_entry(entry, &pernet->local_addr_list, list) {
if (addresses_equal(&entry->addr, &addr.addr, true)) {
- ret = mptcp_nl_addr_backup(net, &entry->addr, bkup);
- if (ret)
- return ret;
+ mptcp_nl_addr_backup(net, &entry->addr, bkup);
if (bkup)
entry->flags |= MPTCP_PM_ADDR_FLAG_BACKUP;
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 2602f1386160..e5df0b5971c8 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1316,7 +1316,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
goto alloc_skb;
}
- must_collapse = (info->size_goal - skb->len > 0) &&
+ must_collapse = (info->size_goal > skb->len) &&
(skb_shinfo(skb)->nr_frags < sysctl_max_skb_frags);
if (must_collapse) {
size_bias = skb->len;
@@ -1325,7 +1325,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
}
alloc_skb:
- if (!must_collapse && !ssk->sk_tx_skb_cache &&
+ if (!must_collapse &&
!mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held))
return 0;
@@ -2735,7 +2735,7 @@ cleanup:
inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32;
mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
- bool slow = lock_sock_fast(ssk);
+ bool slow = lock_sock_fast_nested(ssk);
sock_orphan(ssk);
unlock_sock_fast(ssk, slow);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index d3e6fd1615f1..dc984676c5eb 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -709,7 +709,7 @@ int mptcp_token_new_connect(struct sock *sk);
void mptcp_token_accept(struct mptcp_subflow_request_sock *r,
struct mptcp_sock *msk);
bool mptcp_token_exists(u32 token);
-struct mptcp_sock *mptcp_token_get_sock(u32 token);
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token);
struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot,
long *s_num);
void mptcp_token_destroy(struct mptcp_sock *msk);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 1de7ce883c37..6172f380dfb7 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -86,7 +86,7 @@ static struct mptcp_sock *subflow_token_join_request(struct request_sock *req)
struct mptcp_sock *msk;
int local_id;
- msk = mptcp_token_get_sock(subflow_req->token);
+ msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
if (!msk) {
SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINNOTOKEN);
return NULL;
diff --git a/net/mptcp/syncookies.c b/net/mptcp/syncookies.c
index 37127781aee9..7f22526346a7 100644
--- a/net/mptcp/syncookies.c
+++ b/net/mptcp/syncookies.c
@@ -108,18 +108,12 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
e->valid = 0;
- msk = mptcp_token_get_sock(e->token);
+ msk = mptcp_token_get_sock(net, e->token);
if (!msk) {
spin_unlock_bh(&join_entry_locks[i]);
return false;
}
- /* If this fails, the token got re-used in the mean time by another
- * mptcp socket in a different netns, i.e. entry is outdated.
- */
- if (!net_eq(sock_net((struct sock *)msk), net))
- goto err_put;
-
subflow_req->remote_nonce = e->remote_nonce;
subflow_req->local_nonce = e->local_nonce;
subflow_req->backup = e->backup;
@@ -128,11 +122,6 @@ bool mptcp_token_join_cookie_init_state(struct mptcp_subflow_request_sock *subfl
subflow_req->msk = msk;
spin_unlock_bh(&join_entry_locks[i]);
return true;
-
-err_put:
- spin_unlock_bh(&join_entry_locks[i]);
- sock_put((struct sock *)msk);
- return false;
}
void __init mptcp_join_cookie_init(void)
diff --git a/net/mptcp/token.c b/net/mptcp/token.c
index a98e554b034f..e581b341c5be 100644
--- a/net/mptcp/token.c
+++ b/net/mptcp/token.c
@@ -231,6 +231,7 @@ found:
/**
* mptcp_token_get_sock - retrieve mptcp connection sock using its token
+ * @net: restrict to this namespace
* @token: token of the mptcp connection to retrieve
*
* This function returns the mptcp connection structure with the given token.
@@ -238,7 +239,7 @@ found:
*
* returns NULL if no connection with the given token value exists.
*/
-struct mptcp_sock *mptcp_token_get_sock(u32 token)
+struct mptcp_sock *mptcp_token_get_sock(struct net *net, u32 token)
{
struct hlist_nulls_node *pos;
struct token_bucket *bucket;
@@ -251,11 +252,15 @@ struct mptcp_sock *mptcp_token_get_sock(u32 token)
again:
sk_nulls_for_each_rcu(sk, pos, &bucket->msk_chain) {
msk = mptcp_sk(sk);
- if (READ_ONCE(msk->token) != token)
+ if (READ_ONCE(msk->token) != token ||
+ !net_eq(sock_net(sk), net))
continue;
+
if (!refcount_inc_not_zero(&sk->sk_refcnt))
goto not_found;
- if (READ_ONCE(msk->token) != token) {
+
+ if (READ_ONCE(msk->token) != token ||
+ !net_eq(sock_net(sk), net)) {
sock_put(sk);
goto again;
}
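
The token.c lookup above now also compares the namespace, and it keeps the existing pattern of refcount_inc_not_zero() followed by re-checking the key: between finding the entry and pinning it, the socket may have been freed and its slot reused. Below is a hedged userspace sketch of just the "increment only if not zero" primitive with C11 atomics; get_ref_not_zero() is an invented name.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is still live (refcount > 0).
 * Returns false if the object is already on its way to being freed. */
static bool get_ref_not_zero(atomic_int *refcnt)
{
    int old = atomic_load(refcnt);

    do {
        if (old == 0)
            return false;           /* object already dying, do not touch */
    } while (!atomic_compare_exchange_weak(refcnt, &old, old + 1));

    return true;
}

int main(void)
{
    atomic_int live = 1;
    atomic_int dead = 0;

    printf("live: %d\n", get_ref_not_zero(&live)); /* 1: ref taken */
    printf("dead: %d\n", get_ref_not_zero(&dead)); /* 0: caller must retry lookup */
    return 0;
}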
diff --git a/net/mptcp/token_test.c b/net/mptcp/token_test.c
index e1bd6f0a0676..5d984bec1cd8 100644
--- a/net/mptcp/token_test.c
+++ b/net/mptcp/token_test.c
@@ -11,6 +11,7 @@ static struct mptcp_subflow_request_sock *build_req_sock(struct kunit *test)
GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, req);
mptcp_token_init_request((struct request_sock *)req);
+ sock_net_set((struct sock *)req, &init_net);
return req;
}
@@ -22,7 +23,7 @@ static void mptcp_token_test_req_basic(struct kunit *test)
KUNIT_ASSERT_EQ(test, 0,
mptcp_token_new_request((struct request_sock *)req));
KUNIT_EXPECT_NE(test, 0, (int)req->token);
- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(req->token));
+ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, req->token));
/* cleanup */
mptcp_token_destroy_request((struct request_sock *)req);
@@ -55,6 +56,7 @@ static struct mptcp_sock *build_msk(struct kunit *test)
msk = kunit_kzalloc(test, sizeof(struct mptcp_sock), GFP_USER);
KUNIT_EXPECT_NOT_ERR_OR_NULL(test, msk);
refcount_set(&((struct sock *)msk)->sk_refcnt, 1);
+ sock_net_set((struct sock *)msk, &init_net);
return msk;
}
@@ -74,11 +76,11 @@ static void mptcp_token_test_msk_basic(struct kunit *test)
mptcp_token_new_connect((struct sock *)icsk));
KUNIT_EXPECT_NE(test, 0, (int)ctx->token);
KUNIT_EXPECT_EQ(test, ctx->token, msk->token);
- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(ctx->token));
+ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, ctx->token));
KUNIT_EXPECT_EQ(test, 2, (int)refcount_read(&sk->sk_refcnt));
mptcp_token_destroy(msk);
- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(ctx->token));
+ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, ctx->token));
}
static void mptcp_token_test_accept(struct kunit *test)
@@ -90,11 +92,11 @@ static void mptcp_token_test_accept(struct kunit *test)
mptcp_token_new_request((struct request_sock *)req));
msk->token = req->token;
mptcp_token_accept(req, msk);
- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
/* this is now a no-op */
mptcp_token_destroy_request((struct request_sock *)req);
- KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(msk->token));
+ KUNIT_EXPECT_PTR_EQ(test, msk, mptcp_token_get_sock(&init_net, msk->token));
/* cleanup */
mptcp_token_destroy(msk);
@@ -116,7 +118,7 @@ static void mptcp_token_test_destroyed(struct kunit *test)
/* simulate race on removal */
refcount_set(&sk->sk_refcnt, 0);
- KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(msk->token));
+ KUNIT_EXPECT_PTR_EQ(test, null_msk, mptcp_token_get_sock(&init_net, msk->token));
/* cleanup */
mptcp_token_destroy(msk);
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 6186358eac7c..6e391308431d 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -130,11 +130,11 @@ htable_size(u8 hbits)
{
size_t hsize;
- /* We must fit both into u32 in jhash and size_t */
+ /* We must fit both into u32 in jhash and INT_MAX in kvmalloc_node() */
if (hbits > 31)
return 0;
hsize = jhash_size(hbits);
- if ((((size_t)-1) - sizeof(struct htable)) / sizeof(struct hbucket *)
+ if ((INT_MAX - sizeof(struct htable)) / sizeof(struct hbucket *)
< hsize)
return 0;
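
htable_size() above swaps the SIZE_MAX-based check for an INT_MAX bound because kvmalloc_node() rejects allocations larger than INT_MAX anyway. The guard itself is the usual divide-before-multiply overflow check; a standalone sketch (table_bytes() and struct header are invented names):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct header { size_t nbuckets; };

/* Size of header + n pointer slots, or 0 if that would exceed INT_MAX. */
static size_t table_bytes(size_t n)
{
    if ((INT_MAX - sizeof(struct header)) / sizeof(void *) < n)
        return 0;   /* the allocator would refuse it anyway */
    return sizeof(struct header) + n * sizeof(void *);
}

int main(void)
{
    printf("%zu\n", table_bytes(1024));                      /* fine */
    printf("%zu\n", table_bytes(SIZE_MAX / sizeof(void *))); /* 0: too large */
    return 0;
}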
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index c100c6b112c8..2c467c422dc6 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -1468,6 +1468,10 @@ int __init ip_vs_conn_init(void)
int idx;
/* Compute size and mask */
+ if (ip_vs_conn_tab_bits < 8 || ip_vs_conn_tab_bits > 20) {
+ pr_info("conn_tab_bits not in [8, 20]. Using default value\n");
+ ip_vs_conn_tab_bits = CONFIG_IP_VS_TAB_BITS;
+ }
ip_vs_conn_tab_size = 1 << ip_vs_conn_tab_bits;
ip_vs_conn_tab_mask = ip_vs_conn_tab_size - 1;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 94e18fb9690d..770a63103c7a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -74,10 +74,14 @@ static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
+/* serialize hash resizes and nf_ct_iterate_cleanup */
+static DEFINE_MUTEX(nf_conntrack_mutex);
+
#define GC_SCAN_INTERVAL (120u * HZ)
#define GC_SCAN_MAX_DURATION msecs_to_jiffies(10)
-#define MAX_CHAINLEN 64u
+#define MIN_CHAINLEN 8u
+#define MAX_CHAINLEN (32u - MIN_CHAINLEN)
static struct conntrack_gc_work conntrack_gc_work;
@@ -188,11 +192,13 @@ seqcount_spinlock_t nf_conntrack_generation __read_mostly;
static siphash_key_t nf_conntrack_hash_rnd __read_mostly;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
+ unsigned int zoneid,
const struct net *net)
{
struct {
struct nf_conntrack_man src;
union nf_inet_addr dst_addr;
+ unsigned int zone;
u32 net_mix;
u16 dport;
u16 proto;
@@ -205,6 +211,7 @@ static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
/* The direction must be ignored, so handle usable members manually. */
combined.src = tuple->src;
combined.dst_addr = tuple->dst.u3;
+ combined.zone = zoneid;
combined.net_mix = net_hash_mix(net);
combined.dport = (__force __u16)tuple->dst.u.all;
combined.proto = tuple->dst.protonum;
@@ -219,15 +226,17 @@ static u32 scale_hash(u32 hash)
static u32 __hash_conntrack(const struct net *net,
const struct nf_conntrack_tuple *tuple,
+ unsigned int zoneid,
unsigned int size)
{
- return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
+ return reciprocal_scale(hash_conntrack_raw(tuple, zoneid, net), size);
}
static u32 hash_conntrack(const struct net *net,
- const struct nf_conntrack_tuple *tuple)
+ const struct nf_conntrack_tuple *tuple,
+ unsigned int zoneid)
{
- return scale_hash(hash_conntrack_raw(tuple, net));
+ return scale_hash(hash_conntrack_raw(tuple, zoneid, net));
}
static bool nf_ct_get_tuple_ports(const struct sk_buff *skb,
@@ -650,9 +659,11 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
clean_from_lists(ct);
@@ -819,8 +830,20 @@ struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
const struct nf_conntrack_tuple *tuple)
{
- return __nf_conntrack_find_get(net, zone, tuple,
- hash_conntrack_raw(tuple, net));
+ unsigned int rid, zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+ struct nf_conntrack_tuple_hash *thash;
+
+ thash = __nf_conntrack_find_get(net, zone, tuple,
+ hash_conntrack_raw(tuple, zone_id, net));
+
+ if (thash)
+ return thash;
+
+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+ if (rid != zone_id)
+ return __nf_conntrack_find_get(net, zone, tuple,
+ hash_conntrack_raw(tuple, rid, net));
+ return thash;
}
EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
@@ -842,6 +865,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct hlist_nulls_node *n;
+ unsigned int max_chainlen;
unsigned int chainlen = 0;
unsigned int sequence;
int err = -EEXIST;
@@ -852,18 +876,22 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
do {
sequence = read_seqcount_begin(&nf_conntrack_generation);
hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_ORIGINAL));
reply_hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
+
/* See if there's one in the list already, including reverse */
hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode) {
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN)
+ if (chainlen++ > max_chainlen)
goto chaintoolong;
}
@@ -873,7 +901,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN)
+ if (chainlen++ > max_chainlen)
goto chaintoolong;
}
@@ -1103,8 +1131,8 @@ drop:
int
__nf_conntrack_confirm(struct sk_buff *skb)
{
+ unsigned int chainlen = 0, sequence, max_chainlen;
const struct nf_conntrack_zone *zone;
- unsigned int chainlen = 0, sequence;
unsigned int hash, reply_hash;
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
@@ -1133,8 +1161,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
hash = scale_hash(hash);
reply_hash = hash_conntrack(net,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
-
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ nf_ct_zone_id(nf_ct_zone(ct), IP_CT_DIR_REPLY));
} while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));
/* We're not in hash table, and we refuse to set up related
@@ -1168,6 +1196,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
goto dying;
}
+ max_chainlen = MIN_CHAINLEN + prandom_u32_max(MAX_CHAINLEN);
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
not in the hash. If there is, we lost race. */
@@ -1175,7 +1204,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN)
+ if (chainlen++ > max_chainlen)
goto chaintoolong;
}
@@ -1184,7 +1213,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
zone, net))
goto out;
- if (chainlen++ > MAX_CHAINLEN) {
+ if (chainlen++ > max_chainlen) {
chaintoolong:
nf_ct_add_to_dying_list(ct);
NF_CT_STAT_INC(net, chaintoolong);
@@ -1246,7 +1275,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
rcu_read_lock();
begin:
nf_conntrack_get_ht(&ct_hash, &hsize);
- hash = __hash_conntrack(net, tuple, hsize);
+ hash = __hash_conntrack(net, tuple, nf_ct_zone_id(zone, IP_CT_DIR_REPLY), hsize);
hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[hash], hnnode) {
ct = nf_ct_tuplehash_to_ctrack(h);
@@ -1687,8 +1716,8 @@ resolve_normal_ct(struct nf_conn *tmpl,
struct nf_conntrack_tuple_hash *h;
enum ip_conntrack_info ctinfo;
struct nf_conntrack_zone tmp;
+ u32 hash, zone_id, rid;
struct nf_conn *ct;
- u32 hash;
if (!nf_ct_get_tuple(skb, skb_network_offset(skb),
dataoff, state->pf, protonum, state->net,
@@ -1699,8 +1728,20 @@ resolve_normal_ct(struct nf_conn *tmpl,
/* look for tuple match */
zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
- hash = hash_conntrack_raw(&tuple, state->net);
+
+ zone_id = nf_ct_zone_id(zone, IP_CT_DIR_ORIGINAL);
+ hash = hash_conntrack_raw(&tuple, zone_id, state->net);
h = __nf_conntrack_find_get(state->net, zone, &tuple, hash);
+
+ if (!h) {
+ rid = nf_ct_zone_id(zone, IP_CT_DIR_REPLY);
+ if (zone_id != rid) {
+ u32 tmp = hash_conntrack_raw(&tuple, rid, state->net);
+
+ h = __nf_conntrack_find_get(state->net, zone, &tuple, tmp);
+ }
+ }
+
if (!h) {
h = init_conntrack(state->net, tmpl, &tuple,
skb, dataoff, hash);
@@ -2225,28 +2266,31 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
spinlock_t *lockp;
for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+ struct hlist_nulls_head *hslot = &nf_conntrack_hash[*bucket];
+
+ if (hlist_nulls_empty(hslot))
+ continue;
+
lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
local_bh_disable();
nf_conntrack_lock(lockp);
- if (*bucket < nf_conntrack_htable_size) {
- hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
- if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
- continue;
- /* All nf_conn objects are added to hash table twice, one
- * for original direction tuple, once for the reply tuple.
- *
- * Exception: In the IPS_NAT_CLASH case, only the reply
- * tuple is added (the original tuple already existed for
- * a different object).
- *
- * We only need to call the iterator once for each
- * conntrack, so we just use the 'reply' direction
- * tuple while iterating.
- */
- ct = nf_ct_tuplehash_to_ctrack(h);
- if (iter(ct, data))
- goto found;
- }
+ hlist_nulls_for_each_entry(h, n, hslot, hnnode) {
+ if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+ continue;
+ /* All nf_conn objects are added to hash table twice, one
+ * for original direction tuple, once for the reply tuple.
+ *
+ * Exception: In the IPS_NAT_CLASH case, only the reply
+ * tuple is added (the original tuple already existed for
+ * a different object).
+ *
+ * We only need to call the iterator once for each
+ * conntrack, so we just use the 'reply' direction
+ * tuple while iterating.
+ */
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (iter(ct, data))
+ goto found;
}
spin_unlock(lockp);
local_bh_enable();
@@ -2264,26 +2308,20 @@ found:
static void nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data),
void *data, u32 portid, int report)
{
- unsigned int bucket = 0, sequence;
+ unsigned int bucket = 0;
struct nf_conn *ct;
might_sleep();
- for (;;) {
- sequence = read_seqcount_begin(&nf_conntrack_generation);
-
- while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
- /* Time to push up daises... */
+ mutex_lock(&nf_conntrack_mutex);
+ while ((ct = get_next_corpse(iter, data, &bucket)) != NULL) {
+ /* Time to push up daises... */
- nf_ct_delete(ct, portid, report);
- nf_ct_put(ct);
- cond_resched();
- }
-
- if (!read_seqcount_retry(&nf_conntrack_generation, sequence))
- break;
- bucket = 0;
+ nf_ct_delete(ct, portid, report);
+ nf_ct_put(ct);
+ cond_resched();
}
+ mutex_unlock(&nf_conntrack_mutex);
}
struct iter_data {
@@ -2519,8 +2557,10 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
if (!hash)
return -ENOMEM;
+ mutex_lock(&nf_conntrack_mutex);
old_size = nf_conntrack_htable_size;
if (old_size == hashsize) {
+ mutex_unlock(&nf_conntrack_mutex);
kvfree(hash);
return 0;
}
@@ -2537,12 +2577,16 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
for (i = 0; i < nf_conntrack_htable_size; i++) {
while (!hlist_nulls_empty(&nf_conntrack_hash[i])) {
+ unsigned int zone_id;
+
h = hlist_nulls_entry(nf_conntrack_hash[i].first,
struct nf_conntrack_tuple_hash, hnnode);
ct = nf_ct_tuplehash_to_ctrack(h);
hlist_nulls_del_rcu(&h->hnnode);
+
+ zone_id = nf_ct_zone_id(nf_ct_zone(ct), NF_CT_DIRECTION(h));
bucket = __hash_conntrack(nf_ct_net(ct),
- &h->tuple, hashsize);
+ &h->tuple, zone_id, hashsize);
hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
}
}
@@ -2556,6 +2600,8 @@ int nf_conntrack_hash_resize(unsigned int hashsize)
nf_conntrack_all_unlock();
local_bh_enable();
+ mutex_unlock(&nf_conntrack_mutex);
+
synchronize_net();
kvfree(old_hash);
return 0;
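
The conntrack hunks fold the zone id into the hashed key so identical tuples from different zones no longer land on the same bucket chain, and the insert paths switch to a randomized per-insert chain-length cutoff. The kernel hashes with siphash and a boot-time random key; the sketch below only illustrates "hash the whole key struct, zone included", with a toy FNV-1a hash and made-up field names.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct conn_key {
    uint32_t saddr, daddr;
    uint16_t sport, dport;
    uint32_t zone;          /* distinguishes otherwise identical tuples */
};

/* Toy FNV-1a; the kernel uses siphash keyed with boot-time randomness. */
static uint32_t fnv1a(const void *data, size_t len)
{
    const unsigned char *p = data;
    uint32_t h = 2166136261u;

    while (len--) {
        h ^= *p++;
        h *= 16777619u;
    }
    return h;
}

static uint32_t bucket_of(const struct conn_key *k, uint32_t nbuckets)
{
    return fnv1a(k, sizeof(*k)) % nbuckets;
}

int main(void)
{
    struct conn_key a, b;

    memset(&a, 0, sizeof(a));       /* zero the padding so hashing the */
    memset(&b, 0, sizeof(b));       /* whole struct is well defined */
    a.saddr = b.saddr = 0x0a000001;
    a.daddr = b.daddr = 0x0a000002;
    a.sport = b.sport = 1234;
    a.dport = b.dport = 80;
    a.zone = 1;
    b.zone = 2;                     /* same tuple, different zone */

    printf("bucket(a)=%u bucket(b)=%u\n",
           bucket_of(&a, 65536), bucket_of(&b, 65536));
    return 0;
}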
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 7008961f5cb0..273117683922 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -150,13 +150,16 @@ static void __nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl)
/* We keep an extra hash for each conntrack, for fast searching. */
static unsigned int
-hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
+hash_by_src(const struct net *net,
+ const struct nf_conntrack_zone *zone,
+ const struct nf_conntrack_tuple *tuple)
{
unsigned int hash;
struct {
struct nf_conntrack_man src;
u32 net_mix;
u32 protonum;
+ u32 zone;
} __aligned(SIPHASH_ALIGNMENT) combined;
get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
@@ -165,9 +168,13 @@ hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
/* Original src, to ensure we map it consistently if poss. */
combined.src = tuple->src;
- combined.net_mix = net_hash_mix(n);
+ combined.net_mix = net_hash_mix(net);
combined.protonum = tuple->dst.protonum;
+ /* Zone ID can be used provided its valid for both directions */
+ if (zone->dir == NF_CT_DEFAULT_ZONE_DIR)
+ combined.zone = zone->id;
+
hash = siphash(&combined, sizeof(combined), &nf_nat_hash_rnd);
return reciprocal_scale(hash, nf_nat_htable_size);
@@ -272,7 +279,7 @@ find_appropriate_src(struct net *net,
struct nf_conntrack_tuple *result,
const struct nf_nat_range2 *range)
{
- unsigned int h = hash_by_src(net, tuple);
+ unsigned int h = hash_by_src(net, zone, tuple);
const struct nf_conn *ct;
hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
@@ -619,7 +626,7 @@ nf_nat_setup_info(struct nf_conn *ct,
unsigned int srchash;
spinlock_t *lock;
- srchash = hash_by_src(net,
+ srchash = hash_by_src(net, nf_ct_zone(ct),
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
lock = &nf_nat_locks[srchash % CONNTRACK_LOCKS];
spin_lock_bh(lock);
@@ -788,7 +795,7 @@ static void __nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
unsigned int h;
- h = hash_by_src(nf_ct_net(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ h = hash_by_src(nf_ct_net(ct), nf_ct_zone(ct), &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
spin_lock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
hlist_del_rcu(&ct->nat_bysource);
spin_unlock_bh(&nf_nat_locks[h % CONNTRACK_LOCKS]);
diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c
index 8e8a65d46345..acd73f717a08 100644
--- a/net/netfilter/nf_nat_masquerade.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -9,8 +9,19 @@
#include <net/netfilter/nf_nat_masquerade.h>
+struct masq_dev_work {
+ struct work_struct work;
+ struct net *net;
+ union nf_inet_addr addr;
+ int ifindex;
+ int (*iter)(struct nf_conn *i, void *data);
+};
+
+#define MAX_MASQ_WORKER_COUNT 16
+
static DEFINE_MUTEX(masq_mutex);
static unsigned int masq_refcnt __read_mostly;
+static atomic_t masq_worker_count __read_mostly;
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
@@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4);
-static int device_cmp(struct nf_conn *i, void *ifindex)
+static void iterate_cleanup_work(struct work_struct *work)
+{
+ struct masq_dev_work *w;
+
+ w = container_of(work, struct masq_dev_work, work);
+
+ nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0);
+
+ put_net(w->net);
+ kfree(w);
+ atomic_dec(&masq_worker_count);
+ module_put(THIS_MODULE);
+}
+
+/* Iterate conntrack table in the background and remove conntrack entries
+ * that use the device/address being removed.
+ *
+ * In case too many work items have been queued already or memory allocation
+ * fails iteration is skipped, conntrack entries will time out eventually.
+ */
+static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr,
+ int ifindex,
+ int (*iter)(struct nf_conn *i, void *data),
+ gfp_t gfp_flags)
+{
+ struct masq_dev_work *w;
+
+ if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT)
+ return;
+
+ net = maybe_get_net(net);
+ if (!net)
+ return;
+
+ if (!try_module_get(THIS_MODULE))
+ goto err_module;
+
+ w = kzalloc(sizeof(*w), gfp_flags);
+ if (w) {
+ /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */
+ atomic_inc(&masq_worker_count);
+
+ INIT_WORK(&w->work, iterate_cleanup_work);
+ w->ifindex = ifindex;
+ w->net = net;
+ w->iter = iter;
+ if (addr)
+ w->addr = *addr;
+ schedule_work(&w->work);
+ return;
+ }
+
+ module_put(THIS_MODULE);
+ err_module:
+ put_net(net);
+}
+
+static int device_cmp(struct nf_conn *i, void *arg)
{
const struct nf_conn_nat *nat = nfct_nat(i);
+ const struct masq_dev_work *w = arg;
if (!nat)
return 0;
- return nat->masq_index == (int)(long)ifindex;
+ return nat->masq_index == w->ifindex;
}
static int masq_device_event(struct notifier_block *this,
@@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this,
* and forget them.
*/
- nf_ct_iterate_cleanup_net(net, device_cmp,
- (void *)(long)dev->ifindex, 0, 0);
+ nf_nat_masq_schedule(net, NULL, dev->ifindex,
+ device_cmp, GFP_KERNEL);
}
return NOTIFY_DONE;
@@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this,
static int inet_cmp(struct nf_conn *ct, void *ptr)
{
- struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
- struct net_device *dev = ifa->ifa_dev->dev;
struct nf_conntrack_tuple *tuple;
+ struct masq_dev_work *w = ptr;
- if (!device_cmp(ct, (void *)(long)dev->ifindex))
+ if (!device_cmp(ct, ptr))
return 0;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
- return ifa->ifa_address == tuple->dst.u3.ip;
+ return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3);
}
static int masq_inet_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
- struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev;
- struct net *net = dev_net(idev->dev);
+ const struct in_ifaddr *ifa = ptr;
+ const struct in_device *idev;
+ const struct net_device *dev;
+ union nf_inet_addr addr;
+
+ if (event != NETDEV_DOWN)
+ return NOTIFY_DONE;
/* The masq_dev_notifier will catch the case of the device going
* down. So if the inetdev is dead and being destroyed we have
* no work to do. Otherwise this is an individual address removal
* and we have to perform the flush.
*/
+ idev = ifa->ifa_dev;
if (idev->dead)
return NOTIFY_DONE;
- if (event == NETDEV_DOWN)
- nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0);
+ memset(&addr, 0, sizeof(addr));
+
+ addr.ip = ifa->ifa_address;
+
+ dev = idev->dev;
+ nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex,
+ inet_cmp, GFP_KERNEL);
return NOTIFY_DONE;
}
@@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = {
};
#if IS_ENABLED(CONFIG_IPV6)
-static atomic_t v6_worker_count __read_mostly;
-
static int
nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
@@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
}
EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6);
-struct masq_dev_work {
- struct work_struct work;
- struct net *net;
- struct in6_addr addr;
- int ifindex;
-};
-
-static int inet6_cmp(struct nf_conn *ct, void *work)
-{
- struct masq_dev_work *w = (struct masq_dev_work *)work;
- struct nf_conntrack_tuple *tuple;
-
- if (!device_cmp(ct, (void *)(long)w->ifindex))
- return 0;
-
- tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
-
- return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6);
-}
-
-static void iterate_cleanup_work(struct work_struct *work)
-{
- struct masq_dev_work *w;
-
- w = container_of(work, struct masq_dev_work, work);
-
- nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0);
-
- put_net(w->net);
- kfree(w);
- atomic_dec(&v6_worker_count);
- module_put(THIS_MODULE);
-}
-
/* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep).
*
* Defer it to the system workqueue.
@@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this,
{
struct inet6_ifaddr *ifa = ptr;
const struct net_device *dev;
- struct masq_dev_work *w;
- struct net *net;
+ union nf_inet_addr addr;
- if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16)
+ if (event != NETDEV_DOWN)
return NOTIFY_DONE;
dev = ifa->idev->dev;
- net = maybe_get_net(dev_net(dev));
- if (!net)
- return NOTIFY_DONE;
- if (!try_module_get(THIS_MODULE))
- goto err_module;
+ memset(&addr, 0, sizeof(addr));
- w = kmalloc(sizeof(*w), GFP_ATOMIC);
- if (w) {
- atomic_inc(&v6_worker_count);
-
- INIT_WORK(&w->work, iterate_cleanup_work);
- w->ifindex = dev->ifindex;
- w->net = net;
- w->addr = ifa->addr;
- schedule_work(&w->work);
+ addr.in6 = ifa->addr;
- return NOTIFY_DONE;
- }
-
- module_put(THIS_MODULE);
- err_module:
- put_net(net);
+ nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp,
+ GFP_ATOMIC);
return NOTIFY_DONE;
}
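
The masquerade notifiers run in atomic context, so the conntrack walk above is packaged into a masq_dev_work item and handed to the system workqueue, with MAX_MASQ_WORKER_COUNT capping how many are in flight. A rough userspace analogue of "copy the arguments, hand them to another execution context, bound the backlog" using a detached pthread; struct cleanup_work and schedule_cleanup() are invented names.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define MAX_WORKERS 16

static atomic_int worker_count;

struct cleanup_work {
    int ifindex;                /* arguments copied for the worker */
};

static void *cleanup_worker(void *arg)
{
    struct cleanup_work *w = arg;

    printf("cleaning up entries for ifindex %d\n", w->ifindex);
    free(w);
    atomic_fetch_sub(&worker_count, 1);
    return NULL;
}

/* Called from a context that must not block: just queue and return. */
static void schedule_cleanup(int ifindex)
{
    struct cleanup_work *w;
    pthread_t tid;

    if (atomic_load(&worker_count) > MAX_WORKERS)
        return;                 /* too much queued; entries will age out */

    w = malloc(sizeof(*w));
    if (!w)
        return;
    w->ifindex = ifindex;

    atomic_fetch_add(&worker_count, 1);
    if (pthread_create(&tid, NULL, cleanup_worker, w) != 0) {
        atomic_fetch_sub(&worker_count, 1);
        free(w);
        return;
    }
    pthread_detach(tid);
}

int main(void)
{
    schedule_cleanup(3);
    sleep(1);                   /* crude: give the detached worker time to run */
    return 0;
}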
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 081437dd75b7..b9546defdc28 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4336,7 +4336,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
if (ops->privsize != NULL)
size = ops->privsize(nla, &desc);
alloc_size = sizeof(*set) + size + udlen;
- if (alloc_size < size)
+ if (alloc_size < size || alloc_size > INT_MAX)
return -ENOMEM;
set = kvzalloc(alloc_size, GFP_KERNEL);
if (!set)
@@ -9599,7 +9599,6 @@ static void __nft_release_table(struct net *net, struct nft_table *table)
table->use--;
nf_tables_chain_destroy(&ctx);
}
- list_del(&table->list);
nf_tables_table_destroy(&ctx);
}
@@ -9612,6 +9611,8 @@ static void __nft_release_tables(struct net *net)
if (nft_table_has_owner(table))
continue;
+ list_del(&table->list);
+
__nft_release_table(net, table);
}
}
@@ -9619,31 +9620,38 @@ static void __nft_release_tables(struct net *net)
static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
+ struct nft_table *table, *to_delete[8];
struct nftables_pernet *nft_net;
struct netlink_notify *n = ptr;
- struct nft_table *table, *nt;
struct net *net = n->net;
- bool release = false;
+ unsigned int deleted;
+ bool restart = false;
if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
return NOTIFY_DONE;
nft_net = nft_pernet(net);
+ deleted = 0;
mutex_lock(&nft_net->commit_mutex);
+again:
list_for_each_entry(table, &nft_net->tables, list) {
if (nft_table_has_owner(table) &&
n->portid == table->nlpid) {
__nft_release_hook(net, table);
- release = true;
+ list_del_rcu(&table->list);
+ to_delete[deleted++] = table;
+ if (deleted >= ARRAY_SIZE(to_delete))
+ break;
}
}
- if (release) {
+ if (deleted) {
+ restart = deleted >= ARRAY_SIZE(to_delete);
synchronize_rcu();
- list_for_each_entry_safe(table, nt, &nft_net->tables, list) {
- if (nft_table_has_owner(table) &&
- n->portid == table->nlpid)
- __nft_release_table(net, table);
- }
+ while (deleted)
+ __nft_release_table(net, to_delete[--deleted]);
+
+ if (restart)
+ goto again;
}
mutex_unlock(&nft_net->commit_mutex);
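
nft_rcv_nl_event() above no longer re-walks the table list after synchronize_rcu(); it unlinks up to eight matching tables into a fixed array, releases them, and jumps back if the array filled up. The same "collect a bounded batch under the lock, process, repeat" shape in plain C on a singly linked list (all names invented):

#include <stdio.h>
#include <stdlib.h>

#define BATCH 8

struct node {
    int owner;
    struct node *next;
};

static struct node *head;

/* Remove every node with the given owner, at most BATCH per pass. */
static void release_by_owner(int owner)
{
    struct node *to_delete[BATCH];
    unsigned int deleted;
    int more;

    do {
        deleted = 0;
        more = 0;
        for (struct node **pp = &head; *pp; ) {
            struct node *n = *pp;

            if (n->owner != owner) {
                pp = &n->next;
                continue;
            }
            if (deleted == BATCH) {
                more = 1;       /* batch full, another pass needed */
                break;
            }
            *pp = n->next;      /* unlink now ... */
            to_delete[deleted++] = n;
        }

        /* ... the kernel waits for readers (synchronize_rcu) here ... */
        while (deleted)
            free(to_delete[--deleted]);
    } while (more);
}

int main(void)
{
    for (int i = 0; i < 20; i++) {
        struct node *n = malloc(sizeof(*n));

        if (!n)
            break;
        n->owner = i % 2;
        n->next = head;
        head = n;
    }

    release_by_owner(1);

    int left = 0;
    for (struct node *n = head; n; n = n->next)
        left++;
    printf("%d nodes left\n", left);   /* 10 */
    return 0;
}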
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 272bcdb1392d..f69cc73c5813 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -19,6 +19,7 @@
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_arp/arp_tables.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_log.h>
/* Used for matches where *info is larger than X byte */
#define NFT_MATCH_LARGE_THRESH 192
@@ -257,8 +258,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
nft_compat_wait_for_destructors();
ret = xt_check_target(&par, size, proto, inv);
- if (ret < 0)
+ if (ret < 0) {
+ if (ret == -ENOENT) {
+ const char *modname = NULL;
+
+ if (strcmp(target->name, "LOG") == 0)
+ modname = "nf_log_syslog";
+ else if (strcmp(target->name, "NFLOG") == 0)
+ modname = "nfnetlink_log";
+
+ if (modname &&
+ nft_request_module(ctx->net, "%s", modname) == -EAGAIN)
+ return -EAGAIN;
+ }
+
return ret;
+ }
/* The standard target cannot be used */
if (!target->target)
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index 2ff75f7637b0..f39244f9c0ed 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -44,6 +44,7 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
static int log_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_log_info *loginfo = par->targinfo;
+ int ret;
if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
return -EINVAL;
@@ -58,7 +59,14 @@ static int log_tg_check(const struct xt_tgchk_param *par)
return -EINVAL;
}
- return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+ if (ret != 0 && !par->nft_compat) {
+ request_module("%s", "nf_log_syslog");
+
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+ }
+
+ return ret;
}
static void log_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index fb5793208059..e660c3710a10 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -42,13 +42,21 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
static int nflog_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_nflog_info *info = par->targinfo;
+ int ret;
if (info->flags & ~XT_NFLOG_MASK)
return -EINVAL;
if (info->prefix[sizeof(info->prefix) - 1] != '\0')
return -EINVAL;
- return nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+ if (ret != 0 && !par->nft_compat) {
+ request_module("%s", "nfnetlink_log");
+
+ ret = nf_logger_find_get(par->family, NF_LOG_TYPE_ULOG);
+ }
+
+ return ret;
}
static void nflog_tg_destroy(const struct xt_tgdtor_param *par)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 23b21253b3c3..eb6345a027e1 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2188,18 +2188,24 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
arg->count = arg->skip;
+ rcu_read_lock();
idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
/* don't return filters that are being deleted */
if (!refcount_inc_not_zero(&f->refcnt))
continue;
+ rcu_read_unlock();
+
if (arg->fn(tp, f, arg) < 0) {
__fl_put(f);
arg->stop = 1;
+ rcu_read_lock();
break;
}
__fl_put(f);
arg->count++;
+ rcu_read_lock();
}
+ rcu_read_unlock();
arg->cookie = id;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 5e90e9b160e3..12f39a2dffd4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -513,6 +513,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt,
return stab;
}
+ if (s->size_log > STAB_SIZE_LOG_MAX ||
+ s->cell_log > STAB_SIZE_LOG_MAX) {
+ NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table");
+ return ERR_PTR(-EINVAL);
+ }
+
stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL);
if (!stab)
return ERR_PTR(-ENOMEM);
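Note: the new STAB_SIZE_LOG_MAX test above bounds the user-supplied size_log/cell_log fields, which are later used as shift amounts when packet lengths are derived from the size table, so an unbounded value would lead to an out-of-range shift. The userspace sketch below shows the same validate-before-shift pattern; LOG_MAX and cell_size_from_log() are illustrative names, and the cap is not necessarily the kernel's exact constant.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LOG_MAX 29	/* illustrative cap, not the kernel's exact value */

static bool cell_size_from_log(uint32_t cell_log, uint64_t *out)
{
	if (cell_log > LOG_MAX)	/* reject before shifting */
		return false;

	*out = UINT64_C(1) << cell_log;
	return true;
}

int main(void)
{
	uint64_t sz;

	if (cell_size_from_log(10, &sz))
		printf("cell_log=10 -> %llu bytes\n", (unsigned long long)sz);
	if (!cell_size_from_log(200, &sz))
		printf("cell_log=200 rejected\n");
	return 0;
}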
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 5ef86fdb1176..1f1786021d9c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -702,7 +702,7 @@ static int sctp_rcv_ootb(struct sk_buff *skb)
ch = skb_header_pointer(skb, offset, sizeof(*ch), &_ch);
/* Break out if chunk length is less then minimal. */
- if (ntohs(ch->length) < sizeof(_ch))
+ if (!ch || ntohs(ch->length) < sizeof(_ch))
break;
ch_end = offset + SCTP_PAD4(ntohs(ch->length));
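Note: skb_header_pointer() returns NULL when the requested chunk header lies beyond the end of the packet, so the walker above has to test the pointer before reading ch->length. The userspace sketch below mimics that contract with an invented header_pointer() helper; it is an illustration of the pattern, not the SCTP code itself.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct chunk_hdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network byte order in real packets */
};

/* Copy "need" bytes at "offset" into buf, or return NULL if out of range. */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t need, void *buf)
{
	if (offset + need > pkt_len || offset + need < offset)
		return NULL;		/* header would run past the packet */

	memcpy(buf, pkt + offset, need);
	return buf;
}

int main(void)
{
	uint8_t pkt[6] = { 1, 0, 0, 8, 0xaa, 0xbb };	/* truncated packet */
	struct chunk_hdr hdr;
	const struct chunk_hdr *ch;

	ch = header_pointer(pkt, sizeof(pkt), 0, sizeof(hdr), &hdr);
	printf("first header %s\n", ch ? "readable" : "missing");

	ch = header_pointer(pkt, sizeof(pkt), 4, sizeof(hdr), &hdr);
	if (!ch)	/* the same "!ch" test the fix above adds */
		puts("next header runs past the packet, stop parsing");
	return 0;
}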
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index e286dafd6e88..6ec1ebe878ae 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -230,7 +230,8 @@ static int smc_clc_prfx_set(struct socket *clcsock,
goto out_rel;
}
/* get address to which the internal TCP socket is bound */
- kernel_getsockname(clcsock, (struct sockaddr *)&addrs);
+ if (kernel_getsockname(clcsock, (struct sockaddr *)&addrs) < 0)
+ goto out_rel;
/* analyze IP specific data of net_device belonging to TCP socket */
addr6 = (struct sockaddr_in6 *)&addrs;
rcu_read_lock();
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index af227b65669e..8280c938be80 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -1474,7 +1474,9 @@ static void smc_conn_abort_work(struct work_struct *work)
abort_work);
struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
+ lock_sock(&smc->sk);
smc_conn_kill(conn, true);
+ release_sock(&smc->sk);
sock_put(&smc->sk); /* sock_hold done by schedulers of abort_work */
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 92345c9bb60c..efac5989edb5 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -608,20 +608,42 @@ static void unix_release_sock(struct sock *sk, int embrion)
static void init_peercred(struct sock *sk)
{
- put_pid(sk->sk_peer_pid);
- if (sk->sk_peer_cred)
- put_cred(sk->sk_peer_cred);
+ const struct cred *old_cred;
+ struct pid *old_pid;
+
+ spin_lock(&sk->sk_peer_lock);
+ old_pid = sk->sk_peer_pid;
+ old_cred = sk->sk_peer_cred;
sk->sk_peer_pid = get_pid(task_tgid(current));
sk->sk_peer_cred = get_current_cred();
+ spin_unlock(&sk->sk_peer_lock);
+
+ put_pid(old_pid);
+ put_cred(old_cred);
}
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
- put_pid(sk->sk_peer_pid);
- if (sk->sk_peer_cred)
- put_cred(sk->sk_peer_cred);
+ const struct cred *old_cred;
+ struct pid *old_pid;
+
+ if (sk < peersk) {
+ spin_lock(&sk->sk_peer_lock);
+ spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ } else {
+ spin_lock(&peersk->sk_peer_lock);
+ spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
+ }
+ old_pid = sk->sk_peer_pid;
+ old_cred = sk->sk_peer_cred;
sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
+
+ spin_unlock(&sk->sk_peer_lock);
+ spin_unlock(&peersk->sk_peer_lock);
+
+ put_pid(old_pid);
+ put_cred(old_cred);
}
static int unix_listen(struct socket *sock, int backlog)
@@ -828,20 +850,25 @@ struct proto unix_stream_proto = {
static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
- struct sock *sk = NULL;
struct unix_sock *u;
+ struct sock *sk;
+ int err;
atomic_long_inc(&unix_nr_socks);
- if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
- goto out;
+ if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
+ err = -ENFILE;
+ goto err;
+ }
if (type == SOCK_STREAM)
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
else /*dgram and seqpacket */
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);
- if (!sk)
- goto out;
+ if (!sk) {
+ err = -ENOMEM;
+ goto err;
+ }
sock_init_data(sock, sk);
@@ -861,20 +888,23 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
memset(&u->scm_stat, 0, sizeof(struct scm_stat));
unix_insert_socket(unix_sockets_unbound(sk), sk);
-out:
- if (sk == NULL)
- atomic_long_dec(&unix_nr_socks);
- else {
- local_bh_disable();
- sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
- local_bh_enable();
- }
+
+ local_bh_disable();
+ sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ local_bh_enable();
+
return sk;
+
+err:
+ atomic_long_dec(&unix_nr_socks);
+ return ERR_PTR(err);
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
+ struct sock *sk;
+
if (protocol && protocol != PF_UNIX)
return -EPROTONOSUPPORT;
@@ -901,7 +931,11 @@ static int unix_create(struct net *net, struct socket *sock, int protocol,
return -ESOCKTNOSUPPORT;
}
- return unix_create1(net, sock, kern, sock->type) ? 0 : -ENOMEM;
+ sk = unix_create1(net, sock, kern, sock->type);
+ if (IS_ERR(sk))
+ return PTR_ERR(sk);
+
+ return 0;
}
static int unix_release(struct socket *sock)
@@ -1314,12 +1348,15 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
we will have to recheck all again in any case.
*/
- err = -ENOMEM;
-
/* create new sock for complete connection */
newsk = unix_create1(sock_net(sk), NULL, 0, sock->type);
- if (newsk == NULL)
+ if (IS_ERR(newsk)) {
+ err = PTR_ERR(newsk);
+ newsk = NULL;
goto out;
+ }
+
+ err = -ENOMEM;
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
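Note: copy_peercred() above takes both sk_peer_lock spinlocks in a fixed order (lowest address first, the second with spin_lock_nested()) so that two sockets copying credentials toward each other cannot deadlock, and the unix_create1() change in the same file switches the failure path from returning NULL to ERR_PTR()/PTR_ERR() so callers can distinguish -ENFILE from -ENOMEM. A pthread-based userspace sketch of the lock-ordering rule follows; struct peer, lock_pair() and copy_cred() are invented for the demo (build with -pthread).

#include <pthread.h>
#include <stdio.h>

struct peer {
	pthread_mutex_t lock;
	long cred;
};

static void lock_pair(struct peer *a, struct peer *b)
{
	/* Always lock the lower address first, as the kernel hunk does. */
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void unlock_pair(struct peer *a, struct peer *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

static void copy_cred(struct peer *dst, struct peer *src)
{
	lock_pair(dst, src);
	dst->cred = src->cred;
	unlock_pair(dst, src);
}

int main(void)
{
	struct peer a = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct peer b = { PTHREAD_MUTEX_INITIALIZER, 2 };

	copy_cred(&a, &b);	/* safe in either direction */
	copy_cred(&b, &a);
	printf("a=%ld b=%ld\n", a.cred, b.cred);
	return 0;
}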
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 801c415bac59..b9e94c5e7097 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -33,10 +33,11 @@ else
CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
$(call cc-param,asan-globals=1) \
$(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
- $(call cc-param,asan-stack=$(stack_enable)) \
$(call cc-param,asan-instrument-allocas=1)
endif
+CFLAGS_KASAN += $(call cc-param,asan-stack=$(stack_enable))
+
endif # CONFIG_KASAN_GENERIC
ifdef CONFIG_KASAN_SW_TAGS
diff --git a/scripts/sorttable.c b/scripts/sorttable.c
index f355869c65cd..6ee4fa882919 100644
--- a/scripts/sorttable.c
+++ b/scripts/sorttable.c
@@ -54,6 +54,10 @@
#define EM_ARCV2 195
#endif
+#ifndef EM_RISCV
+#define EM_RISCV 243
+#endif
+
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6517f221d52c..e7ebd45ca345 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -2157,7 +2157,7 @@ static int selinux_ptrace_access_check(struct task_struct *child,
static int selinux_ptrace_traceme(struct task_struct *parent)
{
return avc_has_perm(&selinux_state,
- task_sid_subj(parent), task_sid_obj(current),
+ task_sid_obj(parent), task_sid_obj(current),
SECCLASS_PROCESS, PROCESS__PTRACE, NULL);
}
@@ -6222,7 +6222,7 @@ static int selinux_msg_queue_msgrcv(struct kern_ipc_perm *msq, struct msg_msg *m
struct ipc_security_struct *isec;
struct msg_security_struct *msec;
struct common_audit_data ad;
- u32 sid = task_sid_subj(target);
+ u32 sid = task_sid_obj(target);
int rc;
isec = selinux_ipc(msq);
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index cacbe7518519..21a0e7c3b8de 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2016,7 +2016,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
const char *caller)
{
struct smk_audit_info ad;
- struct smack_known *skp = smk_of_task_struct_subj(p);
+ struct smack_known *skp = smk_of_task_struct_obj(p);
int rc;
smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
@@ -3480,7 +3480,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
- struct smack_known *skp = smk_of_task_struct_subj(p);
+ struct smack_known *skp = smk_of_task_struct_obj(p);
char *cp;
int slen;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 6c0a4a67ad2e..6f30231bdb88 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -873,12 +873,21 @@ static long snd_rawmidi_ioctl(struct file *file, unsigned int cmd, unsigned long
return -EINVAL;
}
}
+ case SNDRV_RAWMIDI_IOCTL_USER_PVERSION:
+ if (get_user(rfile->user_pversion, (unsigned int __user *)arg))
+ return -EFAULT;
+ return 0;
+
case SNDRV_RAWMIDI_IOCTL_PARAMS:
{
struct snd_rawmidi_params params;
if (copy_from_user(&params, argp, sizeof(struct snd_rawmidi_params)))
return -EFAULT;
+ if (rfile->user_pversion < SNDRV_PROTOCOL_VERSION(2, 0, 2)) {
+ params.mode = 0;
+ memset(params.reserved, 0, sizeof(params.reserved));
+ }
switch (params.stream) {
case SNDRV_RAWMIDI_STREAM_OUTPUT:
if (rfile->output == NULL)
diff --git a/sound/drivers/pcsp/pcsp_lib.c b/sound/drivers/pcsp/pcsp_lib.c
index ed40d0f7432c..773db4bf0876 100644
--- a/sound/drivers/pcsp/pcsp_lib.c
+++ b/sound/drivers/pcsp/pcsp_lib.c
@@ -143,7 +143,7 @@ enum hrtimer_restart pcsp_do_timer(struct hrtimer *handle)
if (pointer_update)
pcsp_pointer_update(chip);
- hrtimer_forward(handle, hrtimer_get_expires(handle), ns_to_ktime(ns));
+ hrtimer_forward_now(handle, ns_to_ktime(ns));
return HRTIMER_RESTART;
}
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index 5388b85fb60e..a18c2c033e83 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -276,10 +276,11 @@ static void __maybe_unused copy_message(u64 *frames, __be32 *buffer,
/* This is just for v2/v3 protocol. */
for (i = 0; i < data_blocks; ++i) {
- *frames = (be32_to_cpu(buffer[1]) << 16) |
- (be32_to_cpu(buffer[2]) >> 16);
+ *frames = be32_to_cpu(buffer[1]);
+ *frames <<= 16;
+ *frames |= be32_to_cpu(buffer[2]) >> 16;
+ ++frames;
buffer += data_block_quadlets;
- frames++;
}
}
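Note: in copy_message() above, the old expression shifted a 32-bit be32_to_cpu() result left by 16, discarding its top bits before the value ever reached the 64-bit frame; the fix widens to 64 bits first and then shifts. A small userspace demonstration of the difference (plain unsigned integers stand in for the byte-swapped quadlets):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int hi = 0x12345678u, lo = 0x9abcdef0u;
	uint64_t truncated, widened;

	/* old pattern: the shift happens in 32-bit arithmetic and wraps */
	truncated = (hi << 16) | (lo >> 16);

	/* fixed pattern: widen to 64 bits first, then shift */
	widened = hi;
	widened <<= 16;
	widened |= lo >> 16;

	printf("truncated: 0x%016" PRIx64 "\n", truncated);
	printf("widened:   0x%016" PRIx64 "\n", widened);
	return 0;
}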
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index cb5b5e3a481b..daf731364695 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -184,13 +184,16 @@ static int detect_quirks(struct snd_oxfw *oxfw, const struct ieee1394_device_id
model = val;
}
- /*
- * Mackie Onyx Satellite with base station has a quirk to report a wrong
- * value in 'dbs' field of CIP header against its format information.
- */
- if (vendor == VENDOR_LOUD && model == MODEL_SATELLITE)
+ if (vendor == VENDOR_LOUD) {
+ // Mackie Onyx Satellite with base station has a quirk to report a wrong
+ // value in 'dbs' field of CIP header against its format information.
oxfw->quirks |= SND_OXFW_QUIRK_WRONG_DBS;
+ // OXFW971-based models may transfer events by blocking method.
+ if (!(oxfw->quirks & SND_OXFW_QUIRK_JUMBO_PAYLOAD))
+ oxfw->quirks |= SND_OXFW_QUIRK_BLOCKING_TRANSMISSION;
+ }
+
return 0;
}
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3aa432d814a2..47777439961c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -883,10 +883,11 @@ static unsigned int azx_get_pos_skl(struct azx *chip, struct azx_dev *azx_dev)
return azx_get_pos_posbuf(chip, azx_dev);
}
-static void azx_shutdown_chip(struct azx *chip)
+static void __azx_shutdown_chip(struct azx *chip, bool skip_link_reset)
{
azx_stop_chip(chip);
- azx_enter_link_reset(chip);
+ if (!skip_link_reset)
+ azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
display_power(chip, false);
}
@@ -895,6 +896,11 @@ static void azx_shutdown_chip(struct azx *chip)
static DEFINE_MUTEX(card_list_lock);
static LIST_HEAD(card_list);
+static void azx_shutdown_chip(struct azx *chip)
+{
+ __azx_shutdown_chip(chip, false);
+}
+
static void azx_add_card_list(struct azx *chip)
{
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
@@ -2357,7 +2363,7 @@ static void azx_shutdown(struct pci_dev *pci)
return;
chip = card->private_data;
if (chip && chip->running)
- azx_shutdown_chip(chip);
+ __azx_shutdown_chip(chip, true);
}
/* PCI IDs */
diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
index 3c7ef55d016e..31ff11ab868e 100644
--- a/sound/pci/hda/patch_cs8409.c
+++ b/sound/pci/hda/patch_cs8409.c
@@ -1207,6 +1207,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
snd_hda_jack_add_kctl(codec, DOLPHIN_LO_PIN_NID, "Line Out", true,
SND_JACK_HEADPHONE, NULL);
+ snd_hda_jack_add_kctl(codec, DOLPHIN_AMIC_PIN_NID, "Microphone", true,
+ SND_JACK_MICROPHONE, NULL);
+
cs8409_fix_caps(codec, DOLPHIN_HP_PIN_NID);
cs8409_fix_caps(codec, DOLPHIN_LO_PIN_NID);
cs8409_fix_caps(codec, DOLPHIN_AMIC_PIN_NID);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8b7a389b6aed..4407f7da57c4 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6429,6 +6429,20 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
hda_fixup_thinkpad_acpi(codec, fix, action);
}
+/* Fixup for Lenovo Legion 15IMHg05 speaker output on headset removal. */
+static void alc287_fixup_legion_15imhg05_speakers(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct alc_spec *spec = codec->spec;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->gen.suppress_auto_mute = 1;
+ break;
+ }
+}
+
/* for alc295_fixup_hp_top_speakers */
#include "hp_x360_helper.c"
@@ -6646,6 +6660,10 @@ enum {
ALC623_FIXUP_LENOVO_THINKSTATION_P340,
ALC255_FIXUP_ACER_HEADPHONE_AND_MIC,
ALC236_FIXUP_HP_LIMIT_INT_MIC_BOOST,
+ ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS,
+ ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
+ ALC287_FIXUP_13S_GEN2_SPEAKERS
};
static const struct hda_fixup alc269_fixups[] = {
@@ -8236,6 +8254,113 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
},
+ [ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ //.v.verbs = legion_15imhg05_coefs,
+ .v.verbs = (const struct hda_verb[]) {
+ // set left speaker Legion 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ // set right speaker Legion 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE,
+ },
+ [ALC287_FIXUP_LEGION_15IMHG05_AUTOMUTE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc287_fixup_legion_15imhg05_speakers,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
+ [ALC287_FIXUP_YOGA7_14ITL_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ // set left speaker Yoga 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x1a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ // set right speaker Yoga 7i.
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x46 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xc },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2a },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
+ [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x41 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x24 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x42 },
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x26 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x2 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x0 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0xb020 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -8630,6 +8755,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
SND_PCI_QUIRK(0x17aa, 0x3827, "Ideapad S740", ALC285_FIXUP_IDEAPAD_S740_COEF),
SND_PCI_QUIRK(0x17aa, 0x3843, "Yoga 9i", ALC287_FIXUP_IDEAPAD_BASS_SPK_AMP),
+ SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3852, "Lenovo Yoga 7 14ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
+ SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
index 87d24224c042..23f253effb4f 100644
--- a/sound/pci/pcxhr/pcxhr_core.c
+++ b/sound/pci/pcxhr/pcxhr_core.c
@@ -52,7 +52,7 @@
#define PCXHR_DSP 2
#if (PCXHR_DSP_OFFSET_MAX > PCXHR_PLX_OFFSET_MIN)
-#undef PCXHR_REG_TO_PORT(x)
+#error PCXHR_REG_TO_PORT(x)
#else
#define PCXHR_REG_TO_PORT(x) ((x)>PCXHR_DSP_OFFSET_MAX ? PCXHR_PLX : PCXHR_DSP)
#endif
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index a961f837cd09..bda66b30e063 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -1073,6 +1073,16 @@ static int fsl_esai_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_get_sync;
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() does not defer probing when the platform
+ * component is not yet registered.
+ */
+ ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
+ goto err_pm_get_sync;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_esai_component,
&fsl_esai_dai, 1);
if (ret) {
@@ -1082,12 +1092,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
INIT_WORK(&esai_priv->work, fsl_esai_hw_reset);
- ret = imx_pcm_dma_init(pdev, IMX_ESAI_DMABUF_SIZE);
- if (ret) {
- dev_err(&pdev->dev, "failed to init imx pcm dma: %d\n", ret);
- goto err_pm_get_sync;
- }
-
return ret;
err_pm_get_sync:
diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
index 8c0c75ce9490..9f90989ac59a 100644
--- a/sound/soc/fsl/fsl_micfil.c
+++ b/sound/soc/fsl/fsl_micfil.c
@@ -737,18 +737,23 @@ static int fsl_micfil_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
regcache_cache_only(micfil->regmap, true);
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() does not defer probing when the platform
+ * component is not yet registered.
+ */
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to pcm register\n");
+ return ret;
+ }
+
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_micfil_component,
&fsl_micfil_dai, 1);
if (ret) {
dev_err(&pdev->dev, "failed to register component %s\n",
fsl_micfil_component.name);
- return ret;
}
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
- if (ret)
- dev_err(&pdev->dev, "failed to pcm register\n");
-
return ret;
}
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 223fcd15bfcc..38f6362099d5 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1152,11 +1152,10 @@ static int fsl_sai_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_get_sync;
- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
- &sai->cpu_dai_drv, 1);
- if (ret)
- goto err_pm_get_sync;
-
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() does not defer probing when the platform
+ * component is not yet registered.
+ */
if (sai->soc_data->use_imx_pcm) {
ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
if (ret)
@@ -1167,6 +1166,11 @@ static int fsl_sai_probe(struct platform_device *pdev)
goto err_pm_get_sync;
}
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
+ &sai->cpu_dai_drv, 1);
+ if (ret)
+ goto err_pm_get_sync;
+
return ret;
err_pm_get_sync:
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
index 8ffb1a6048d6..1c53719bb61e 100644
--- a/sound/soc/fsl/fsl_spdif.c
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -1434,16 +1434,20 @@ static int fsl_spdif_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
regcache_cache_only(spdif_priv->regmap, true);
- ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
- &spdif_priv->cpu_dai_drv, 1);
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() does not defer probing when the platform
+ * component is not yet registered.
+ */
+ ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
if (ret) {
- dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+ dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
goto err_pm_disable;
}
- ret = imx_pcm_dma_init(pdev, IMX_SPDIF_DMABUF_SIZE);
+ ret = devm_snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
+ &spdif_priv->cpu_dai_drv, 1);
if (ret) {
- dev_err_probe(&pdev->dev, ret, "imx_pcm_dma_init failed\n");
+ dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
goto err_pm_disable;
}
diff --git a/sound/soc/fsl/fsl_xcvr.c b/sound/soc/fsl/fsl_xcvr.c
index 31c5ee641fe7..7ba2fd15132d 100644
--- a/sound/soc/fsl/fsl_xcvr.c
+++ b/sound/soc/fsl/fsl_xcvr.c
@@ -1215,18 +1215,23 @@ static int fsl_xcvr_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
regcache_cache_only(xcvr->regmap, true);
+ /*
+ * Register the platform component before the CPU DAI, because
+ * snd_soc_add_pcm_runtime() does not defer probing when the platform
+ * component is not yet registered.
+ */
+ ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
+ if (ret) {
+ dev_err(dev, "failed to pcm register\n");
+ return ret;
+ }
+
ret = devm_snd_soc_register_component(dev, &fsl_xcvr_comp,
&fsl_xcvr_dai, 1);
if (ret) {
dev_err(dev, "failed to register component %s\n",
fsl_xcvr_comp.name);
- return ret;
}
- ret = devm_snd_dmaengine_pcm_register(dev, NULL, 0);
- if (ret)
- dev_err(dev, "failed to pcm register\n");
-
return ret;
}
diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
index 6602eda89e8e..6b06248a9327 100644
--- a/sound/soc/intel/boards/sof_sdw.c
+++ b/sound/soc/intel/boards/sof_sdw.c
@@ -929,6 +929,11 @@ static int create_sdw_dailink(struct snd_soc_card *card,
cpus + *cpu_id, cpu_dai_num,
codecs, codec_num,
NULL, &sdw_ops);
+ /*
+ * SoundWire DAILINKs use 'stream' functions and Bank Switch operations
+ * based on wait_for_completion(), tag them as 'nonatomic'.
+ */
+ dai_links[*be_index].nonatomic = true;
ret = set_codec_init_func(card, link, dai_links + (*be_index)++,
playback, group_id);
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 5a2f4667d50b..81ad2dcee9eb 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config SND_SOC_MEDIATEK
tristate
+ select REGMAP_MMIO
config SND_SOC_MT2701
tristate "ASoC support for Mediatek MT2701 chip"
@@ -188,7 +189,9 @@ config SND_SOC_MT8192_MT6359_RT1015_RT5682
config SND_SOC_MT8195
tristate "ASoC support for Mediatek MT8195 chip"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on COMMON_CLK
select SND_SOC_MEDIATEK
+ select MFD_SYSCON if SND_SOC_MT6359
help
This adds ASoC platform driver support for Mediatek MT8195 chip
that can be used with other codecs.
diff --git a/sound/soc/mediatek/common/mtk-afe-fe-dai.c b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
index baaa5881b1d4..e95c7c018e7d 100644
--- a/sound/soc/mediatek/common/mtk-afe-fe-dai.c
+++ b/sound/soc/mediatek/common/mtk-afe-fe-dai.c
@@ -334,9 +334,11 @@ int mtk_afe_suspend(struct snd_soc_component *component)
devm_kcalloc(dev, afe->reg_back_up_list_num,
sizeof(unsigned int), GFP_KERNEL);
- for (i = 0; i < afe->reg_back_up_list_num; i++)
- regmap_read(regmap, afe->reg_back_up_list[i],
- &afe->reg_back_up[i]);
+ if (afe->reg_back_up) {
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ regmap_read(regmap, afe->reg_back_up_list[i],
+ &afe->reg_back_up[i]);
+ }
afe->suspended = true;
afe->runtime_suspend(dev);
@@ -356,12 +358,13 @@ int mtk_afe_resume(struct snd_soc_component *component)
afe->runtime_resume(dev);
- if (!afe->reg_back_up)
+ if (!afe->reg_back_up) {
dev_dbg(dev, "%s no reg_backup\n", __func__);
-
- for (i = 0; i < afe->reg_back_up_list_num; i++)
- mtk_regmap_write(regmap, afe->reg_back_up_list[i],
- afe->reg_back_up[i]);
+ } else {
+ for (i = 0; i < afe->reg_back_up_list_num; i++)
+ mtk_regmap_write(regmap, afe->reg_back_up_list[i],
+ afe->reg_back_up[i]);
+ }
afe->suspended = false;
return 0;
diff --git a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
index c97ace7387b4..de09f67c0450 100644
--- a/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
+++ b/sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c
@@ -424,8 +424,8 @@ static int mt8195_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd)
return snd_soc_component_set_jack(cmpnt_codec, &priv->hdmi_jack, NULL);
}
-static int mt8195_hdmitx_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
- struct snd_pcm_hw_params *params)
+static int mt8195_dptx_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
{
/* fix BE i2s format to 32bit, clean param mask first */
@@ -902,7 +902,7 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
.no_pcm = 1,
.dpcm_playback = 1,
.ops = &mt8195_dptx_ops,
- .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
+ .be_hw_params_fixup = mt8195_dptx_hw_params_fixup,
SND_SOC_DAILINK_REG(DPTX_BE),
},
[DAI_LINK_ETDM1_IN_BE] = {
@@ -953,7 +953,6 @@ static struct snd_soc_dai_link mt8195_mt6359_rt1019_rt5682_dai_links[] = {
SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS,
.dpcm_playback = 1,
- .be_hw_params_fixup = mt8195_hdmitx_dptx_hw_params_fixup,
SND_SOC_DAILINK_REG(ETDM3_OUT_BE),
},
[DAI_LINK_PCM1_BE] = {
diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
index 3e4dd4a86363..59d0d7b2b55c 100644
--- a/sound/soc/sof/core.c
+++ b/sound/soc/sof/core.c
@@ -371,7 +371,6 @@ int snd_sof_device_remove(struct device *dev)
dev_warn(dev, "error: %d failed to prepare DSP for device removal",
ret);
- snd_sof_fw_unload(sdev);
snd_sof_ipc_free(sdev);
snd_sof_free_debug(sdev);
snd_sof_free_trace(sdev);
@@ -394,8 +393,7 @@ int snd_sof_device_remove(struct device *dev)
snd_sof_remove(sdev);
/* release firmware */
- release_firmware(pdata->fw);
- pdata->fw = NULL;
+ snd_sof_fw_unload(sdev);
return 0;
}
diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
index 12fedf0984bd..7e9723a10d02 100644
--- a/sound/soc/sof/imx/imx8.c
+++ b/sound/soc/sof/imx/imx8.c
@@ -365,7 +365,14 @@ static int imx8_remove(struct snd_sof_dev *sdev)
/* on i.MX8 there is 1 to 1 match between type and BAR idx */
static int imx8_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
- return type;
+ /* Only IRAM and SRAM bars are valid */
+ switch (type) {
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ return type;
+ default:
+ return -EINVAL;
+ }
}
static void imx8_ipc_msg_data(struct snd_sof_dev *sdev,
diff --git a/sound/soc/sof/imx/imx8m.c b/sound/soc/sof/imx/imx8m.c
index cb822d953767..892e1482f97f 100644
--- a/sound/soc/sof/imx/imx8m.c
+++ b/sound/soc/sof/imx/imx8m.c
@@ -228,7 +228,14 @@ static int imx8m_remove(struct snd_sof_dev *sdev)
/* on i.MX8 there is 1 to 1 match between type and BAR idx */
static int imx8m_get_bar_index(struct snd_sof_dev *sdev, u32 type)
{
- return type;
+ /* Only IRAM and SRAM bars are valid */
+ switch (type) {
+ case SOF_FW_BLK_TYPE_IRAM:
+ case SOF_FW_BLK_TYPE_SRAM:
+ return type;
+ default:
+ return -EINVAL;
+ }
}
static void imx8m_ipc_msg_data(struct snd_sof_dev *sdev,
diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c
index 2b38a77cd594..bb79c77775b3 100644
--- a/sound/soc/sof/loader.c
+++ b/sound/soc/sof/loader.c
@@ -729,10 +729,10 @@ int snd_sof_load_firmware_raw(struct snd_sof_dev *sdev)
ret = request_firmware(&plat_data->fw, fw_filename, sdev->dev);
if (ret < 0) {
- dev_err(sdev->dev, "error: request firmware %s failed err: %d\n",
- fw_filename, ret);
dev_err(sdev->dev,
- "you may need to download the firmware from https://github.com/thesofproject/sof-bin/\n");
+ "error: sof firmware file is missing, you might need to\n");
+ dev_err(sdev->dev,
+ " download it from https://github.com/thesofproject/sof-bin/\n");
goto err;
} else {
dev_dbg(sdev->dev, "request_firmware %s successful\n",
@@ -880,5 +880,7 @@ EXPORT_SYMBOL(snd_sof_run_firmware);
void snd_sof_fw_unload(struct snd_sof_dev *sdev)
{
/* TODO: support module unloading at runtime */
+ release_firmware(sdev->pdata->fw);
+ sdev->pdata->fw = NULL;
}
EXPORT_SYMBOL(snd_sof_fw_unload);
diff --git a/sound/soc/sof/trace.c b/sound/soc/sof/trace.c
index f72a6e83e6af..58f6ca5cf491 100644
--- a/sound/soc/sof/trace.c
+++ b/sound/soc/sof/trace.c
@@ -530,7 +530,6 @@ void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
return;
if (sdev->dtrace_is_enabled) {
- dev_err(sdev->dev, "error: waking up any trace sleepers\n");
sdev->dtrace_error = true;
wake_up(&sdev->trace_sleep);
}
diff --git a/sound/soc/sof/xtensa/core.c b/sound/soc/sof/xtensa/core.c
index bbb9a2282ed9..f6e3411b33cf 100644
--- a/sound/soc/sof/xtensa/core.c
+++ b/sound/soc/sof/xtensa/core.c
@@ -122,9 +122,9 @@ static void xtensa_stack(struct snd_sof_dev *sdev, void *oops, u32 *stack,
* 0x0049fbb0: 8000f2d0 0049fc00 6f6c6c61 00632e63
*/
for (i = 0; i < stack_words; i += 4) {
- hex_dump_to_buffer(stack + i * 4, 16, 16, 4,
+ hex_dump_to_buffer(stack + i, 16, 16, 4,
buf, sizeof(buf), false);
- dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i, buf);
+ dev_err(sdev->dev, "0x%08x: %s\n", stack_ptr + i * 4, buf);
}
}
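Note: stack is a u32 pointer, so "stack + i" already advances i words; the old "stack + i * 4" skipped three words out of every four, while the printed address needs the byte offset "stack_ptr + i * 4". Below is a userspace sketch of the corrected indexing, with plain printf() standing in for hex_dump_to_buffer() and a made-up base address.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t stack[16];
	unsigned int stack_ptr = 0x0049fbb0;	/* pretend base address */
	unsigned int i;

	for (i = 0; i < 16; i++)
		stack[i] = 0x1000 + i;

	/* Dump four 32-bit words per line, as xtensa_stack() does. */
	for (i = 0; i < 16; i += 4) {
		const uint32_t *row = stack + i;	/* word offset */

		printf("0x%08x: %08x %08x %08x %08x\n",
		       stack_ptr + i * 4,		/* byte offset */
		       row[0], row[1], row[2], row[3]);
	}
	return 0;
}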
diff --git a/sound/usb/card.c b/sound/usb/card.c
index fd570a42f043..1764b9302d46 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -1054,7 +1054,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
return 0;
}
-static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
+static int usb_audio_resume(struct usb_interface *intf)
{
struct snd_usb_audio *chip = usb_get_intfdata(intf);
struct snd_usb_stream *as;
@@ -1080,7 +1080,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
* we just notify and restart the mixers
*/
list_for_each_entry(mixer, &chip->mixer_list, list) {
- err = snd_usb_mixer_resume(mixer, reset_resume);
+ err = snd_usb_mixer_resume(mixer);
if (err < 0)
goto err_out;
}
@@ -1100,20 +1100,10 @@ err_out:
atomic_dec(&chip->active); /* allow autopm after this point */
return err;
}
-
-static int usb_audio_resume(struct usb_interface *intf)
-{
- return __usb_audio_resume(intf, false);
-}
-
-static int usb_audio_reset_resume(struct usb_interface *intf)
-{
- return __usb_audio_resume(intf, true);
-}
#else
#define usb_audio_suspend NULL
#define usb_audio_resume NULL
-#define usb_audio_reset_resume NULL
+#define usb_audio_resume NULL
#endif /* CONFIG_PM */
static const struct usb_device_id usb_audio_ids [] = {
@@ -1135,7 +1125,7 @@ static struct usb_driver usb_audio_driver = {
.disconnect = usb_audio_disconnect,
.suspend = usb_audio_suspend,
.resume = usb_audio_resume,
- .reset_resume = usb_audio_reset_resume,
+ .reset_resume = usb_audio_resume,
.id_table = usb_audio_ids,
.supports_autosuspend = 1,
};
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 43bc59575a6e..a2ce535df14b 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -3653,33 +3653,16 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
return 0;
}
-static int default_mixer_reset_resume(struct usb_mixer_elem_list *list)
-{
- int err;
-
- if (list->resume) {
- err = list->resume(list);
- if (err < 0)
- return err;
- }
- return restore_mixer_value(list);
-}
-
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume)
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer)
{
struct usb_mixer_elem_list *list;
- usb_mixer_elem_resume_func_t f;
int id, err;
/* restore cached mixer values */
for (id = 0; id < MAX_ID_ELEMS; id++) {
for_each_mixer_elem(list, mixer, id) {
- if (reset_resume)
- f = list->reset_resume;
- else
- f = list->resume;
- if (f) {
- err = f(list);
+ if (list->resume) {
+ err = list->resume(list);
if (err < 0)
return err;
}
@@ -3700,7 +3683,6 @@ void snd_usb_mixer_elem_init_std(struct usb_mixer_elem_list *list,
list->id = unitid;
list->dump = snd_usb_mixer_dump_cval;
#ifdef CONFIG_PM
- list->resume = NULL;
- list->reset_resume = default_mixer_reset_resume;
+ list->resume = restore_mixer_value;
#endif
}
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 876bbc9a71ad..98ea24d91d80 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -70,7 +70,6 @@ struct usb_mixer_elem_list {
bool is_std_info;
usb_mixer_elem_dump_func_t dump;
usb_mixer_elem_resume_func_t resume;
- usb_mixer_elem_resume_func_t reset_resume;
};
/* iterate over mixer element list of the given unit id */
@@ -121,7 +120,7 @@ int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
#ifdef CONFIG_PM
int snd_usb_mixer_suspend(struct usb_mixer_interface *mixer);
-int snd_usb_mixer_resume(struct usb_mixer_interface *mixer, bool reset_resume);
+int snd_usb_mixer_resume(struct usb_mixer_interface *mixer);
#endif
int snd_usb_set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index a66ce0375fd9..46082dc57be0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -151,7 +151,7 @@ static int add_single_ctl_with_resume(struct usb_mixer_interface *mixer,
*listp = list;
list->mixer = mixer;
list->id = id;
- list->reset_resume = resume;
+ list->resume = resume;
kctl = snd_ctl_new1(knew, list);
if (!kctl) {
kfree(list);
diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h
new file mode 100644
index 000000000000..7640fa27be94
--- /dev/null
+++ b/tools/arch/arm64/include/asm/sysreg.h
@@ -0,0 +1,1296 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Macros for accessing system registers with older binutils.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ */
+
+#ifndef __ASM_SYSREG_H
+#define __ASM_SYSREG_H
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ * C5.2, version:ARM DDI 0487A.f)
+ * [20-19] : Op0
+ * [18-16] : Op1
+ * [15-12] : CRn
+ * [11-8] : CRm
+ * [7-5] : Op2
+ */
+#define Op0_shift 19
+#define Op0_mask 0x3
+#define Op1_shift 16
+#define Op1_mask 0x7
+#define CRn_shift 12
+#define CRn_mask 0xf
+#define CRm_shift 8
+#define CRm_mask 0xf
+#define Op2_shift 5
+#define Op2_mask 0x7
+
+#define sys_reg(op0, op1, crn, crm, op2) \
+ (((op0) << Op0_shift) | ((op1) << Op1_shift) | \
+ ((crn) << CRn_shift) | ((crm) << CRm_shift) | \
+ ((op2) << Op2_shift))
+
+#define sys_insn sys_reg
+
+#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask)
+#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask)
+#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask)
+#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask)
+#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask)
+
+#ifndef CONFIG_BROKEN_GAS_INST
+
+#ifdef __ASSEMBLY__
+// The space separator is omitted so that __emit_inst(x) can be parsed as
+// either an assembler directive or an assembler macro argument.
+#define __emit_inst(x) .inst(x)
+#else
+#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
+#endif
+
+#else /* CONFIG_BROKEN_GAS_INST */
+
+#ifndef CONFIG_CPU_BIG_ENDIAN
+#define __INSTR_BSWAP(x) (x)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+#define __INSTR_BSWAP(x) ((((x) << 24) & 0xff000000) | \
+ (((x) << 8) & 0x00ff0000) | \
+ (((x) >> 8) & 0x0000ff00) | \
+ (((x) >> 24) & 0x000000ff))
+#endif /* CONFIG_CPU_BIG_ENDIAN */
+
+#ifdef __ASSEMBLY__
+#define __emit_inst(x) .long __INSTR_BSWAP(x)
+#else /* __ASSEMBLY__ */
+#define __emit_inst(x) ".long " __stringify(__INSTR_BSWAP(x)) "\n\t"
+#endif /* __ASSEMBLY__ */
+
+#endif /* CONFIG_BROKEN_GAS_INST */
+
+/*
+ * Instructions for modifying PSTATE fields.
+ * As per Arm ARM for v8-A, Section "C.5.1.3 op0 == 0b00, architectural hints,
+ * barriers and CLREX, and PSTATE access", ARM DDI 0487 C.a, system instructions
+ * for accessing PSTATE fields have the following encoding:
+ * Op0 = 0, CRn = 4
+ * Op1, Op2 encodes the PSTATE field modified and defines the constraints.
+ * CRm = Imm4 for the instruction.
+ * Rt = 0x1f
+ */
+#define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift)
+#define PSTATE_Imm_shift CRm_shift
+
+#define PSTATE_PAN pstate_field(0, 4)
+#define PSTATE_UAO pstate_field(0, 3)
+#define PSTATE_SSBS pstate_field(3, 1)
+#define PSTATE_TCO pstate_field(3, 4)
+
+#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift))
+#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift))
+
+#define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x))
+#define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x))
+#define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x))
+
+#define __SYS_BARRIER_INSN(CRm, op2, Rt) \
+ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f))
+
+#define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31)
+
+#define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_CSW sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2)
+
+/*
+ * System registers, organised loosely by encoding but grouped together
+ * where the architected name contains an index. e.g. ID_MMFR<n>_EL1.
+ */
+#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2)
+#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0)
+#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2)
+#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2)
+#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2)
+#define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4)
+#define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5)
+#define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6)
+#define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7)
+#define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0)
+#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4)
+#define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4)
+#define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4)
+#define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4)
+#define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6)
+#define SYS_DBGCLAIMCLR_EL1 sys_reg(2, 0, 7, 9, 6)
+#define SYS_DBGAUTHSTATUS_EL1 sys_reg(2, 0, 7, 14, 6)
+#define SYS_MDCCSR_EL0 sys_reg(2, 3, 0, 1, 0)
+#define SYS_DBGDTR_EL0 sys_reg(2, 3, 0, 4, 0)
+#define SYS_DBGDTRRX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGDTRTX_EL0 sys_reg(2, 3, 0, 5, 0)
+#define SYS_DBGVCR32_EL2 sys_reg(2, 4, 0, 7, 0)
+
+#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
+#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
+#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
+
+#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
+#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
+#define SYS_ID_PFR2_EL1 sys_reg(3, 0, 0, 3, 4)
+#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
+#define SYS_ID_DFR1_EL1 sys_reg(3, 0, 0, 3, 5)
+#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3)
+#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
+#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
+#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
+#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7)
+#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
+#define SYS_ID_MMFR5_EL1 sys_reg(3, 0, 0, 3, 6)
+
+#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0)
+#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1)
+#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2)
+#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3)
+#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
+#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
+#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7)
+
+#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
+#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
+#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
+
+#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
+#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
+#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
+
+#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
+#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
+
+#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4)
+#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5)
+
+#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
+#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
+
+#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
+#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
+#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
+
+#define SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0)
+#define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1)
+#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2)
+#define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5)
+#define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6)
+
+#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
+#define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1)
+
+#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0)
+#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1)
+#define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2)
+
+#define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0)
+#define SYS_APIAKEYHI_EL1 sys_reg(3, 0, 2, 1, 1)
+#define SYS_APIBKEYLO_EL1 sys_reg(3, 0, 2, 1, 2)
+#define SYS_APIBKEYHI_EL1 sys_reg(3, 0, 2, 1, 3)
+
+#define SYS_APDAKEYLO_EL1 sys_reg(3, 0, 2, 2, 0)
+#define SYS_APDAKEYHI_EL1 sys_reg(3, 0, 2, 2, 1)
+#define SYS_APDBKEYLO_EL1 sys_reg(3, 0, 2, 2, 2)
+#define SYS_APDBKEYHI_EL1 sys_reg(3, 0, 2, 2, 3)
+
+#define SYS_APGAKEYLO_EL1 sys_reg(3, 0, 2, 3, 0)
+#define SYS_APGAKEYHI_EL1 sys_reg(3, 0, 2, 3, 1)
+
+#define SYS_SPSR_EL1 sys_reg(3, 0, 4, 0, 0)
+#define SYS_ELR_EL1 sys_reg(3, 0, 4, 0, 1)
+
+#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
+
+#define SYS_AFSR0_EL1 sys_reg(3, 0, 5, 1, 0)
+#define SYS_AFSR1_EL1 sys_reg(3, 0, 5, 1, 1)
+#define SYS_ESR_EL1 sys_reg(3, 0, 5, 2, 0)
+
+#define SYS_ERRIDR_EL1 sys_reg(3, 0, 5, 3, 0)
+#define SYS_ERRSELR_EL1 sys_reg(3, 0, 5, 3, 1)
+#define SYS_ERXFR_EL1 sys_reg(3, 0, 5, 4, 0)
+#define SYS_ERXCTLR_EL1 sys_reg(3, 0, 5, 4, 1)
+#define SYS_ERXSTATUS_EL1 sys_reg(3, 0, 5, 4, 2)
+#define SYS_ERXADDR_EL1 sys_reg(3, 0, 5, 4, 3)
+#define SYS_ERXMISC0_EL1 sys_reg(3, 0, 5, 5, 0)
+#define SYS_ERXMISC1_EL1 sys_reg(3, 0, 5, 5, 1)
+#define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0)
+#define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1)
+
+#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0)
+#define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0)
+
+#define SYS_PAR_EL1_F BIT(0)
+#define SYS_PAR_EL1_FST GENMASK(6, 1)
+
+/*** Statistical Profiling Extension ***/
+/* ID registers */
+#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7)
+#define SYS_PMSIDR_EL1_FE_SHIFT 0
+#define SYS_PMSIDR_EL1_FT_SHIFT 1
+#define SYS_PMSIDR_EL1_FL_SHIFT 2
+#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3
+#define SYS_PMSIDR_EL1_LDS_SHIFT 4
+#define SYS_PMSIDR_EL1_ERND_SHIFT 5
+#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL
+#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12
+#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL
+#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16
+#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL
+
+#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7)
+#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0
+#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU
+#define SYS_PMBIDR_EL1_P_SHIFT 4
+#define SYS_PMBIDR_EL1_F_SHIFT 5
+
+/* Sampling controls */
+#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0)
+#define SYS_PMSCR_EL1_E0SPE_SHIFT 0
+#define SYS_PMSCR_EL1_E1SPE_SHIFT 1
+#define SYS_PMSCR_EL1_CX_SHIFT 3
+#define SYS_PMSCR_EL1_PA_SHIFT 4
+#define SYS_PMSCR_EL1_TS_SHIFT 5
+#define SYS_PMSCR_EL1_PCT_SHIFT 6
+
+#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0)
+#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0
+#define SYS_PMSCR_EL2_E2SPE_SHIFT 1
+#define SYS_PMSCR_EL2_CX_SHIFT 3
+#define SYS_PMSCR_EL2_PA_SHIFT 4
+#define SYS_PMSCR_EL2_TS_SHIFT 5
+#define SYS_PMSCR_EL2_PCT_SHIFT 6
+
+#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2)
+
+#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3)
+#define SYS_PMSIRR_EL1_RND_SHIFT 0
+#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8
+#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL
+
+/* Filtering controls */
+#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1)
+
+#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4)
+#define SYS_PMSFCR_EL1_FE_SHIFT 0
+#define SYS_PMSFCR_EL1_FT_SHIFT 1
+#define SYS_PMSFCR_EL1_FL_SHIFT 2
+#define SYS_PMSFCR_EL1_B_SHIFT 16
+#define SYS_PMSFCR_EL1_LD_SHIFT 17
+#define SYS_PMSFCR_EL1_ST_SHIFT 18
+
+#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5)
+#define SYS_PMSEVFR_EL1_RES0_8_2 \
+ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
+ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
+#define SYS_PMSEVFR_EL1_RES0_8_3 \
+ (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
+
+#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6)
+#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0
+
+/* Buffer controls */
+#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0)
+#define SYS_PMBLIMITR_EL1_E_SHIFT 0
+#define SYS_PMBLIMITR_EL1_FM_SHIFT 1
+#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL
+#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
+
+#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1)
+
+/* Buffer error reporting */
+#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3)
+#define SYS_PMBSR_EL1_COLL_SHIFT 16
+#define SYS_PMBSR_EL1_S_SHIFT 17
+#define SYS_PMBSR_EL1_EA_SHIFT 18
+#define SYS_PMBSR_EL1_DL_SHIFT 19
+#define SYS_PMBSR_EL1_EC_SHIFT 26
+#define SYS_PMBSR_EL1_EC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
+#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
+
+#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0
+#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0
+#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL
+
+#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+
+/*** End of Statistical Profiling Extension ***/
+
+/*
+ * TRBE Registers
+ */
+#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0)
+#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1)
+#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2)
+#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3)
+#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4)
+#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6)
+#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7)
+
+#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0)
+#define TRBLIMITR_LIMIT_SHIFT 12
+#define TRBLIMITR_NVM BIT(5)
+#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_TRIG_MODE_SHIFT 3
+#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0)
+#define TRBLIMITR_FILL_MODE_SHIFT 1
+#define TRBLIMITR_ENABLE BIT(0)
+#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0)
+#define TRBPTR_PTR_SHIFT 0
+#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0)
+#define TRBBASER_BASE_SHIFT 12
+#define TRBSR_EC_MASK GENMASK(5, 0)
+#define TRBSR_EC_SHIFT 26
+#define TRBSR_IRQ BIT(22)
+#define TRBSR_TRG BIT(21)
+#define TRBSR_WRAP BIT(20)
+#define TRBSR_ABORT BIT(18)
+#define TRBSR_STOP BIT(17)
+#define TRBSR_MSS_MASK GENMASK(15, 0)
+#define TRBSR_MSS_SHIFT 0
+#define TRBSR_BSC_MASK GENMASK(5, 0)
+#define TRBSR_BSC_SHIFT 0
+#define TRBSR_FSC_MASK GENMASK(5, 0)
+#define TRBSR_FSC_SHIFT 0
+#define TRBMAR_SHARE_MASK GENMASK(1, 0)
+#define TRBMAR_SHARE_SHIFT 8
+#define TRBMAR_OUTER_MASK GENMASK(3, 0)
+#define TRBMAR_OUTER_SHIFT 4
+#define TRBMAR_INNER_MASK GENMASK(3, 0)
+#define TRBMAR_INNER_SHIFT 0
+#define TRBTRG_TRG_MASK GENMASK(31, 0)
+#define TRBTRG_TRG_SHIFT 0
+#define TRBIDR_FLAG BIT(5)
+#define TRBIDR_PROG BIT(4)
+#define TRBIDR_ALIGN_MASK GENMASK(3, 0)
+#define TRBIDR_ALIGN_SHIFT 0
+
+#define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1)
+#define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2)
+
+#define SYS_PMMIR_EL1 sys_reg(3, 0, 9, 14, 6)
+
+#define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0)
+#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
+
+#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0)
+#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1)
+#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2)
+#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3)
+#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7)
+
+#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
+#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
+
+#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
+#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
+#define SYS_ICC_HPPIR0_EL1 sys_reg(3, 0, 12, 8, 2)
+#define SYS_ICC_BPR0_EL1 sys_reg(3, 0, 12, 8, 3)
+#define SYS_ICC_AP0Rn_EL1(n) sys_reg(3, 0, 12, 8, 4 | n)
+#define SYS_ICC_AP0R0_EL1 SYS_ICC_AP0Rn_EL1(0)
+#define SYS_ICC_AP0R1_EL1 SYS_ICC_AP0Rn_EL1(1)
+#define SYS_ICC_AP0R2_EL1 SYS_ICC_AP0Rn_EL1(2)
+#define SYS_ICC_AP0R3_EL1 SYS_ICC_AP0Rn_EL1(3)
+#define SYS_ICC_AP1Rn_EL1(n) sys_reg(3, 0, 12, 9, n)
+#define SYS_ICC_AP1R0_EL1 SYS_ICC_AP1Rn_EL1(0)
+#define SYS_ICC_AP1R1_EL1 SYS_ICC_AP1Rn_EL1(1)
+#define SYS_ICC_AP1R2_EL1 SYS_ICC_AP1Rn_EL1(2)
+#define SYS_ICC_AP1R3_EL1 SYS_ICC_AP1Rn_EL1(3)
+#define SYS_ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
+#define SYS_ICC_RPR_EL1 sys_reg(3, 0, 12, 11, 3)
+#define SYS_ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
+#define SYS_ICC_ASGI1R_EL1 sys_reg(3, 0, 12, 11, 6)
+#define SYS_ICC_SGI0R_EL1 sys_reg(3, 0, 12, 11, 7)
+#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
+#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define SYS_ICC_HPPIR1_EL1 sys_reg(3, 0, 12, 12, 2)
+#define SYS_ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
+#define SYS_ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
+#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
+#define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6)
+#define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+
+#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1)
+#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 0, 4)
+
+#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7)
+
+#define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0)
+
+#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0)
+#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1)
+#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4)
+#define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7)
+
+#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0)
+
+#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
+#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
+
+#define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0)
+#define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1)
+
+#define SYS_PMCR_EL0 sys_reg(3, 3, 9, 12, 0)
+#define SYS_PMCNTENSET_EL0 sys_reg(3, 3, 9, 12, 1)
+#define SYS_PMCNTENCLR_EL0 sys_reg(3, 3, 9, 12, 2)
+#define SYS_PMOVSCLR_EL0 sys_reg(3, 3, 9, 12, 3)
+#define SYS_PMSWINC_EL0 sys_reg(3, 3, 9, 12, 4)
+#define SYS_PMSELR_EL0 sys_reg(3, 3, 9, 12, 5)
+#define SYS_PMCEID0_EL0 sys_reg(3, 3, 9, 12, 6)
+#define SYS_PMCEID1_EL0 sys_reg(3, 3, 9, 12, 7)
+#define SYS_PMCCNTR_EL0 sys_reg(3, 3, 9, 13, 0)
+#define SYS_PMXEVTYPER_EL0 sys_reg(3, 3, 9, 13, 1)
+#define SYS_PMXEVCNTR_EL0 sys_reg(3, 3, 9, 13, 2)
+#define SYS_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0)
+#define SYS_PMOVSSET_EL0 sys_reg(3, 3, 9, 14, 3)
+
+#define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2)
+#define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3)
+
+#define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7)
+
+/* Definitions for system register interface to AMU for ARMv8.4 onwards */
+#define SYS_AM_EL0(crm, op2) sys_reg(3, 3, 13, (crm), (op2))
+#define SYS_AMCR_EL0 SYS_AM_EL0(2, 0)
+#define SYS_AMCFGR_EL0 SYS_AM_EL0(2, 1)
+#define SYS_AMCGCR_EL0 SYS_AM_EL0(2, 2)
+#define SYS_AMUSERENR_EL0 SYS_AM_EL0(2, 3)
+#define SYS_AMCNTENCLR0_EL0 SYS_AM_EL0(2, 4)
+#define SYS_AMCNTENSET0_EL0 SYS_AM_EL0(2, 5)
+#define SYS_AMCNTENCLR1_EL0 SYS_AM_EL0(3, 0)
+#define SYS_AMCNTENSET1_EL0 SYS_AM_EL0(3, 1)
+
+/*
+ * Group 0 of activity monitors (architected):
+ * op0 op1 CRn CRm op2
+ * Counter: 11 011 1101 010:n<3> n<2:0>
+ * Type: 11 011 1101 011:n<3> n<2:0>
+ * n: 0-15
+ *
+ * Group 1 of activity monitors (auxiliary):
+ * op0 op1 CRn CRm op2
+ * Counter: 11 011 1101 110:n<3> n<2:0>
+ * Type: 11 011 1101 111:n<3> n<2:0>
+ * n: 0-15
+ */
+
+#define SYS_AMEVCNTR0_EL0(n) SYS_AM_EL0(4 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVTYPER0_EL0(n) SYS_AM_EL0(6 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVCNTR1_EL0(n) SYS_AM_EL0(12 + ((n) >> 3), (n) & 7)
+#define SYS_AMEVTYPER1_EL0(n) SYS_AM_EL0(14 + ((n) >> 3), (n) & 7)
+
+/* AMU v1: Fixed (architecturally defined) activity monitors */
+#define SYS_AMEVCNTR0_CORE_EL0 SYS_AMEVCNTR0_EL0(0)
+#define SYS_AMEVCNTR0_CONST_EL0 SYS_AMEVCNTR0_EL0(1)
+#define SYS_AMEVCNTR0_INST_RET_EL0 SYS_AMEVCNTR0_EL0(2)
+#define SYS_AMEVCNTR0_MEM_STALL SYS_AMEVCNTR0_EL0(3)
+
+#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
+
+#define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0)
+#define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1)
+#define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2)
+
+#define SYS_CNTV_CTL_EL0 sys_reg(3, 3, 14, 3, 1)
+#define SYS_CNTV_CVAL_EL0 sys_reg(3, 3, 14, 3, 2)
+
+#define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0)
+#define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1)
+#define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0)
+
+#define __PMEV_op2(n) ((n) & 0x7)
+#define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3))
+#define SYS_PMEVCNTRn_EL0(n) sys_reg(3, 3, 14, __CNTR_CRm(n), __PMEV_op2(n))
+#define __TYPER_CRm(n) (0xc | (((n) >> 3) & 0x3))
+#define SYS_PMEVTYPERn_EL0(n) sys_reg(3, 3, 14, __TYPER_CRm(n), __PMEV_op2(n))
+
+#define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7)
+
+#define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0)
+#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4)
+#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5)
+#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6)
+#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
+#define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1)
+#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0)
+#define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4)
+#define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5)
+#define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6)
+#define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0)
+#define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1)
+#define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1)
+#define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0)
+#define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3)
+#define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0)
+#define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0)
+#define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0)
+
+#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1)
+#define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
+#define SYS_ICH_AP0R0_EL2 __SYS__AP0Rx_EL2(0)
+#define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1)
+#define SYS_ICH_AP0R2_EL2 __SYS__AP0Rx_EL2(2)
+#define SYS_ICH_AP0R3_EL2 __SYS__AP0Rx_EL2(3)
+
+#define __SYS__AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
+#define SYS_ICH_AP1R0_EL2 __SYS__AP1Rx_EL2(0)
+#define SYS_ICH_AP1R1_EL2 __SYS__AP1Rx_EL2(1)
+#define SYS_ICH_AP1R2_EL2 __SYS__AP1Rx_EL2(2)
+#define SYS_ICH_AP1R3_EL2 __SYS__AP1Rx_EL2(3)
+
+#define SYS_ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
+#define SYS_ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
+#define SYS_ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
+#define SYS_ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
+#define SYS_ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
+#define SYS_ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
+#define SYS_ICH_ELRSR_EL2 sys_reg(3, 4, 12, 11, 5)
+#define SYS_ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
+
+#define __SYS__LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
+#define SYS_ICH_LR0_EL2 __SYS__LR0_EL2(0)
+#define SYS_ICH_LR1_EL2 __SYS__LR0_EL2(1)
+#define SYS_ICH_LR2_EL2 __SYS__LR0_EL2(2)
+#define SYS_ICH_LR3_EL2 __SYS__LR0_EL2(3)
+#define SYS_ICH_LR4_EL2 __SYS__LR0_EL2(4)
+#define SYS_ICH_LR5_EL2 __SYS__LR0_EL2(5)
+#define SYS_ICH_LR6_EL2 __SYS__LR0_EL2(6)
+#define SYS_ICH_LR7_EL2 __SYS__LR0_EL2(7)
+
+#define __SYS__LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
+#define SYS_ICH_LR8_EL2 __SYS__LR8_EL2(0)
+#define SYS_ICH_LR9_EL2 __SYS__LR8_EL2(1)
+#define SYS_ICH_LR10_EL2 __SYS__LR8_EL2(2)
+#define SYS_ICH_LR11_EL2 __SYS__LR8_EL2(3)
+#define SYS_ICH_LR12_EL2 __SYS__LR8_EL2(4)
+#define SYS_ICH_LR13_EL2 __SYS__LR8_EL2(5)
+#define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6)
+#define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7)
+
+/* VHE encodings for architectural EL0/1 system registers */
+#define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0)
+#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2)
+#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0)
+#define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0)
+#define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1)
+#define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2)
+#define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0)
+#define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1)
+#define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0)
+#define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1)
+#define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0)
+#define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0)
+#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0)
+#define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0)
+#define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0)
+#define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0)
+#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1)
+#define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0)
+#define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0)
+#define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1)
+#define SYS_CNTP_CVAL_EL02 sys_reg(3, 5, 14, 2, 2)
+#define SYS_CNTV_TVAL_EL02 sys_reg(3, 5, 14, 3, 0)
+#define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1)
+#define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2)
+
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_DSSBS (BIT(44))
+#define SCTLR_ELx_ATA (BIT(43))
+
+#define SCTLR_ELx_TCF_SHIFT 40
+#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT)
+#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT)
+
+#define SCTLR_ELx_ENIA_SHIFT 31
+
+#define SCTLR_ELx_ITFSB (BIT(37))
+#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT))
+#define SCTLR_ELx_ENIB (BIT(30))
+#define SCTLR_ELx_ENDA (BIT(27))
+#define SCTLR_ELx_EE (BIT(25))
+#define SCTLR_ELx_IESB (BIT(21))
+#define SCTLR_ELx_WXN (BIT(19))
+#define SCTLR_ELx_ENDB (BIT(13))
+#define SCTLR_ELx_I (BIT(12))
+#define SCTLR_ELx_SA (BIT(3))
+#define SCTLR_ELx_C (BIT(2))
+#define SCTLR_ELx_A (BIT(1))
+#define SCTLR_ELx_M (BIT(0))
+
+/* SCTLR_EL2 specific flags. */
+#define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \
+ (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \
+ (BIT(29)))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL2 SCTLR_ELx_EE
+#else
+#define ENDIAN_SET_EL2 0
+#endif
+
+#define INIT_SCTLR_EL2_MMU_ON \
+ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_ELx_I | \
+ SCTLR_ELx_IESB | SCTLR_ELx_WXN | ENDIAN_SET_EL2 | \
+ SCTLR_ELx_ITFSB | SCTLR_EL2_RES1)
+
+#define INIT_SCTLR_EL2_MMU_OFF \
+ (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_EPAN (BIT(57))
+#define SCTLR_EL1_ATA0 (BIT(42))
+
+#define SCTLR_EL1_TCF0_SHIFT 38
+#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT)
+#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT)
+
+#define SCTLR_EL1_BT1 (BIT(36))
+#define SCTLR_EL1_BT0 (BIT(35))
+#define SCTLR_EL1_UCI (BIT(26))
+#define SCTLR_EL1_E0E (BIT(24))
+#define SCTLR_EL1_SPAN (BIT(23))
+#define SCTLR_EL1_NTWE (BIT(18))
+#define SCTLR_EL1_NTWI (BIT(16))
+#define SCTLR_EL1_UCT (BIT(15))
+#define SCTLR_EL1_DZE (BIT(14))
+#define SCTLR_EL1_UMA (BIT(9))
+#define SCTLR_EL1_SED (BIT(8))
+#define SCTLR_EL1_ITD (BIT(7))
+#define SCTLR_EL1_CP15BEN (BIT(5))
+#define SCTLR_EL1_SA0 (BIT(4))
+
+#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \
+ (BIT(29)))
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE)
+#else
+#define ENDIAN_SET_EL1 0
+#endif
+
+#define INIT_SCTLR_EL1_MMU_OFF \
+ (ENDIAN_SET_EL1 | SCTLR_EL1_RES1)
+
+#define INIT_SCTLR_EL1_MMU_ON \
+ (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
+ SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
+ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
+ SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
+ SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
+
+/* MAIR_ELx memory attributes (used by Linux) */
+#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
+#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
+#define MAIR_ATTR_NORMAL_NC UL(0x44)
+#define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
+#define MAIR_ATTR_NORMAL UL(0xff)
+#define MAIR_ATTR_MASK UL(0xff)
+
+/* Position the attr at the correct index */
+#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
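A worked expansion of this macro (editor's illustration): index 4 selects byte 4 of MAIR_ELx, i.e. bits [39:32]:

	/* MAIR_ATTRIDX(MAIR_ATTR_NORMAL, 4) == UL(0xff) << (4 * 8) == UL(0xff) << 32 */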
+
+/* id_aa64isar0 */
+#define ID_AA64ISAR0_RNDR_SHIFT 60
+#define ID_AA64ISAR0_TLB_SHIFT 56
+#define ID_AA64ISAR0_TS_SHIFT 52
+#define ID_AA64ISAR0_FHM_SHIFT 48
+#define ID_AA64ISAR0_DP_SHIFT 44
+#define ID_AA64ISAR0_SM4_SHIFT 40
+#define ID_AA64ISAR0_SM3_SHIFT 36
+#define ID_AA64ISAR0_SHA3_SHIFT 32
+#define ID_AA64ISAR0_RDM_SHIFT 28
+#define ID_AA64ISAR0_ATOMICS_SHIFT 20
+#define ID_AA64ISAR0_CRC32_SHIFT 16
+#define ID_AA64ISAR0_SHA2_SHIFT 12
+#define ID_AA64ISAR0_SHA1_SHIFT 8
+#define ID_AA64ISAR0_AES_SHIFT 4
+
+#define ID_AA64ISAR0_TLB_RANGE_NI 0x0
+#define ID_AA64ISAR0_TLB_RANGE 0x2
+
+/* id_aa64isar1 */
+#define ID_AA64ISAR1_I8MM_SHIFT 52
+#define ID_AA64ISAR1_DGH_SHIFT 48
+#define ID_AA64ISAR1_BF16_SHIFT 44
+#define ID_AA64ISAR1_SPECRES_SHIFT 40
+#define ID_AA64ISAR1_SB_SHIFT 36
+#define ID_AA64ISAR1_FRINTTS_SHIFT 32
+#define ID_AA64ISAR1_GPI_SHIFT 28
+#define ID_AA64ISAR1_GPA_SHIFT 24
+#define ID_AA64ISAR1_LRCPC_SHIFT 20
+#define ID_AA64ISAR1_FCMA_SHIFT 16
+#define ID_AA64ISAR1_JSCVT_SHIFT 12
+#define ID_AA64ISAR1_API_SHIFT 8
+#define ID_AA64ISAR1_APA_SHIFT 4
+#define ID_AA64ISAR1_DPB_SHIFT 0
+
+#define ID_AA64ISAR1_APA_NI 0x0
+#define ID_AA64ISAR1_APA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2
+#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_API_NI 0x0
+#define ID_AA64ISAR1_API_IMP_DEF 0x1
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4
+#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5
+#define ID_AA64ISAR1_GPA_NI 0x0
+#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1
+#define ID_AA64ISAR1_GPI_NI 0x0
+#define ID_AA64ISAR1_GPI_IMP_DEF 0x1
+
+/* id_aa64pfr0 */
+#define ID_AA64PFR0_CSV3_SHIFT 60
+#define ID_AA64PFR0_CSV2_SHIFT 56
+#define ID_AA64PFR0_DIT_SHIFT 48
+#define ID_AA64PFR0_AMU_SHIFT 44
+#define ID_AA64PFR0_MPAM_SHIFT 40
+#define ID_AA64PFR0_SEL2_SHIFT 36
+#define ID_AA64PFR0_SVE_SHIFT 32
+#define ID_AA64PFR0_RAS_SHIFT 28
+#define ID_AA64PFR0_GIC_SHIFT 24
+#define ID_AA64PFR0_ASIMD_SHIFT 20
+#define ID_AA64PFR0_FP_SHIFT 16
+#define ID_AA64PFR0_EL3_SHIFT 12
+#define ID_AA64PFR0_EL2_SHIFT 8
+#define ID_AA64PFR0_EL1_SHIFT 4
+#define ID_AA64PFR0_EL0_SHIFT 0
+
+#define ID_AA64PFR0_AMU 0x1
+#define ID_AA64PFR0_SVE 0x1
+#define ID_AA64PFR0_RAS_V1 0x1
+#define ID_AA64PFR0_RAS_V1P1 0x2
+#define ID_AA64PFR0_FP_NI 0xf
+#define ID_AA64PFR0_FP_SUPPORTED 0x0
+#define ID_AA64PFR0_ASIMD_NI 0xf
+#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
+#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1
+#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2
+
+/* id_aa64pfr1 */
+#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
+#define ID_AA64PFR1_RASFRAC_SHIFT 12
+#define ID_AA64PFR1_MTE_SHIFT 8
+#define ID_AA64PFR1_SSBS_SHIFT 4
+#define ID_AA64PFR1_BT_SHIFT 0
+
+#define ID_AA64PFR1_SSBS_PSTATE_NI 0
+#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1
+#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
+#define ID_AA64PFR1_BT_BTI 0x1
+
+#define ID_AA64PFR1_MTE_NI 0x0
+#define ID_AA64PFR1_MTE_EL0 0x1
+#define ID_AA64PFR1_MTE 0x2
+
+/* id_aa64zfr0 */
+#define ID_AA64ZFR0_F64MM_SHIFT 56
+#define ID_AA64ZFR0_F32MM_SHIFT 52
+#define ID_AA64ZFR0_I8MM_SHIFT 44
+#define ID_AA64ZFR0_SM4_SHIFT 40
+#define ID_AA64ZFR0_SHA3_SHIFT 32
+#define ID_AA64ZFR0_BF16_SHIFT 20
+#define ID_AA64ZFR0_BITPERM_SHIFT 16
+#define ID_AA64ZFR0_AES_SHIFT 4
+#define ID_AA64ZFR0_SVEVER_SHIFT 0
+
+#define ID_AA64ZFR0_F64MM 0x1
+#define ID_AA64ZFR0_F32MM 0x1
+#define ID_AA64ZFR0_I8MM 0x1
+#define ID_AA64ZFR0_BF16 0x1
+#define ID_AA64ZFR0_SM4 0x1
+#define ID_AA64ZFR0_SHA3 0x1
+#define ID_AA64ZFR0_BITPERM 0x1
+#define ID_AA64ZFR0_AES 0x1
+#define ID_AA64ZFR0_AES_PMULL 0x2
+#define ID_AA64ZFR0_SVEVER_SVE2 0x1
+
+/* id_aa64mmfr0 */
+#define ID_AA64MMFR0_ECV_SHIFT 60
+#define ID_AA64MMFR0_FGT_SHIFT 56
+#define ID_AA64MMFR0_EXS_SHIFT 44
+#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40
+#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36
+#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32
+#define ID_AA64MMFR0_TGRAN4_SHIFT 28
+#define ID_AA64MMFR0_TGRAN64_SHIFT 24
+#define ID_AA64MMFR0_TGRAN16_SHIFT 20
+#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
+#define ID_AA64MMFR0_SNSMEM_SHIFT 12
+#define ID_AA64MMFR0_BIGENDEL_SHIFT 8
+#define ID_AA64MMFR0_ASID_SHIFT 4
+#define ID_AA64MMFR0_PARANGE_SHIFT 0
+
+#define ID_AA64MMFR0_ASID_8 0x0
+#define ID_AA64MMFR0_ASID_16 0x2
+
+#define ID_AA64MMFR0_TGRAN4_NI 0xf
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN64_NI 0xf
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7
+#define ID_AA64MMFR0_TGRAN16_NI 0x0
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf
+
+#define ID_AA64MMFR0_PARANGE_32 0x0
+#define ID_AA64MMFR0_PARANGE_36 0x1
+#define ID_AA64MMFR0_PARANGE_40 0x2
+#define ID_AA64MMFR0_PARANGE_42 0x3
+#define ID_AA64MMFR0_PARANGE_44 0x4
+#define ID_AA64MMFR0_PARANGE_48 0x5
+#define ID_AA64MMFR0_PARANGE_52 0x6
+
+#define ARM64_MIN_PARANGE_BITS 32
+
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2
+#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7
+
+#ifdef CONFIG_ARM64_PA_BITS_52
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
+#else
+#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
+#endif
+
+/* id_aa64mmfr1 */
+#define ID_AA64MMFR1_ETS_SHIFT 36
+#define ID_AA64MMFR1_TWED_SHIFT 32
+#define ID_AA64MMFR1_XNX_SHIFT 28
+#define ID_AA64MMFR1_SPECSEI_SHIFT 24
+#define ID_AA64MMFR1_PAN_SHIFT 20
+#define ID_AA64MMFR1_LOR_SHIFT 16
+#define ID_AA64MMFR1_HPD_SHIFT 12
+#define ID_AA64MMFR1_VHE_SHIFT 8
+#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
+#define ID_AA64MMFR1_HADBS_SHIFT 0
+
+#define ID_AA64MMFR1_VMIDBITS_8 0
+#define ID_AA64MMFR1_VMIDBITS_16 2
+
+/* id_aa64mmfr2 */
+#define ID_AA64MMFR2_E0PD_SHIFT 60
+#define ID_AA64MMFR2_EVT_SHIFT 56
+#define ID_AA64MMFR2_BBM_SHIFT 52
+#define ID_AA64MMFR2_TTL_SHIFT 48
+#define ID_AA64MMFR2_FWB_SHIFT 40
+#define ID_AA64MMFR2_IDS_SHIFT 36
+#define ID_AA64MMFR2_AT_SHIFT 32
+#define ID_AA64MMFR2_ST_SHIFT 28
+#define ID_AA64MMFR2_NV_SHIFT 24
+#define ID_AA64MMFR2_CCIDX_SHIFT 20
+#define ID_AA64MMFR2_LVA_SHIFT 16
+#define ID_AA64MMFR2_IESB_SHIFT 12
+#define ID_AA64MMFR2_LSM_SHIFT 8
+#define ID_AA64MMFR2_UAO_SHIFT 4
+#define ID_AA64MMFR2_CNP_SHIFT 0
+
+/* id_aa64dfr0 */
+#define ID_AA64DFR0_MTPMU_SHIFT 48
+#define ID_AA64DFR0_TRBE_SHIFT 44
+#define ID_AA64DFR0_TRACE_FILT_SHIFT 40
+#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36
+#define ID_AA64DFR0_PMSVER_SHIFT 32
+#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
+#define ID_AA64DFR0_WRPS_SHIFT 20
+#define ID_AA64DFR0_BRPS_SHIFT 12
+#define ID_AA64DFR0_PMUVER_SHIFT 8
+#define ID_AA64DFR0_TRACEVER_SHIFT 4
+#define ID_AA64DFR0_DEBUGVER_SHIFT 0
+
+#define ID_AA64DFR0_PMUVER_8_0 0x1
+#define ID_AA64DFR0_PMUVER_8_1 0x4
+#define ID_AA64DFR0_PMUVER_8_4 0x5
+#define ID_AA64DFR0_PMUVER_8_5 0x6
+#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
+
+#define ID_AA64DFR0_PMSVER_8_2 0x1
+#define ID_AA64DFR0_PMSVER_8_3 0x2
+
+#define ID_DFR0_PERFMON_SHIFT 24
+
+#define ID_DFR0_PERFMON_8_0 0x3
+#define ID_DFR0_PERFMON_8_1 0x4
+#define ID_DFR0_PERFMON_8_4 0x5
+#define ID_DFR0_PERFMON_8_5 0x6
+
+#define ID_ISAR4_SWP_FRAC_SHIFT 28
+#define ID_ISAR4_PSR_M_SHIFT 24
+#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20
+#define ID_ISAR4_BARRIER_SHIFT 16
+#define ID_ISAR4_SMC_SHIFT 12
+#define ID_ISAR4_WRITEBACK_SHIFT 8
+#define ID_ISAR4_WITHSHIFTS_SHIFT 4
+#define ID_ISAR4_UNPRIV_SHIFT 0
+
+#define ID_DFR1_MTPMU_SHIFT 0
+
+#define ID_ISAR0_DIVIDE_SHIFT 24
+#define ID_ISAR0_DEBUG_SHIFT 20
+#define ID_ISAR0_COPROC_SHIFT 16
+#define ID_ISAR0_CMPBRANCH_SHIFT 12
+#define ID_ISAR0_BITFIELD_SHIFT 8
+#define ID_ISAR0_BITCOUNT_SHIFT 4
+#define ID_ISAR0_SWAP_SHIFT 0
+
+#define ID_ISAR5_RDM_SHIFT 24
+#define ID_ISAR5_CRC32_SHIFT 16
+#define ID_ISAR5_SHA2_SHIFT 12
+#define ID_ISAR5_SHA1_SHIFT 8
+#define ID_ISAR5_AES_SHIFT 4
+#define ID_ISAR5_SEVL_SHIFT 0
+
+#define ID_ISAR6_I8MM_SHIFT 24
+#define ID_ISAR6_BF16_SHIFT 20
+#define ID_ISAR6_SPECRES_SHIFT 16
+#define ID_ISAR6_SB_SHIFT 12
+#define ID_ISAR6_FHM_SHIFT 8
+#define ID_ISAR6_DP_SHIFT 4
+#define ID_ISAR6_JSCVT_SHIFT 0
+
+#define ID_MMFR0_INNERSHR_SHIFT 28
+#define ID_MMFR0_FCSE_SHIFT 24
+#define ID_MMFR0_AUXREG_SHIFT 20
+#define ID_MMFR0_TCM_SHIFT 16
+#define ID_MMFR0_SHARELVL_SHIFT 12
+#define ID_MMFR0_OUTERSHR_SHIFT 8
+#define ID_MMFR0_PMSA_SHIFT 4
+#define ID_MMFR0_VMSA_SHIFT 0
+
+#define ID_MMFR4_EVT_SHIFT 28
+#define ID_MMFR4_CCIDX_SHIFT 24
+#define ID_MMFR4_LSM_SHIFT 20
+#define ID_MMFR4_HPDS_SHIFT 16
+#define ID_MMFR4_CNP_SHIFT 12
+#define ID_MMFR4_XNX_SHIFT 8
+#define ID_MMFR4_AC2_SHIFT 4
+#define ID_MMFR4_SPECSEI_SHIFT 0
+
+#define ID_MMFR5_ETS_SHIFT 0
+
+#define ID_PFR0_DIT_SHIFT 24
+#define ID_PFR0_CSV2_SHIFT 16
+#define ID_PFR0_STATE3_SHIFT 12
+#define ID_PFR0_STATE2_SHIFT 8
+#define ID_PFR0_STATE1_SHIFT 4
+#define ID_PFR0_STATE0_SHIFT 0
+
+#define ID_DFR0_PERFMON_SHIFT 24
+#define ID_DFR0_MPROFDBG_SHIFT 20
+#define ID_DFR0_MMAPTRC_SHIFT 16
+#define ID_DFR0_COPTRC_SHIFT 12
+#define ID_DFR0_MMAPDBG_SHIFT 8
+#define ID_DFR0_COPSDBG_SHIFT 4
+#define ID_DFR0_COPDBG_SHIFT 0
+
+#define ID_PFR2_SSBS_SHIFT 4
+#define ID_PFR2_CSV3_SHIFT 0
+
+#define MVFR0_FPROUND_SHIFT 28
+#define MVFR0_FPSHVEC_SHIFT 24
+#define MVFR0_FPSQRT_SHIFT 20
+#define MVFR0_FPDIVIDE_SHIFT 16
+#define MVFR0_FPTRAP_SHIFT 12
+#define MVFR0_FPDP_SHIFT 8
+#define MVFR0_FPSP_SHIFT 4
+#define MVFR0_SIMD_SHIFT 0
+
+#define MVFR1_SIMDFMAC_SHIFT 28
+#define MVFR1_FPHP_SHIFT 24
+#define MVFR1_SIMDHP_SHIFT 20
+#define MVFR1_SIMDSP_SHIFT 16
+#define MVFR1_SIMDINT_SHIFT 12
+#define MVFR1_SIMDLS_SHIFT 8
+#define MVFR1_FPDNAN_SHIFT 4
+#define MVFR1_FPFTZ_SHIFT 0
+
+#define ID_PFR1_GIC_SHIFT 28
+#define ID_PFR1_VIRT_FRAC_SHIFT 24
+#define ID_PFR1_SEC_FRAC_SHIFT 20
+#define ID_PFR1_GENTIMER_SHIFT 16
+#define ID_PFR1_VIRTUALIZATION_SHIFT 12
+#define ID_PFR1_MPROGMOD_SHIFT 8
+#define ID_PFR1_SECURITY_SHIFT 4
+#define ID_PFR1_PROGMOD_SHIFT 0
+
+#if defined(CONFIG_ARM64_4K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
+#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT
+#elif defined(CONFIG_ARM64_16K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
+#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT
+#elif defined(CONFIG_ARM64_64K_PAGES)
+#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
+#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT
+#endif
+
+#define MVFR2_FPMISC_SHIFT 4
+#define MVFR2_SIMDMISC_SHIFT 0
+
+#define DCZID_DZP_SHIFT 4
+#define DCZID_BS_SHIFT 0
+
+/*
+ * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which
+ * are reserved by the SVE architecture for future expansion of the LEN
+ * field, with compatible semantics.
+ */
+#define ZCR_ELx_LEN_SHIFT 0
+#define ZCR_ELx_LEN_SIZE 9
+#define ZCR_ELx_LEN_MASK 0x1ff
+
+#define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */
+#define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */
+#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
+
+/* TCR EL1 Bit Definitions */
+#define SYS_TCR_EL1_TCMA1 (BIT(58))
+#define SYS_TCR_EL1_TCMA0 (BIT(57))
+
+/* GCR_EL1 Definitions */
+#define SYS_GCR_EL1_RRND (BIT(16))
+#define SYS_GCR_EL1_EXCL_MASK 0xffffUL
+
+/* RGSR_EL1 Definitions */
+#define SYS_RGSR_EL1_TAG_MASK 0xfUL
+#define SYS_RGSR_EL1_SEED_SHIFT 8
+#define SYS_RGSR_EL1_SEED_MASK 0xffffUL
+
+/* GMID_EL1 field definitions */
+#define SYS_GMID_EL1_BS_SHIFT 0
+#define SYS_GMID_EL1_BS_SIZE 4
+
+/* TFSR{,E0}_EL1 bit definitions */
+#define SYS_TFSR_EL1_TF0_SHIFT 0
+#define SYS_TFSR_EL1_TF1_SHIFT 1
+#define SYS_TFSR_EL1_TF0 (UL(1) << SYS_TFSR_EL1_TF0_SHIFT)
+#define SYS_TFSR_EL1_TF1 (UL(1) << SYS_TFSR_EL1_TF1_SHIFT)
+
+/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
+#define SYS_MPIDR_SAFE_VAL (BIT(31))
+
+#define TRFCR_ELx_TS_SHIFT 5
+#define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT)
+#define TRFCR_EL2_CX BIT(3)
+#define TRFCR_ELx_ExTRE BIT(1)
+#define TRFCR_ELx_E0TRE BIT(0)
+
+
+/* GIC Hypervisor interface registers */
+/* ICH_MISR_EL2 bit definitions */
+#define ICH_MISR_EOI (1 << 0)
+#define ICH_MISR_U (1 << 1)
+
+/* ICH_LR*_EL2 bit definitions */
+#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
+
+#define ICH_LR_EOI (1ULL << 41)
+#define ICH_LR_GROUP (1ULL << 60)
+#define ICH_LR_HW (1ULL << 61)
+#define ICH_LR_STATE (3ULL << 62)
+#define ICH_LR_PENDING_BIT (1ULL << 62)
+#define ICH_LR_ACTIVE_BIT (1ULL << 63)
+#define ICH_LR_PHYS_ID_SHIFT 32
+#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)
+#define ICH_LR_PRIORITY_SHIFT 48
+#define ICH_LR_PRIORITY_MASK (0xffULL << ICH_LR_PRIORITY_SHIFT)
+
+/* ICH_HCR_EL2 bit definitions */
+#define ICH_HCR_EN (1 << 0)
+#define ICH_HCR_UIE (1 << 1)
+#define ICH_HCR_NPIE (1 << 3)
+#define ICH_HCR_TC (1 << 10)
+#define ICH_HCR_TALL0 (1 << 11)
+#define ICH_HCR_TALL1 (1 << 12)
+#define ICH_HCR_EOIcount_SHIFT 27
+#define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT)
+
+/* ICH_VMCR_EL2 bit definitions */
+#define ICH_VMCR_ACK_CTL_SHIFT 2
+#define ICH_VMCR_ACK_CTL_MASK (1 << ICH_VMCR_ACK_CTL_SHIFT)
+#define ICH_VMCR_FIQ_EN_SHIFT 3
+#define ICH_VMCR_FIQ_EN_MASK (1 << ICH_VMCR_FIQ_EN_SHIFT)
+#define ICH_VMCR_CBPR_SHIFT 4
+#define ICH_VMCR_CBPR_MASK (1 << ICH_VMCR_CBPR_SHIFT)
+#define ICH_VMCR_EOIM_SHIFT 9
+#define ICH_VMCR_EOIM_MASK (1 << ICH_VMCR_EOIM_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT 18
+#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT 21
+#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT 24
+#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
+#define ICH_VMCR_ENG0_SHIFT 0
+#define ICH_VMCR_ENG0_MASK (1 << ICH_VMCR_ENG0_SHIFT)
+#define ICH_VMCR_ENG1_SHIFT 1
+#define ICH_VMCR_ENG1_MASK (1 << ICH_VMCR_ENG1_SHIFT)
+
+/* ICH_VTR_EL2 bit definitions */
+#define ICH_VTR_PRI_BITS_SHIFT 29
+#define ICH_VTR_PRI_BITS_MASK (7 << ICH_VTR_PRI_BITS_SHIFT)
+#define ICH_VTR_ID_BITS_SHIFT 23
+#define ICH_VTR_ID_BITS_MASK (7 << ICH_VTR_ID_BITS_SHIFT)
+#define ICH_VTR_SEIS_SHIFT 22
+#define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT)
+#define ICH_VTR_A3V_SHIFT 21
+#define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT)
+
+#define ARM64_FEATURE_FIELD_BITS 4
+
+/* Create a mask for the feature bits of the specified feature. */
+#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
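A worked expansion (editor's illustration), using the ID_AA64PFR0_SVE_SHIFT value of 32 defined above:

	/*
	 * ARM64_FEATURE_MASK(ID_AA64PFR0_SVE)
	 *   == GENMASK_ULL(32 + 4 - 1, 32)
	 *   == GENMASK_ULL(35, 32)   == 0x0000000f00000000
	 */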
+
+#ifdef __ASSEMBLY__
+
+ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+ .equ .L__reg_num_x\num, \num
+ .endr
+ .equ .L__reg_num_xzr, 31
+
+ .macro mrs_s, rt, sreg
+ __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
+ .endm
+
+ .macro msr_s, sreg, rt
+ __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
+ .endm
+
+#else
+
+#include <linux/build_bug.h>
+#include <linux/types.h>
+#include <asm/alternative.h>
+
+#define __DEFINE_MRS_MSR_S_REGNUM \
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \
+" .equ .L__reg_num_x\\num, \\num\n" \
+" .endr\n" \
+" .equ .L__reg_num_xzr, 31\n"
+
+#define DEFINE_MRS_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro mrs_s, rt, sreg\n" \
+ __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \
+" .endm\n"
+
+#define DEFINE_MSR_S \
+ __DEFINE_MRS_MSR_S_REGNUM \
+" .macro msr_s, sreg, rt\n" \
+ __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \
+" .endm\n"
+
+#define UNDEFINE_MRS_S \
+" .purgem mrs_s\n"
+
+#define UNDEFINE_MSR_S \
+" .purgem msr_s\n"
+
+#define __mrs_s(v, r) \
+ DEFINE_MRS_S \
+" mrs_s " v ", " __stringify(r) "\n" \
+ UNDEFINE_MRS_S
+
+#define __msr_s(r, v) \
+ DEFINE_MSR_S \
+" msr_s " __stringify(r) ", " v "\n" \
+ UNDEFINE_MSR_S
+
+/*
+ * Unlike read_cpuid, calls to read_sysreg are never expected to be
+ * optimized away or replaced with synthetic values.
+ */
+#define read_sysreg(r) ({ \
+ u64 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+/*
+ * The "Z" constraint normally means a zero immediate, but when combined with
+ * the "%x0" template means XZR.
+ */
+#define write_sysreg(v, r) do { \
+ u64 __val = (u64)(v); \
+ asm volatile("msr " __stringify(r) ", %x0" \
+ : : "rZ" (__val)); \
+} while (0)
+
+/*
+ * For registers without architectural names, or simply unsupported by
+ * GAS.
+ */
+#define read_sysreg_s(r) ({ \
+ u64 __val; \
+ asm volatile(__mrs_s("%0", r) : "=r" (__val)); \
+ __val; \
+})
+
+#define write_sysreg_s(v, r) do { \
+ u64 __val = (u64)(v); \
+ asm volatile(__msr_s(r, "%x0") : : "rZ" (__val)); \
+} while (0)
+
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg(__scs_new, sysreg); \
+} while (0)
+
+#define sysreg_clear_set_s(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg_s(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg_s(__scs_new, sysreg); \
+} while (0)
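A minimal usage sketch of the accessors defined above (editor's illustration; the function is hypothetical and the register/bit choices are arbitrary examples):

	static inline void example_sysreg_usage(void)
	{
		u64 sctlr = read_sysreg(sctlr_el1);       /* register with an architectural name */
		u64 gmid  = read_sysreg_s(SYS_GMID_EL1);  /* encoded form, for registers GAS may not accept */

		/* Clear the alignment-check bit, set the I-cache enable bit; skips the MSR if nothing changes */
		sysreg_clear_set(sctlr_el1, SCTLR_ELx_A, SCTLR_ELx_I);

		(void)sctlr;
		(void)gmid;
	}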
+
+#define read_sysreg_par() ({ \
+ u64 par; \
+ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
+ par = read_sysreg(par_el1); \
+ asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412)); \
+ par; \
+})
+
+#endif
+
+#endif /* __ASM_SYSREG_H */
diff --git a/tools/arch/x86/lib/insn.c b/tools/arch/x86/lib/insn.c
index c41f95815480..797699462cd8 100644
--- a/tools/arch/x86/lib/insn.c
+++ b/tools/arch/x86/lib/insn.c
@@ -37,10 +37,10 @@
((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, insn->next_byte, sizeof(t)); insn->next_byte += sizeof(t); leXX_to_cpu(t, r); })
#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); leXX_to_cpu(t, r); })
+ ({ t r; memcpy(&r, (insn)->next_byte + n, sizeof(t)); leXX_to_cpu(t, r); })
#define get_next(t, insn) \
({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
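The hunk above replaces direct pointer casts with memcpy() so that possibly unaligned instruction bytes are read without undefined behaviour. A standalone sketch of the same pattern (editor's illustration, not the kernel's code; endianness conversion is left to the caller, as leXX_to_cpu() does in the real macros):

	#include <stdint.h>
	#include <string.h>

	/* Read a 32-bit value from a possibly unaligned, possibly type-punned buffer. */
	static uint32_t read_unaligned_32(const unsigned char *p)
	{
		uint32_t v;

		memcpy(&v, p, sizeof(v));  /* no unaligned or aliasing-violating dereference */
		return v;
	}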
diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c
index 10911a8cad0f..2df880cefdae 100644
--- a/tools/lib/bpf/linker.c
+++ b/tools/lib/bpf/linker.c
@@ -1649,11 +1649,17 @@ static bool btf_is_non_static(const struct btf_type *t)
static int find_glob_sym_btf(struct src_obj *obj, Elf64_Sym *sym, const char *sym_name,
int *out_btf_sec_id, int *out_btf_id)
{
- int i, j, n = btf__get_nr_types(obj->btf), m, btf_id = 0;
+ int i, j, n, m, btf_id = 0;
const struct btf_type *t;
const struct btf_var_secinfo *vi;
const char *name;
+ if (!obj->btf) {
+ pr_warn("failed to find BTF info for object '%s'\n", obj->filename);
+ return -EINVAL;
+ }
+
+ n = btf__get_nr_types(obj->btf);
for (i = 1; i <= n; i++) {
t = btf__type_by_id(obj->btf, i);
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index bc925cf19e2d..f1428e32a505 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -58,6 +58,24 @@ void __weak arch_handle_alternative(unsigned short feature, struct special_alt *
{
}
+static bool reloc2sec_off(struct reloc *reloc, struct section **sec, unsigned long *off)
+{
+ switch (reloc->sym->type) {
+ case STT_FUNC:
+ *sec = reloc->sym->sec;
+ *off = reloc->sym->offset + reloc->addend;
+ return true;
+
+ case STT_SECTION:
+ *sec = reloc->sym->sec;
+ *off = reloc->addend;
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
struct section *sec, int idx,
struct special_alt *alt)
@@ -91,15 +109,14 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
WARN_FUNC("can't find orig reloc", sec, offset + entry->orig);
return -1;
}
- if (orig_reloc->sym->type != STT_SECTION) {
- WARN_FUNC("don't know how to handle non-section reloc symbol %s",
- sec, offset + entry->orig, orig_reloc->sym->name);
+ if (!reloc2sec_off(orig_reloc, &alt->orig_sec, &alt->orig_off)) {
+ WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
+ sec, offset + entry->orig,
+ orig_reloc->sym->type,
+ orig_reloc->sym->name);
return -1;
}
- alt->orig_sec = orig_reloc->sym->sec;
- alt->orig_off = orig_reloc->addend;
-
if (!entry->group || alt->new_len) {
new_reloc = find_reloc_by_dest(elf, sec, offset + entry->new);
if (!new_reloc) {
@@ -116,8 +133,13 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
if (arch_is_retpoline(new_reloc->sym))
return 1;
- alt->new_sec = new_reloc->sym->sec;
- alt->new_off = (unsigned int)new_reloc->addend;
+ if (!reloc2sec_off(new_reloc, &alt->new_sec, &alt->new_off)) {
+ WARN_FUNC("don't know how to handle reloc symbol type %d: %s",
+ sec, offset + entry->new,
+ new_reloc->sym->type,
+ new_reloc->sym->name);
+ return -1;
+ }
/* _ASM_EXTABLE_EX hack */
if (alt->new_off >= 0x7ffffff0)
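For context (editor's note): a relocation addend is interpreted relative to different bases depending on the symbol type, which is what the new reloc2sec_off() helper captures and the old STT_SECTION-only code could not:

	/* Illustrative summary of the two cases handled above */
	/* STT_FUNC:    off = sym->offset + reloc->addend   (addend relative to the function symbol) */
	/* STT_SECTION: off = reloc->addend                  (addend already section-relative)       */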
diff --git a/tools/perf/Documentation/jitdump-specification.txt b/tools/perf/Documentation/jitdump-specification.txt
index 52152d156ad9..79936355d819 100644
--- a/tools/perf/Documentation/jitdump-specification.txt
+++ b/tools/perf/Documentation/jitdump-specification.txt
@@ -164,7 +164,7 @@ const char unwinding_data[n]: an array of unwinding data, consisting of the EH F
The EH Frame header follows the Linux Standard Base (LSB) specification as described in the document at https://refspecs.linuxfoundation.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
-The EH Frame follows the LSB specicfication as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
+The EH Frame follows the LSB specification as described in the document at https://refspecs.linuxbase.org/LSB_3.0.0/LSB-PDA/LSB-PDA/ehframechpt.html
NOTE: The mapped_size is generally either the same as unwind_data_size (if the unwinding data was mapped in memory by the running process) or zero (if the unwinding data is not mapped by the process). If the unwinding data was not mapped, then only the EH Frame Header will be read, which can be used to specify FP based unwinding for a function which does not have unwinding information.
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index de6beedb7283..3b6a2c84ea02 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -261,7 +261,7 @@ COALESCE
User can specify how to sort offsets for cacheline.
Following fields are available and governs the final
-output fields set for caheline offsets output:
+output fields set for cacheline offsets output:
tid - coalesced by process TIDs
pid - coalesced by process PIDs
diff --git a/tools/perf/Documentation/perf-intel-pt.txt b/tools/perf/Documentation/perf-intel-pt.txt
index 184ba62420f0..db465fa7ee91 100644
--- a/tools/perf/Documentation/perf-intel-pt.txt
+++ b/tools/perf/Documentation/perf-intel-pt.txt
@@ -883,7 +883,7 @@ and "r" can be combined to get calls and returns.
"Transactions" events correspond to the start or end of transactions. The
'flags' field can be used in perf script to determine whether the event is a
-tranasaction start, commit or abort.
+transaction start, commit or abort.
Note that "instructions", "branches" and "transactions" events depend on code
flow packets which can be disabled by using the config term "branch=0". Refer
diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt
index 74d774592196..1b4d452923d7 100644
--- a/tools/perf/Documentation/perf-lock.txt
+++ b/tools/perf/Documentation/perf-lock.txt
@@ -44,7 +44,7 @@ COMMON OPTIONS
-f::
--force::
- Don't complan, do it.
+ Don't complain, do it.
REPORT OPTIONS
--------------
diff --git a/tools/perf/Documentation/perf-script-perl.txt b/tools/perf/Documentation/perf-script-perl.txt
index 5a1f68122f50..fa4f39d305a7 100644
--- a/tools/perf/Documentation/perf-script-perl.txt
+++ b/tools/perf/Documentation/perf-script-perl.txt
@@ -54,7 +54,7 @@ all sched_wakeup events in the system:
Traces meant to be processed using a script should be recorded with
the above option: -a to enable system-wide collection.
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
----
diff --git a/tools/perf/Documentation/perf-script-python.txt b/tools/perf/Documentation/perf-script-python.txt
index 0250dc61cf98..cf4b7f4b625a 100644
--- a/tools/perf/Documentation/perf-script-python.txt
+++ b/tools/perf/Documentation/perf-script-python.txt
@@ -448,7 +448,7 @@ all sched_wakeup events in the system:
Traces meant to be processed using a script should be recorded with
the above option: -a to enable system-wide collection.
-The format file for the sched_wakep event defines the following fields
+The format file for the sched_wakeup event defines the following fields
(see /sys/kernel/debug/tracing/events/sched/sched_wakeup/format):
----
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 4c9310be6acc..7e6fb7cbc0f4 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -385,7 +385,7 @@ Aggregate counts per physical processor for system-wide mode measurements.
Print metrics or metricgroups specified in a comma separated list.
For a group all metrics from the group are added.
The events from the metrics are automatically measured.
-See perf list output for the possble metrics and metricgroups.
+See perf list output for the possible metrics and metricgroups.
-A::
--no-aggr::
diff --git a/tools/perf/Documentation/topdown.txt b/tools/perf/Documentation/topdown.txt
index c6302df4cf29..a15b93fdcf50 100644
--- a/tools/perf/Documentation/topdown.txt
+++ b/tools/perf/Documentation/topdown.txt
@@ -2,7 +2,7 @@ Using TopDown metrics in user space
-----------------------------------
Intel CPUs (since Sandy Bridge and Silvermont) support a TopDown
-methology to break down CPU pipeline execution into 4 bottlenecks:
+methodology to break down CPU pipeline execution into 4 bottlenecks:
frontend bound, backend bound, bad speculation, retiring.
For more details on Topdown see [1][5]
diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c
index c7c7ec0812d5..5fc6a2a3dbc5 100644
--- a/tools/perf/arch/arm/util/auxtrace.c
+++ b/tools/perf/arch/arm/util/auxtrace.c
@@ -8,10 +8,10 @@
#include <linux/coresight-pmu.h>
#include <linux/zalloc.h>
-#include "../../util/auxtrace.h"
-#include "../../util/debug.h"
-#include "../../util/evlist.h"
-#include "../../util/pmu.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/debug.h"
+#include "../../../util/evlist.h"
+#include "../../../util/pmu.h"
#include "cs-etm.h"
#include "arm-spe.h"
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 515aae470e23..293a23bf8be3 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -16,19 +16,19 @@
#include <linux/zalloc.h>
#include "cs-etm.h"
-#include "../../util/debug.h"
-#include "../../util/record.h"
-#include "../../util/auxtrace.h"
-#include "../../util/cpumap.h"
-#include "../../util/event.h"
-#include "../../util/evlist.h"
-#include "../../util/evsel.h"
-#include "../../util/perf_api_probe.h"
-#include "../../util/evsel_config.h"
-#include "../../util/pmu.h"
-#include "../../util/cs-etm.h"
+#include "../../../util/debug.h"
+#include "../../../util/record.h"
+#include "../../../util/auxtrace.h"
+#include "../../../util/cpumap.h"
+#include "../../../util/event.h"
+#include "../../../util/evlist.h"
+#include "../../../util/evsel.h"
+#include "../../../util/perf_api_probe.h"
+#include "../../../util/evsel_config.h"
+#include "../../../util/pmu.h"
+#include "../../../util/cs-etm.h"
#include <internal/lib.h> // page_size
-#include "../../util/session.h"
+#include "../../../util/session.h"
#include <errno.h>
#include <stdlib.h>
diff --git a/tools/perf/arch/arm/util/perf_regs.c b/tools/perf/arch/arm/util/perf_regs.c
index 2864e2e3776d..2833e101a7c6 100644
--- a/tools/perf/arch/arm/util/perf_regs.c
+++ b/tools/perf/arch/arm/util/perf_regs.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include "../../util/perf_regs.h"
+#include "../../../util/perf_regs.h"
const struct sample_reg sample_reg_masks[] = {
SMPL_REG_END
diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index bbc297a7e2e3..b8b23b9dc598 100644
--- a/tools/perf/arch/arm/util/pmu.c
+++ b/tools/perf/arch/arm/util/pmu.c
@@ -10,7 +10,7 @@
#include <linux/string.h>
#include "arm-spe.h"
-#include "../../util/pmu.h"
+#include "../../../util/pmu.h"
struct perf_event_attr
*perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
diff --git a/tools/perf/arch/arm/util/unwind-libdw.c b/tools/perf/arch/arm/util/unwind-libdw.c
index 36ba4c69c3c5..b7692cb0c733 100644
--- a/tools/perf/arch/arm/util/unwind-libdw.c
+++ b/tools/perf/arch/arm/util/unwind-libdw.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include <elfutils/libdwfl.h>
-#include "../../util/unwind-libdw.h"
-#include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../../util/unwind-libdw.h"
+#include "../../../util/perf_regs.h"
+#include "../../../util/event.h"
bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
{
diff --git a/tools/perf/arch/arm/util/unwind-libunwind.c b/tools/perf/arch/arm/util/unwind-libunwind.c
index 3a550225dfaf..438906bf0014 100644
--- a/tools/perf/arch/arm/util/unwind-libunwind.c
+++ b/tools/perf/arch/arm/util/unwind-libunwind.c
@@ -3,8 +3,8 @@
#include <errno.h>
#include <libunwind.h>
#include "perf_regs.h"
-#include "../../util/unwind.h"
-#include "../../util/debug.h"
+#include "../../../util/unwind.h"
+#include "../../../util/debug.h"
int libunwind__arch_reg_id(int regnum)
{
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
index eeafe97b8105..792cd75ade33 100644
--- a/tools/perf/arch/x86/util/iostat.c
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -432,7 +432,7 @@ void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
u8 die = ((struct iio_root_port *)evsel->priv)->die;
struct perf_counts_values *count = perf_counts(evsel->counts, die, 0);
- if (count->run && count->ena) {
+ if (count && count->run && count->ena) {
if (evsel->prev_raw_counts && !out->force_header) {
struct perf_counts_values *prev_count =
perf_counts(evsel->prev_raw_counts, die, 0);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index f6e87b7be5fa..f0ecfda34ece 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2408,6 +2408,8 @@ int cmd_stat(int argc, const char **argv)
goto out;
} else if (verbose)
iostat_list(evsel_list, &stat_config);
+ if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target))
+ target.system_wide = true;
}
if (add_default_attributes())
diff --git a/tools/perf/pmu-events/arch/powerpc/power8/other.json b/tools/perf/pmu-events/arch/powerpc/power8/other.json
index 84a0cedf1fd9..f1f2965f6775 100644
--- a/tools/perf/pmu-events/arch/powerpc/power8/other.json
+++ b/tools/perf/pmu-events/arch/powerpc/power8/other.json
@@ -1046,7 +1046,7 @@
{
"EventCode": "0x4e010",
"EventName": "PM_GCT_NOSLOT_IC_L3MISS",
- "BriefDescription": "Gct empty for this thread due to icach l3 miss",
+ "BriefDescription": "Gct empty for this thread due to icache l3 miss",
"PublicDescription": ""
},
{
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 9866cddebf23..9b4a765e4b73 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -229,8 +229,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
struct thread *thread, struct state *state)
{
struct addr_location al;
- unsigned char buf1[BUFSZ];
- unsigned char buf2[BUFSZ];
+ unsigned char buf1[BUFSZ] = {0};
+ unsigned char buf2[BUFSZ] = {0};
size_t ret_len;
u64 objdump_addr;
const char *objdump_name;
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index a288035eb362..c756284b3b13 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -20,6 +20,23 @@
/* For bsearch. We try to unwind functions in shared object. */
#include <stdlib.h>
+/*
+ * The test will assert frames are on the stack but tail call optimizations lose
+ * the frame of the caller. Clang can disable this optimization on a called
+ * function but GCC currently (11/2020) lacks this attribute. The barrier is
+ * used to inhibit tail calls in these cases.
+ */
+#ifdef __has_attribute
+#if __has_attribute(disable_tail_calls)
+#define NO_TAIL_CALL_ATTRIBUTE __attribute__((disable_tail_calls))
+#define NO_TAIL_CALL_BARRIER
+#endif
+#endif
+#ifndef NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_ATTRIBUTE
+#define NO_TAIL_CALL_BARRIER __asm__ __volatile__("" : : : "memory");
+#endif
+
static int mmap_handler(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample,
@@ -91,7 +108,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
return strcmp((const char *) symbol, funcs[idx]);
}
-noinline int test_dwarf_unwind__thread(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__thread(struct thread *thread)
{
struct perf_sample sample;
unsigned long cnt = 0;
@@ -122,7 +139,7 @@ noinline int test_dwarf_unwind__thread(struct thread *thread)
static int global_unwind_retval = -INT_MAX;
-noinline int test_dwarf_unwind__compare(void *p1, void *p2)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__compare(void *p1, void *p2)
{
/* Any possible value should be 'thread' */
struct thread *thread = *(struct thread **)p1;
@@ -141,7 +158,7 @@ noinline int test_dwarf_unwind__compare(void *p1, void *p2)
return p1 - p2;
}
-noinline int test_dwarf_unwind__krava_3(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_3(struct thread *thread)
{
struct thread *array[2] = {thread, thread};
void *fp = &bsearch;
@@ -160,14 +177,22 @@ noinline int test_dwarf_unwind__krava_3(struct thread *thread)
return global_unwind_retval;
}
-noinline int test_dwarf_unwind__krava_2(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_2(struct thread *thread)
{
- return test_dwarf_unwind__krava_3(thread);
+ int ret;
+
+ ret = test_dwarf_unwind__krava_3(thread);
+ NO_TAIL_CALL_BARRIER;
+ return ret;
}
-noinline int test_dwarf_unwind__krava_1(struct thread *thread)
+NO_TAIL_CALL_ATTRIBUTE noinline int test_dwarf_unwind__krava_1(struct thread *thread)
{
- return test_dwarf_unwind__krava_2(thread);
+ int ret;
+
+ ret = test_dwarf_unwind__krava_2(thread);
+ NO_TAIL_CALL_BARRIER;
+ return ret;
}
int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unused)
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 4fb5e90d7a57..60ce5908c664 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -801,7 +801,7 @@ int perf_config_set(struct perf_config_set *set,
section->name, item->name);
ret = fn(key, value, data);
if (ret < 0) {
- pr_err("Error: wrong config key-value pair %s=%s\n",
+ pr_err("Error in the given config file: wrong config key-value pair %s=%s\n",
key, value);
/*
* Can't be just a 'break', as perf_config_set__for_each_entry()
diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c
index 6836510a522f..22722abc9dfa 100644
--- a/tools/testing/selftests/arm64/signal/test_signals_utils.c
+++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c
@@ -266,16 +266,19 @@ int test_init(struct tdescr *td)
td->feats_supported |= FEAT_SSBS;
if (getauxval(AT_HWCAP) & HWCAP_SVE)
td->feats_supported |= FEAT_SVE;
- if (feats_ok(td))
+ if (feats_ok(td)) {
fprintf(stderr,
"Required Features: [%s] supported\n",
feats_to_string(td->feats_required &
td->feats_supported));
- else
+ } else {
fprintf(stderr,
"Required Features: [%s] NOT supported\n",
feats_to_string(td->feats_required &
~td->feats_supported));
+ td->result = KSFT_SKIP;
+ return 0;
+ }
}
/* Perform test specific additional initialization */
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 866531c08e4f..799b88152e9e 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -375,7 +375,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
- $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
+ $(wildcard $(BPFDIR)/bpf_*.h) \
+ | $(TRUNNER_OUTPUT) $$(BPFOBJ)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS))
diff --git a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
index 59ea56945e6c..b497bb85b667 100755
--- a/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
+++ b/tools/testing/selftests/bpf/test_lwt_ip_encap.sh
@@ -112,6 +112,14 @@ setup()
ip netns add "${NS2}"
ip netns add "${NS3}"
+ # rp_filter gets confused by what these tests are doing, so disable it
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
+ ip netns exec ${NS1} sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec ${NS2} sysctl -wq net.ipv4.conf.default.rp_filter=0
+ ip netns exec ${NS3} sysctl -wq net.ipv4.conf.default.rp_filter=0
+
ip link add veth1 type veth peer name veth2
ip link add veth3 type veth peer name veth4
ip link add veth5 type veth peer name veth6
@@ -236,11 +244,6 @@ setup()
ip -netns ${NS1} -6 route add ${IPv6_GRE}/128 dev veth5 via ${IPv6_6} ${VRF}
ip -netns ${NS2} -6 route add ${IPv6_GRE}/128 dev veth7 via ${IPv6_8} ${VRF}
- # rp_filter gets confused by what these tests are doing, so disable it
- ip netns exec ${NS1} sysctl -wq net.ipv4.conf.all.rp_filter=0
- ip netns exec ${NS2} sysctl -wq net.ipv4.conf.all.rp_filter=0
- ip netns exec ${NS3} sysctl -wq net.ipv4.conf.all.rp_filter=0
-
TMPFILE=$(mktemp /tmp/test_lwt_ip_encap.XXXXXX)
sleep 1 # reduce flakiness
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
index beee0d5646a6..f7d84549cc3e 100755
--- a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -1,6 +1,6 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-# Copyright 2020 NXP Semiconductors
+# Copyright 2020 NXP
WAIT_TIME=1
NUM_NETIFS=4
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index fdd13fbdcc8f..d4a830139683 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+/aarch64/arch_timer
/aarch64/debug-exceptions
/aarch64/get-reg-list
/aarch64/psci_cpu_on_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index fd20f271aac0..c23e89dea0b6 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -35,7 +35,7 @@ endif
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/rbtree.c lib/sparsebit.c lib/test_util.c lib/guest_modes.c lib/perf_test_util.c
LIBKVM_x86_64 = lib/x86_64/apic.c lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c lib/x86_64/handlers.S
-LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S
+LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c lib/aarch64/handlers.S lib/aarch64/spinlock.c lib/aarch64/gic.c lib/aarch64/gic_v3.c lib/aarch64/vgic.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c lib/s390x/diag318_test_handler.c
TEST_GEN_PROGS_x86_64 = x86_64/cr4_cpuid_sync_test
@@ -88,6 +88,7 @@ TEST_GEN_PROGS_x86_64 += steal_time
TEST_GEN_PROGS_x86_64 += kvm_binary_stats_test
TEST_GEN_PROGS_x86_64 += system_counter_offset_test
+TEST_GEN_PROGS_aarch64 += aarch64/arch_timer
TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
TEST_GEN_PROGS_aarch64 += aarch64/get-reg-list
TEST_GEN_PROGS_aarch64 += aarch64/psci_cpu_on_test
@@ -97,6 +98,8 @@ TEST_GEN_PROGS_aarch64 += dirty_log_test
TEST_GEN_PROGS_aarch64 += dirty_log_perf_test
TEST_GEN_PROGS_aarch64 += kvm_create_max_vcpus
TEST_GEN_PROGS_aarch64 += kvm_page_table_test
+TEST_GEN_PROGS_aarch64 += memslot_modification_stress_test
+TEST_GEN_PROGS_aarch64 += memslot_perf_test
TEST_GEN_PROGS_aarch64 += rseq_test
TEST_GEN_PROGS_aarch64 += set_memory_region_test
TEST_GEN_PROGS_aarch64 += steal_time
diff --git a/tools/testing/selftests/kvm/aarch64/arch_timer.c b/tools/testing/selftests/kvm/aarch64/arch_timer.c
new file mode 100644
index 000000000000..bf6a45b0b8dc
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/arch_timer.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * arch_timer.c - Tests the aarch64 timer IRQ functionality
+ *
+ * The test validates both the virtual and physical timer IRQs using
+ * CVAL and TVAL registers. This constitutes the four stages of the test.
+ * The guest's main thread configures the timer interrupt for a stage
+ * and waits for it to fire, with a timeout equal to the timer period.
+ * It asserts that the interrupt arrives within this timeout window.
+ *
+ * Upon receipt of an interrupt, the guest's interrupt handler validates
+ * the interrupt by checking that the architectural state complies with
+ * the specification.
+ *
+ * The test provides command-line options to configure the timer's
+ * period (-p), number of vCPUs (-n), and iterations per stage (-i).
+ * To stress-test the timer stack even more, an option to migrate the
+ * vCPUs across pCPUs (-m), at a particular rate, is also provided.
+ *
+ * Copyright (c) 2021, Google LLC.
+ */
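A hypothetical invocation based on the options described in the header comment above (binary name and values are illustrative only):

	/*
	 *   ./arch_timer -n 8 -i 10 -p 5 -m 2
	 *   8 vCPUs, 10 iterations per stage, a 5 ms timer period, vCPU migration every 2 ms
	 */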
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <linux/kvm.h>
+#include <linux/sizes.h>
+#include <linux/bitmap.h>
+#include <sys/sysinfo.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "delay.h"
+#include "arch_timer.h"
+#include "gic.h"
+#include "vgic.h"
+
+#define NR_VCPUS_DEF 4
+#define NR_TEST_ITERS_DEF 5
+#define TIMER_TEST_PERIOD_MS_DEF 10
+#define TIMER_TEST_ERR_MARGIN_US 100
+#define TIMER_TEST_MIGRATION_FREQ_MS 2
+
+struct test_args {
+ int nr_vcpus;
+ int nr_iter;
+ int timer_period_ms;
+ int migration_freq_ms;
+};
+
+static struct test_args test_args = {
+ .nr_vcpus = NR_VCPUS_DEF,
+ .nr_iter = NR_TEST_ITERS_DEF,
+ .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF,
+ .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS,
+};
+
+#define msecs_to_usecs(msec) ((msec) * 1000LL)
+
+#define GICD_BASE_GPA 0x8000000ULL
+#define GICR_BASE_GPA 0x80A0000ULL
+
+enum guest_stage {
+ GUEST_STAGE_VTIMER_CVAL = 1,
+ GUEST_STAGE_VTIMER_TVAL,
+ GUEST_STAGE_PTIMER_CVAL,
+ GUEST_STAGE_PTIMER_TVAL,
+ GUEST_STAGE_MAX,
+};
+
+/* Shared variables between host and guest */
+struct test_vcpu_shared_data {
+ int nr_iter;
+ enum guest_stage guest_stage;
+ uint64_t xcnt;
+};
+
+struct test_vcpu {
+ uint32_t vcpuid;
+ pthread_t pt_vcpu_run;
+ struct kvm_vm *vm;
+};
+
+static struct test_vcpu test_vcpu[KVM_MAX_VCPUS];
+static struct test_vcpu_shared_data vcpu_shared_data[KVM_MAX_VCPUS];
+
+static int vtimer_irq, ptimer_irq;
+
+static unsigned long *vcpu_done_map;
+static pthread_mutex_t vcpu_done_map_lock;
+
+static void
+guest_configure_timer_action(struct test_vcpu_shared_data *shared_data)
+{
+ switch (shared_data->guest_stage) {
+ case GUEST_STAGE_VTIMER_CVAL:
+ timer_set_next_cval_ms(VIRTUAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(VIRTUAL);
+ timer_set_ctl(VIRTUAL, CTL_ENABLE);
+ break;
+ case GUEST_STAGE_VTIMER_TVAL:
+ timer_set_next_tval_ms(VIRTUAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(VIRTUAL);
+ timer_set_ctl(VIRTUAL, CTL_ENABLE);
+ break;
+ case GUEST_STAGE_PTIMER_CVAL:
+ timer_set_next_cval_ms(PHYSICAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(PHYSICAL);
+ timer_set_ctl(PHYSICAL, CTL_ENABLE);
+ break;
+ case GUEST_STAGE_PTIMER_TVAL:
+ timer_set_next_tval_ms(PHYSICAL, test_args.timer_period_ms);
+ shared_data->xcnt = timer_get_cntct(PHYSICAL);
+ timer_set_ctl(PHYSICAL, CTL_ENABLE);
+ break;
+ default:
+ GUEST_ASSERT(0);
+ }
+}
+
+static void guest_validate_irq(unsigned int intid,
+ struct test_vcpu_shared_data *shared_data)
+{
+ enum guest_stage stage = shared_data->guest_stage;
+ uint64_t xcnt = 0, xcnt_diff_us, cval = 0;
+ unsigned long xctl = 0;
+ unsigned int timer_irq = 0;
+
+ if (stage == GUEST_STAGE_VTIMER_CVAL ||
+ stage == GUEST_STAGE_VTIMER_TVAL) {
+ xctl = timer_get_ctl(VIRTUAL);
+ timer_set_ctl(VIRTUAL, CTL_IMASK);
+ xcnt = timer_get_cntct(VIRTUAL);
+ cval = timer_get_cval(VIRTUAL);
+ timer_irq = vtimer_irq;
+ } else if (stage == GUEST_STAGE_PTIMER_CVAL ||
+ stage == GUEST_STAGE_PTIMER_TVAL) {
+ xctl = timer_get_ctl(PHYSICAL);
+ timer_set_ctl(PHYSICAL, CTL_IMASK);
+ xcnt = timer_get_cntct(PHYSICAL);
+ cval = timer_get_cval(PHYSICAL);
+ timer_irq = ptimer_irq;
+ } else {
+ GUEST_ASSERT(0);
+ }
+
+ xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt);
+
+ /* Make sure we are dealing with the correct timer IRQ */
+ GUEST_ASSERT_2(intid == timer_irq, intid, timer_irq);
+
+ /* Basic 'timer condition met' check */
+ GUEST_ASSERT_3(xcnt >= cval, xcnt, cval, xcnt_diff_us);
+ GUEST_ASSERT_1(xctl & CTL_ISTATUS, xctl);
+}
+
+static void guest_irq_handler(struct ex_regs *regs)
+{
+ unsigned int intid = gic_get_and_ack_irq();
+ uint32_t cpu = guest_get_vcpuid();
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
+
+ guest_validate_irq(intid, shared_data);
+
+ WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1);
+
+ gic_set_eoi(intid);
+}
+
+static void guest_run_stage(struct test_vcpu_shared_data *shared_data,
+ enum guest_stage stage)
+{
+ uint32_t irq_iter, config_iter;
+
+ shared_data->guest_stage = stage;
+ shared_data->nr_iter = 0;
+
+ for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) {
+ /* Setup the next interrupt */
+ guest_configure_timer_action(shared_data);
+
+ /* Setup a timeout for the interrupt to arrive */
+ udelay(msecs_to_usecs(test_args.timer_period_ms) +
+ TIMER_TEST_ERR_MARGIN_US);
+
+ irq_iter = READ_ONCE(shared_data->nr_iter);
+ GUEST_ASSERT_2(config_iter + 1 == irq_iter,
+ config_iter + 1, irq_iter);
+ }
+}
+
+static void guest_code(void)
+{
+ uint32_t cpu = guest_get_vcpuid();
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu];
+
+ local_irq_disable();
+
+ gic_init(GIC_V3, test_args.nr_vcpus,
+ (void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA);
+
+ timer_set_ctl(VIRTUAL, CTL_IMASK);
+ timer_set_ctl(PHYSICAL, CTL_IMASK);
+
+ gic_irq_enable(vtimer_irq);
+ gic_irq_enable(ptimer_irq);
+ local_irq_enable();
+
+ guest_run_stage(shared_data, GUEST_STAGE_VTIMER_CVAL);
+ guest_run_stage(shared_data, GUEST_STAGE_VTIMER_TVAL);
+ guest_run_stage(shared_data, GUEST_STAGE_PTIMER_CVAL);
+ guest_run_stage(shared_data, GUEST_STAGE_PTIMER_TVAL);
+
+ GUEST_DONE();
+}
+
+static void *test_vcpu_run(void *arg)
+{
+ struct ucall uc;
+ struct test_vcpu *vcpu = arg;
+ struct kvm_vm *vm = vcpu->vm;
+ uint32_t vcpuid = vcpu->vcpuid;
+ struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[vcpuid];
+
+ vcpu_run(vm, vcpuid);
+
+ /* Currently, any exit from guest is an indication of completion */
+ pthread_mutex_lock(&vcpu_done_map_lock);
+ set_bit(vcpuid, vcpu_done_map);
+ pthread_mutex_unlock(&vcpu_done_map_lock);
+
+ switch (get_ucall(vm, vcpuid, &uc)) {
+ case UCALL_SYNC:
+ case UCALL_DONE:
+ break;
+ case UCALL_ABORT:
+ sync_global_from_guest(vm, *shared_data);
+ TEST_FAIL("%s at %s:%ld\n\tvalues: %lu, %lu; %lu, vcpu: %u; stage: %u; iter: %u",
+ (const char *)uc.args[0], __FILE__, uc.args[1],
+ uc.args[2], uc.args[3], uc.args[4], vcpuid,
+ shared_data->guest_stage, shared_data->nr_iter);
+ break;
+ default:
+ TEST_FAIL("Unexpected guest exit\n");
+ }
+
+ return NULL;
+}
+
+static uint32_t test_get_pcpu(void)
+{
+ uint32_t pcpu;
+ unsigned int nproc_conf;
+ cpu_set_t online_cpuset;
+
+ nproc_conf = get_nprocs_conf();
+ sched_getaffinity(0, sizeof(cpu_set_t), &online_cpuset);
+
+ /* Randomly find an available pCPU to place a vCPU on */
+ do {
+ pcpu = rand() % nproc_conf;
+ } while (!CPU_ISSET(pcpu, &online_cpuset));
+
+ return pcpu;
+}
+
+static int test_migrate_vcpu(struct test_vcpu *vcpu)
+{
+ int ret;
+ cpu_set_t cpuset;
+ uint32_t new_pcpu = test_get_pcpu();
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(new_pcpu, &cpuset);
+
+ pr_debug("Migrating vCPU: %u to pCPU: %u\n", vcpu->vcpuid, new_pcpu);
+
+ ret = pthread_setaffinity_np(vcpu->pt_vcpu_run,
+ sizeof(cpuset), &cpuset);
+
+ /* Allow the error where the vCPU thread is already finished */
+ TEST_ASSERT(ret == 0 || ret == ESRCH,
+ "Failed to migrate the vCPU:%u to pCPU: %u; ret: %d\n",
+ vcpu->vcpuid, new_pcpu, ret);
+
+ return ret;
+}
+
+static void *test_vcpu_migration(void *arg)
+{
+ unsigned int i, n_done;
+ bool vcpu_done;
+
+ do {
+ usleep(msecs_to_usecs(test_args.migration_freq_ms));
+
+ for (n_done = 0, i = 0; i < test_args.nr_vcpus; i++) {
+ pthread_mutex_lock(&vcpu_done_map_lock);
+ vcpu_done = test_bit(i, vcpu_done_map);
+ pthread_mutex_unlock(&vcpu_done_map_lock);
+
+ if (vcpu_done) {
+ n_done++;
+ continue;
+ }
+
+ test_migrate_vcpu(&test_vcpu[i]);
+ }
+ } while (test_args.nr_vcpus != n_done);
+
+ return NULL;
+}
+
+static void test_run(struct kvm_vm *vm)
+{
+ int i, ret;
+ pthread_t pt_vcpu_migration;
+
+ pthread_mutex_init(&vcpu_done_map_lock, NULL);
+ vcpu_done_map = bitmap_zalloc(test_args.nr_vcpus);
+ TEST_ASSERT(vcpu_done_map, "Failed to allocate vcpu done bitmap\n");
+
+ for (i = 0; i < test_args.nr_vcpus; i++) {
+ ret = pthread_create(&test_vcpu[i].pt_vcpu_run, NULL,
+ test_vcpu_run, &test_vcpu[i]);
+ TEST_ASSERT(!ret, "Failed to create vCPU-%d pthread\n", i);
+ }
+
+ /* Spawn a thread to control the vCPU migrations */
+ if (test_args.migration_freq_ms) {
+ srand(time(NULL));
+
+ ret = pthread_create(&pt_vcpu_migration, NULL,
+ test_vcpu_migration, NULL);
+ TEST_ASSERT(!ret, "Failed to create the migration pthread\n");
+ }
+
+ for (i = 0; i < test_args.nr_vcpus; i++)
+ pthread_join(test_vcpu[i].pt_vcpu_run, NULL);
+
+ if (test_args.migration_freq_ms)
+ pthread_join(pt_vcpu_migration, NULL);
+
+ bitmap_free(vcpu_done_map);
+}
+
+static void test_init_timer_irq(struct kvm_vm *vm)
+{
+ /* Timer intid should be the same for all the vCPUs, so query only vCPU-0 */
+ int vcpu0_fd = vcpu_get_fd(vm, 0);
+
+ kvm_device_access(vcpu0_fd, KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_PTIMER, &ptimer_irq, false);
+ kvm_device_access(vcpu0_fd, KVM_ARM_VCPU_TIMER_CTRL,
+ KVM_ARM_VCPU_TIMER_IRQ_VTIMER, &vtimer_irq, false);
+
+ sync_global_to_guest(vm, ptimer_irq);
+ sync_global_to_guest(vm, vtimer_irq);
+
+ pr_debug("ptimer_irq: %d; vtimer_irq: %d\n", ptimer_irq, vtimer_irq);
+}
+
+static struct kvm_vm *test_vm_create(void)
+{
+ struct kvm_vm *vm;
+ unsigned int i;
+ int nr_vcpus = test_args.nr_vcpus;
+
+ vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
+
+ vm_init_descriptor_tables(vm);
+ vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler);
+
+ for (i = 0; i < nr_vcpus; i++) {
+ vcpu_init_descriptor_tables(vm, i);
+
+ test_vcpu[i].vcpuid = i;
+ test_vcpu[i].vm = vm;
+ }
+
+ ucall_init(vm, NULL);
+ test_init_timer_irq(vm);
+ vgic_v3_setup(vm, nr_vcpus, GICD_BASE_GPA, GICR_BASE_GPA);
+
+ /* Make all the test's cmdline args visible to the guest */
+ sync_global_to_guest(vm, test_args);
+
+ return vm;
+}
+
+static void test_print_help(char *name)
+{
+ pr_info("Usage: %s [-h] [-n nr_vcpus] [-i iterations] [-p timer_period_ms]\n",
+ name);
+ pr_info("\t-n: Number of vCPUs to configure (default: %u; max: %u)\n",
+ NR_VCPUS_DEF, KVM_MAX_VCPUS);
+ pr_info("\t-i: Number of iterations per stage (default: %u)\n",
+ NR_TEST_ITERS_DEF);
+ pr_info("\t-p: Periodicity (in ms) of the guest timer (default: %u)\n",
+ TIMER_TEST_PERIOD_MS_DEF);
+ pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n",
+ TIMER_TEST_MIGRATION_FREQ_MS);
+ pr_info("\t-h: print this help screen\n");
+}
+
+static bool parse_args(int argc, char *argv[])
+{
+ int opt;
+
+ while ((opt = getopt(argc, argv, "hn:i:p:m:")) != -1) {
+ switch (opt) {
+ case 'n':
+ test_args.nr_vcpus = atoi(optarg);
+ if (test_args.nr_vcpus <= 0) {
+ pr_info("Positive value needed for -n\n");
+ goto err;
+ } else if (test_args.nr_vcpus > KVM_MAX_VCPUS) {
+ pr_info("Max allowed vCPUs: %u\n",
+ KVM_MAX_VCPUS);
+ goto err;
+ }
+ break;
+ case 'i':
+ test_args.nr_iter = atoi(optarg);
+ if (test_args.nr_iter <= 0) {
+ pr_info("Positive value needed for -i\n");
+ goto err;
+ }
+ break;
+ case 'p':
+ test_args.timer_period_ms = atoi(optarg);
+ if (test_args.timer_period_ms <= 0) {
+ pr_info("Positive value needed for -p\n");
+ goto err;
+ }
+ break;
+ case 'm':
+ test_args.migration_freq_ms = atoi(optarg);
+ if (test_args.migration_freq_ms < 0) {
+ pr_info("0 or positive value needed for -m\n");
+ goto err;
+ }
+ break;
+ case 'h':
+ default:
+ goto err;
+ }
+ }
+
+ return true;
+
+err:
+ test_print_help(argv[0]);
+ return false;
+}
+
+int main(int argc, char *argv[])
+{
+ struct kvm_vm *vm;
+
+ /* Tell stdout not to buffer its content */
+ setbuf(stdout, NULL);
+
+ if (!parse_args(argc, argv))
+ exit(KSFT_SKIP);
+
+ if (test_args.migration_freq_ms && get_nprocs() < 2) {
+ print_skip("At least two physical CPUs needed for vCPU migration");
+ exit(KSFT_SKIP);
+ }
+
+ vm = test_vm_create();
+ test_run(vm);
+ kvm_vm_free(vm);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
index e5e6c92b60da..ea189d83abf7 100644
--- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
+++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c
@@ -34,16 +34,16 @@ static void reset_debug_state(void)
{
asm volatile("msr daifset, #8");
- write_sysreg(osdlr_el1, 0);
- write_sysreg(oslar_el1, 0);
+ write_sysreg(0, osdlr_el1);
+ write_sysreg(0, oslar_el1);
isb();
- write_sysreg(mdscr_el1, 0);
+ write_sysreg(0, mdscr_el1);
/* This test only uses the first bp and wp slot. */
- write_sysreg(dbgbvr0_el1, 0);
- write_sysreg(dbgbcr0_el1, 0);
- write_sysreg(dbgwcr0_el1, 0);
- write_sysreg(dbgwvr0_el1, 0);
+ write_sysreg(0, dbgbvr0_el1);
+ write_sysreg(0, dbgbcr0_el1);
+ write_sysreg(0, dbgwcr0_el1);
+ write_sysreg(0, dbgwvr0_el1);
isb();
}
@@ -53,14 +53,14 @@ static void install_wp(uint64_t addr)
uint32_t mdscr;
wcr = DBGWCR_LEN8 | DBGWCR_RD | DBGWCR_WR | DBGWCR_EL1 | DBGWCR_E;
- write_sysreg(dbgwcr0_el1, wcr);
- write_sysreg(dbgwvr0_el1, addr);
+ write_sysreg(wcr, dbgwcr0_el1);
+ write_sysreg(addr, dbgwvr0_el1);
isb();
asm volatile("msr daifclr, #8");
mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
- write_sysreg(mdscr_el1, mdscr);
+ write_sysreg(mdscr, mdscr_el1);
isb();
}
@@ -70,14 +70,14 @@ static void install_hw_bp(uint64_t addr)
uint32_t mdscr;
bcr = DBGBCR_LEN8 | DBGBCR_EXEC | DBGBCR_EL1 | DBGBCR_E;
- write_sysreg(dbgbcr0_el1, bcr);
- write_sysreg(dbgbvr0_el1, addr);
+ write_sysreg(bcr, dbgbcr0_el1);
+ write_sysreg(addr, dbgbvr0_el1);
isb();
asm volatile("msr daifclr, #8");
mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_MDE;
- write_sysreg(mdscr_el1, mdscr);
+ write_sysreg(mdscr, mdscr_el1);
isb();
}
@@ -88,7 +88,7 @@ static void install_ss(void)
asm volatile("msr daifclr, #8");
mdscr = read_sysreg(mdscr_el1) | MDSCR_KDE | MDSCR_SS;
- write_sysreg(mdscr_el1, mdscr);
+ write_sysreg(mdscr, mdscr_el1);
isb();
}
@@ -190,7 +190,7 @@ static int debug_version(struct kvm_vm *vm)
{
uint64_t id_aa64dfr0;
- get_reg(vm, VCPU_ID, ARM64_SYS_REG(ID_AA64DFR0_EL1), &id_aa64dfr0);
+ get_reg(vm, VCPU_ID, KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &id_aa64dfr0);
return id_aa64dfr0 & 0xf;
}
diff --git a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c b/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c
index 018c269990e1..4c5f6814030f 100644
--- a/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c
+++ b/tools/testing/selftests/kvm/aarch64/psci_cpu_on_test.c
@@ -91,7 +91,7 @@ int main(void)
init.features[0] |= (1 << KVM_ARM_VCPU_POWER_OFF);
aarch64_vcpu_add_default(vm, VCPU_ID_TARGET, &init, guest_main);
- get_reg(vm, VCPU_ID_TARGET, ARM64_SYS_REG(MPIDR_EL1), &target_mpidr);
+ get_reg(vm, VCPU_ID_TARGET, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &target_mpidr);
vcpu_args_set(vm, VCPU_ID_SOURCE, 1, target_mpidr & MPIDR_HWID_BITMASK);
vcpu_run(vm, VCPU_ID_SOURCE);
diff --git a/tools/testing/selftests/kvm/aarch64/vgic_init.c b/tools/testing/selftests/kvm/aarch64/vgic_init.c
index 623f31a14326..34379c98d2f4 100644
--- a/tools/testing/selftests/kvm/aarch64/vgic_init.c
+++ b/tools/testing/selftests/kvm/aarch64/vgic_init.c
@@ -13,25 +13,28 @@
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
+#include "vgic.h"
#define NR_VCPUS 4
-#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) (((uint64_t)(count) << 52) | \
- ((uint64_t)((base) >> 16) << 16) | ((uint64_t)(flags) << 12) | index)
#define REG_OFFSET(vcpu, offset) (((uint64_t)vcpu << 32) | offset)
#define GICR_TYPER 0x8
+#define VGIC_DEV_IS_V2(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V2)
+#define VGIC_DEV_IS_V3(_d) ((_d) == KVM_DEV_TYPE_ARM_VGIC_V3)
+
struct vm_gic {
struct kvm_vm *vm;
int gic_fd;
+ uint32_t gic_dev_type;
};
-static int max_ipa_bits;
+static uint64_t max_phys_size;
/* helper to access a redistributor register */
-static int access_redist_reg(int gicv3_fd, int vcpu, int offset,
- uint32_t *val, bool write)
+static int access_v3_redist_reg(int gicv3_fd, int vcpu, int offset,
+ uint32_t *val, bool write)
{
uint64_t attr = REG_OFFSET(vcpu, offset);
@@ -58,12 +61,13 @@ static int run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
return 0;
}
-static struct vm_gic vm_gic_create(void)
+static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type, uint32_t nr_vcpus)
{
struct vm_gic v;
- v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL);
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ v.gic_dev_type = gic_dev_type;
+ v.vm = vm_create_default_with_vcpus(nr_vcpus, 0, 0, guest_code, NULL);
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type, false);
return v;
}
@@ -74,78 +78,129 @@ static void vm_gic_destroy(struct vm_gic *v)
kvm_vm_free(v->vm);
}
+struct vgic_region_attr {
+ uint64_t attr;
+ uint64_t size;
+ uint64_t alignment;
+};
+
+struct vgic_region_attr gic_v3_dist_region = {
+ .attr = KVM_VGIC_V3_ADDR_TYPE_DIST,
+ .size = 0x10000,
+ .alignment = 0x10000,
+};
+
+struct vgic_region_attr gic_v3_redist_region = {
+ .attr = KVM_VGIC_V3_ADDR_TYPE_REDIST,
+ .size = NR_VCPUS * 0x20000,
+ .alignment = 0x10000,
+};
+
+struct vgic_region_attr gic_v2_dist_region = {
+ .attr = KVM_VGIC_V2_ADDR_TYPE_DIST,
+ .size = 0x1000,
+ .alignment = 0x1000,
+};
+
+struct vgic_region_attr gic_v2_cpu_region = {
+ .attr = KVM_VGIC_V2_ADDR_TYPE_CPU,
+ .size = 0x2000,
+ .alignment = 0x1000,
+};
+
/**
- * Helper routine that performs KVM device tests in general and
- * especially ARM_VGIC_V3 ones. Eventually the ARM_VGIC_V3
- * device gets created, a legacy RDIST region is set at @0x0
- * and a DIST region is set @0x60000
+ * Helper routine that performs KVM device tests in general. Eventually the
+ * ARM_VGIC (GICv2 or GICv3) device gets created with an overlapping
+ * DIST/REDIST (or DIST/CPUIF for GICv2). The assumption is that 4 vcpus are
+ * going to be used, hence the overlap. In the case of GICv3, a RDIST region
+ * is set @0x0 and a DIST region @0x70000. The GICv2 case sets a CPUIF @0x0 and a
+ * DIST region @0x1000.
*/
static void subtest_dist_rdist(struct vm_gic *v)
{
int ret;
uint64_t addr;
+ struct vgic_region_attr rdist; /* CPU interface in GICv2 */
+ struct vgic_region_attr dist;
+
+ rdist = VGIC_DEV_IS_V3(v->gic_dev_type) ? gic_v3_redist_region
+ : gic_v2_cpu_region;
+ dist = VGIC_DEV_IS_V3(v->gic_dev_type) ? gic_v3_dist_region
+ : gic_v2_dist_region;
/* Check existing group/attributes */
kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST);
+ dist.attr);
kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST);
+ rdist.attr);
/* check non existing attribute */
- ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, 0);
+ ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, -1);
TEST_ASSERT(ret && errno == ENXIO, "attribute not supported");
/* misaligned DIST and REDIST address settings */
- addr = 0x1000;
+ addr = dist.alignment / 0x10;
ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
- TEST_ASSERT(ret && errno == EINVAL, "GICv3 dist base not 64kB aligned");
+ dist.attr, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "GIC dist base not aligned");
+ addr = rdist.alignment / 0x10;
ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
- TEST_ASSERT(ret && errno == EINVAL, "GICv3 redist base not 64kB aligned");
+ rdist.attr, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL, "GIC redist/cpu base not aligned");
/* out of range address */
- if (max_ipa_bits) {
- addr = 1ULL << max_ipa_bits;
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
- TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit");
+ addr = max_phys_size;
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ dist.attr, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG, "dist address beyond IPA limit");
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
- TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit");
- }
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ rdist.attr, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG, "redist address beyond IPA limit");
+
+ /* Space for half a rdist (one rdist occupies 2 * rdist.alignment). */
+ addr = max_phys_size - dist.alignment;
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ rdist.attr, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG,
+ "half of the redist is beyond IPA limit");
/* set REDIST base address @0x0*/
addr = 0x00000;
kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+ rdist.attr, &addr, true);
/* Attempt to create a second legacy redistributor region */
addr = 0xE0000;
ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
- TEST_ASSERT(ret && errno == EEXIST, "GICv3 redist base set again");
+ rdist.attr, &addr, true);
+ TEST_ASSERT(ret && errno == EEXIST, "GIC redist base set again");
- /* Attempt to mix legacy and new redistributor regions */
- addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0);
- ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
- KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
- TEST_ASSERT(ret && errno == EINVAL, "attempt to mix GICv3 REDIST and REDIST_REGION");
+ ret = _kvm_device_check_attr(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST);
+ if (!ret) {
+ /* Attempt to mix legacy and new redistributor regions */
+ addr = REDIST_REGION_ATTR_ADDR(NR_VCPUS, 0x100000, 0, 0);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION,
+ &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "attempt to mix GICv3 REDIST and REDIST_REGION");
+ }
/*
* Set overlapping DIST / REDIST, cannot be detected here. Will be detected
* on first vcpu run instead.
*/
- addr = 3 * 2 * 0x10000;
- kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, KVM_VGIC_V3_ADDR_TYPE_DIST,
- &addr, true);
+ addr = rdist.size - rdist.alignment;
+ kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ dist.attr, &addr, true);
}
/* Test the new REDIST region API */
-static void subtest_redist_regions(struct vm_gic *v)
+static void subtest_v3_redist_regions(struct vm_gic *v)
{
uint64_t addr, expected_addr;
int ret;
@@ -199,12 +254,19 @@ static void subtest_redist_regions(struct vm_gic *v)
kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
- addr = REDIST_REGION_ATTR_ADDR(1, 1ULL << max_ipa_bits, 0, 2);
+ addr = REDIST_REGION_ATTR_ADDR(1, max_phys_size, 0, 2);
ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
TEST_ASSERT(ret && errno == E2BIG,
"register redist region with base address beyond IPA range");
+ /* The last redist is above the pa range. */
+ addr = REDIST_REGION_ATTR_ADDR(2, max_phys_size - 0x30000, 0, 2);
+ ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG,
+ "register redist region with top address beyond IPA range");
+
addr = 0x260000;
ret = _kvm_device_access(v->gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
@@ -249,13 +311,12 @@ static void subtest_redist_regions(struct vm_gic *v)
* VGIC KVM device is created and initialized before the secondary CPUs
* get created
*/
-static void test_vgic_then_vcpus(void)
+static void test_vgic_then_vcpus(uint32_t gic_dev_type)
{
struct vm_gic v;
int ret, i;
- v.vm = vm_create_default(0, 0, guest_code);
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ v = vm_gic_create_with_vcpus(gic_dev_type, 1);
subtest_dist_rdist(&v);
@@ -270,12 +331,12 @@ static void test_vgic_then_vcpus(void)
}
/* All the VCPUs are created before the VGIC KVM device gets initialized */
-static void test_vcpus_then_vgic(void)
+static void test_vcpus_then_vgic(uint32_t gic_dev_type)
{
struct vm_gic v;
int ret;
- v = vm_gic_create();
+ v = vm_gic_create_with_vcpus(gic_dev_type, NR_VCPUS);
subtest_dist_rdist(&v);
@@ -285,15 +346,15 @@ static void test_vcpus_then_vgic(void)
vm_gic_destroy(&v);
}
-static void test_new_redist_regions(void)
+static void test_v3_new_redist_regions(void)
{
void *dummy = NULL;
struct vm_gic v;
uint64_t addr;
int ret;
- v = vm_gic_create();
- subtest_redist_regions(&v);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
+ subtest_v3_redist_regions(&v);
kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
@@ -303,8 +364,8 @@ static void test_new_redist_regions(void)
/* step2 */
- v = vm_gic_create();
- subtest_redist_regions(&v);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
+ subtest_v3_redist_regions(&v);
addr = REDIST_REGION_ATTR_ADDR(1, 0x280000, 0, 2);
kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
@@ -317,8 +378,8 @@ static void test_new_redist_regions(void)
/* step 3 */
- v = vm_gic_create();
- subtest_redist_regions(&v);
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
+ subtest_v3_redist_regions(&v);
_kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, dummy, true);
@@ -338,7 +399,7 @@ static void test_new_redist_regions(void)
vm_gic_destroy(&v);
}
-static void test_typer_accesses(void)
+static void test_v3_typer_accesses(void)
{
struct vm_gic v;
uint64_t addr;
@@ -351,12 +412,12 @@ static void test_typer_accesses(void)
vm_vcpu_add_default(v.vm, 3, guest_code);
- ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
TEST_ASSERT(ret && errno == EINVAL, "attempting to read GICR_TYPER of non created vcpu");
vm_vcpu_add_default(v.vm, 1, guest_code);
- ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
TEST_ASSERT(ret && errno == EBUSY, "read GICR_TYPER before GIC initialized");
vm_vcpu_add_default(v.vm, 2, guest_code);
@@ -365,7 +426,7 @@ static void test_typer_accesses(void)
KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
for (i = 0; i < NR_VCPUS ; i++) {
- ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && !val, "read GICR_TYPER before rdist region setting");
}
@@ -374,10 +435,10 @@ static void test_typer_accesses(void)
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
/* The 2 first rdists should be put there (vcpu 0 and 3) */
- ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && !val, "read typer of rdist #0");
- ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #1");
addr = REDIST_REGION_ATTR_ADDR(10, 0x100000, 0, 1);
@@ -385,11 +446,11 @@ static void test_typer_accesses(void)
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
TEST_ASSERT(ret && errno == EINVAL, "collision with previous rdist region");
- ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x100,
"no redist region attached to vcpu #1 yet, last cannot be returned");
- ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x200,
"no redist region attached to vcpu #2, last cannot be returned");
@@ -397,10 +458,10 @@ static void test_typer_accesses(void)
kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
- ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1");
- ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x210,
"read typer of rdist #1, last properly returned");
@@ -417,7 +478,7 @@ static void test_typer_accesses(void)
* rdist region #2 @0x200000 2 rdist capacity
* rdists: 1, 2
*/
-static void test_last_bit_redist_regions(void)
+static void test_v3_last_bit_redist_regions(void)
{
uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
@@ -444,29 +505,29 @@ static void test_last_bit_redist_regions(void)
kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &addr, true);
- ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0");
- ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #1");
- ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x200, "read typer of rdist #2");
- ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x310, "read typer of rdist #3");
- ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #5");
- ret = access_redist_reg(v.gic_fd, 4, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 4, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x410, "read typer of rdist #4");
vm_gic_destroy(&v);
}
/* Test last bit with legacy region */
-static void test_last_bit_single_rdist(void)
+static void test_v3_last_bit_single_rdist(void)
{
uint32_t vcpuids[] = { 0, 3, 5, 4, 1, 2 };
struct vm_gic v;
@@ -485,28 +546,106 @@ static void test_last_bit_single_rdist(void)
kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
- ret = access_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 0, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x000, "read typer of rdist #0");
- ret = access_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 3, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x300, "read typer of rdist #1");
- ret = access_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 5, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x500, "read typer of rdist #2");
- ret = access_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 1, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x100, "read typer of rdist #3");
- ret = access_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
+ ret = access_v3_redist_reg(v.gic_fd, 2, GICR_TYPER, &val, false);
TEST_ASSERT(!ret && val == 0x210, "read typer of rdist #3");
vm_gic_destroy(&v);
}
-void test_kvm_device(void)
+/* Uses the legacy REDIST region API. */
+static void test_v3_redist_ipa_range_check_at_vcpu_run(void)
+{
+ struct vm_gic v;
+ int ret, i;
+ uint64_t addr;
+
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, 1);
+
+ /* Set space for 3 redists, we have 1 vcpu, so this succeeds. */
+ addr = max_phys_size - (3 * 2 * 0x10000);
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST, &addr, true);
+
+ addr = 0x00000;
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &addr, true);
+
+ /* Add the rest of the VCPUs */
+ for (i = 1; i < NR_VCPUS; ++i)
+ vm_vcpu_add_default(v.vm, i, guest_code);
+
+ kvm_device_access(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ /* Attempt to run a vcpu without enough redist space. */
+ ret = run_vcpu(v.vm, 2);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "redist base+size above PA range detected on 1st vcpu run");
+
+ vm_gic_destroy(&v);
+}
+
+static void test_v3_its_region(void)
+{
+ struct vm_gic v;
+ uint64_t addr;
+ int its_fd, ret;
+
+ v = vm_gic_create_with_vcpus(KVM_DEV_TYPE_ARM_VGIC_V3, NR_VCPUS);
+ its_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_ITS, false);
+
+ addr = 0x401000;
+ ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "ITS region with misaligned address");
+
+ addr = max_phys_size;
+ ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG,
+ "register ITS region with base address beyond IPA range");
+
+ addr = max_phys_size - 0x10000;
+ ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ TEST_ASSERT(ret && errno == E2BIG,
+ "Half of ITS region is beyond IPA range");
+
+ /* This one succeeds setting the ITS base */
+ addr = 0x400000;
+ kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+
+ addr = 0x300000;
+ ret = _kvm_device_access(its_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_ITS_ADDR_TYPE, &addr, true);
+ TEST_ASSERT(ret && errno == EEXIST, "ITS base set again");
+
+ close(its_fd);
+ vm_gic_destroy(&v);
+}
+
+/*
+ * Returns 0 if it's possible to create a GIC device of the given type (V2 or V3).
+ */
+int test_kvm_device(uint32_t gic_dev_type)
{
struct vm_gic v;
int ret, fd;
+ uint32_t other;
v.vm = vm_create_default_with_vcpus(NR_VCPUS, 0, 0, guest_code, NULL);
@@ -514,38 +653,70 @@ void test_kvm_device(void)
ret = _kvm_create_device(v.vm, 0, true, &fd);
TEST_ASSERT(ret && errno == ENODEV, "unsupported device");
- /* trial mode with VGIC_V3 device */
- ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true, &fd);
- if (ret) {
- print_skip("GICv3 not supported");
- exit(KSFT_SKIP);
- }
- v.gic_fd = kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ /* trial mode */
+ ret = _kvm_create_device(v.vm, gic_dev_type, true, &fd);
+ if (ret)
+ return ret;
+ v.gic_fd = kvm_create_device(v.vm, gic_dev_type, false);
+
+ ret = _kvm_create_device(v.vm, gic_dev_type, false, &fd);
+ TEST_ASSERT(ret && errno == EEXIST, "create GIC device twice");
- ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, false, &fd);
- TEST_ASSERT(ret && errno == EEXIST, "create GICv3 device twice");
+ kvm_create_device(v.vm, gic_dev_type, true);
- kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V3, true);
+ /* try to create the other gic_dev_type */
+ other = VGIC_DEV_IS_V2(gic_dev_type) ? KVM_DEV_TYPE_ARM_VGIC_V3
+ : KVM_DEV_TYPE_ARM_VGIC_V2;
- if (!_kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, true, &fd)) {
- ret = _kvm_create_device(v.vm, KVM_DEV_TYPE_ARM_VGIC_V2, false, &fd);
- TEST_ASSERT(ret && errno == EINVAL, "create GICv2 while v3 exists");
+ if (!_kvm_create_device(v.vm, other, true, &fd)) {
+ ret = _kvm_create_device(v.vm, other, false, &fd);
+ TEST_ASSERT(ret && errno == EINVAL,
+ "create GIC device while other version exists");
}
vm_gic_destroy(&v);
+
+ return 0;
+}
+
+void run_tests(uint32_t gic_dev_type)
+{
+ test_vcpus_then_vgic(gic_dev_type);
+ test_vgic_then_vcpus(gic_dev_type);
+
+ if (VGIC_DEV_IS_V3(gic_dev_type)) {
+ test_v3_new_redist_regions();
+ test_v3_typer_accesses();
+ test_v3_last_bit_redist_regions();
+ test_v3_last_bit_single_rdist();
+ test_v3_redist_ipa_range_check_at_vcpu_run();
+ test_v3_its_region();
+ }
}
int main(int ac, char **av)
{
- max_ipa_bits = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE);
+ int ret;
+ int pa_bits;
+
+ pa_bits = vm_guest_mode_params[VM_MODE_DEFAULT].pa_bits;
+ max_phys_size = 1ULL << pa_bits;
- test_kvm_device();
- test_vcpus_then_vgic();
- test_vgic_then_vcpus();
- test_new_redist_regions();
- test_typer_accesses();
- test_last_bit_redist_regions();
- test_last_bit_single_rdist();
+ ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V3);
+ if (!ret) {
+ pr_info("Running GIC_v3 tests.\n");
+ run_tests(KVM_DEV_TYPE_ARM_VGIC_V3);
+ return 0;
+ }
+
+ ret = test_kvm_device(KVM_DEV_TYPE_ARM_VGIC_V2);
+ if (!ret) {
+ pr_info("Running GIC_v2 tests.\n");
+ run_tests(KVM_DEV_TYPE_ARM_VGIC_V2);
+ return 0;
+ }
+ print_skip("No GICv2 nor GICv3 support");
+ exit(KSFT_SKIP);
return 0;
}
diff --git a/tools/testing/selftests/kvm/include/aarch64/arch_timer.h b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h
new file mode 100644
index 000000000000..cb7c03de3a21
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/arch_timer.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM Generic Timer specific interface
+ */
+
+#ifndef SELFTEST_KVM_ARCH_TIMER_H
+#define SELFTEST_KVM_ARCH_TIMER_H
+
+#include "processor.h"
+
+enum arch_timer {
+ VIRTUAL,
+ PHYSICAL,
+};
+
+#define CTL_ENABLE (1 << 0)
+#define CTL_IMASK (1 << 1)
+#define CTL_ISTATUS (1 << 2)
+
+#define msec_to_cycles(msec) \
+ (timer_get_cntfrq() * (uint64_t)(msec) / 1000)
+
+#define usec_to_cycles(usec) \
+ (timer_get_cntfrq() * (uint64_t)(usec) / 1000000)
+
+#define cycles_to_usec(cycles) \
+ ((uint64_t)(cycles) * 1000000 / timer_get_cntfrq())
+
+static inline uint32_t timer_get_cntfrq(void)
+{
+ return read_sysreg(cntfrq_el0);
+}
+
+static inline uint64_t timer_get_cntct(enum arch_timer timer)
+{
+ isb();
+
+ switch (timer) {
+ case VIRTUAL:
+ return read_sysreg(cntvct_el0);
+ case PHYSICAL:
+ return read_sysreg(cntpct_el0);
+ default:
+ GUEST_ASSERT_1(0, timer);
+ }
+
+ /* We should not reach here */
+ return 0;
+}
+
+static inline void timer_set_cval(enum arch_timer timer, uint64_t cval)
+{
+ switch (timer) {
+ case VIRTUAL:
+ write_sysreg(cval, cntv_cval_el0);
+ break;
+ case PHYSICAL:
+ write_sysreg(cval, cntp_cval_el0);
+ break;
+ default:
+ GUEST_ASSERT_1(0, timer);
+ }
+
+ isb();
+}
+
+static inline uint64_t timer_get_cval(enum arch_timer timer)
+{
+ switch (timer) {
+ case VIRTUAL:
+ return read_sysreg(cntv_cval_el0);
+ case PHYSICAL:
+ return read_sysreg(cntp_cval_el0);
+ default:
+ GUEST_ASSERT_1(0, timer);
+ }
+
+ /* We should not reach here */
+ return 0;
+}
+
+static inline void timer_set_tval(enum arch_timer timer, uint32_t tval)
+{
+ switch (timer) {
+ case VIRTUAL:
+ write_sysreg(tval, cntv_tval_el0);
+ break;
+ case PHYSICAL:
+ write_sysreg(tval, cntp_tval_el0);
+ break;
+ default:
+ GUEST_ASSERT_1(0, timer);
+ }
+
+ isb();
+}
+
+static inline void timer_set_ctl(enum arch_timer timer, uint32_t ctl)
+{
+ switch (timer) {
+ case VIRTUAL:
+ write_sysreg(ctl, cntv_ctl_el0);
+ break;
+ case PHYSICAL:
+ write_sysreg(ctl, cntp_ctl_el0);
+ break;
+ default:
+ GUEST_ASSERT_1(0, timer);
+ }
+
+ isb();
+}
+
+static inline uint32_t timer_get_ctl(enum arch_timer timer)
+{
+ switch (timer) {
+ case VIRTUAL:
+ return read_sysreg(cntv_ctl_el0);
+ case PHYSICAL:
+ return read_sysreg(cntp_ctl_el0);
+ default:
+ GUEST_ASSERT_1(0, timer);
+ }
+
+ /* We should not reach here */
+ return 0;
+}
+
+static inline void timer_set_next_cval_ms(enum arch_timer timer, uint32_t msec)
+{
+ uint64_t now_ct = timer_get_cntct(timer);
+ uint64_t next_ct = now_ct + msec_to_cycles(msec);
+
+ timer_set_cval(timer, next_ct);
+}
+
+static inline void timer_set_next_tval_ms(enum arch_timer timer, uint32_t msec)
+{
+ timer_set_tval(timer, msec_to_cycles(msec));
+}
+
+#endif /* SELFTEST_KVM_ARCH_TIMER_H */
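A minimal guest-side sketch of how these accessors compose (assuming the
selftest guest environment with this header included; the helper name is
illustrative, not part of the patch):

    /* Arm the virtual timer to fire in ~10 ms and leave it unmasked.
     * msec_to_cycles() scales by CNTFRQ; timer_set_ctl() ends with an ISB.
     */
    static inline void arm_vtimer_in_10ms(void)
    {
            timer_set_next_cval_ms(VIRTUAL, 10);    /* CNTV_CVAL = CNTVCT + 10 ms */
            timer_set_ctl(VIRTUAL, CTL_ENABLE);     /* enabled, IMASK clear */
    }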
diff --git a/tools/testing/selftests/kvm/include/aarch64/delay.h b/tools/testing/selftests/kvm/include/aarch64/delay.h
new file mode 100644
index 000000000000..329e4f5079ea
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/delay.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM simple delay routines
+ */
+
+#ifndef SELFTEST_KVM_ARM_DELAY_H
+#define SELFTEST_KVM_ARM_DELAY_H
+
+#include "arch_timer.h"
+
+static inline void __delay(uint64_t cycles)
+{
+ enum arch_timer timer = VIRTUAL;
+ uint64_t start = timer_get_cntct(timer);
+
+ while ((timer_get_cntct(timer) - start) < cycles)
+ cpu_relax();
+}
+
+static inline void udelay(unsigned long usec)
+{
+ __delay(usec_to_cycles(usec));
+}
+
+#endif /* SELFTEST_KVM_ARM_DELAY_H */
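For illustration (same guest environment assumed), two equivalent ways to
express the same busy-wait, one in microseconds and one in raw counter cycles
via usec_to_cycles():

    static inline void wait_one_ms(void)
    {
            udelay(1000);                   /* 1000 us */
    }

    static inline void wait_one_ms_cycles(void)
    {
            __delay(usec_to_cycles(1000));  /* same wait, in counter cycles */
    }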
diff --git a/tools/testing/selftests/kvm/include/aarch64/gic.h b/tools/testing/selftests/kvm/include/aarch64/gic.h
new file mode 100644
index 000000000000..85dd1e53048e
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/gic.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM Generic Interrupt Controller (GIC) specific defines
+ */
+
+#ifndef SELFTEST_KVM_GIC_H
+#define SELFTEST_KVM_GIC_H
+
+enum gic_type {
+ GIC_V3,
+ GIC_TYPE_MAX,
+};
+
+void gic_init(enum gic_type type, unsigned int nr_cpus,
+ void *dist_base, void *redist_base);
+void gic_irq_enable(unsigned int intid);
+void gic_irq_disable(unsigned int intid);
+unsigned int gic_get_and_ack_irq(void);
+void gic_set_eoi(unsigned int intid);
+
+#endif /* SELFTEST_KVM_GIC_H */
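A condensed guest-side sketch of this API, mirroring how the arch_timer test
uses it (the PPI number 27 for the EL1 virtual timer and the base pointers
are assumptions made for the example):

    #define VTIMER_PPI      27      /* assumed EL1 virtual timer PPI */

    static void guest_gic_example(void *gicd_base, void *gicr_base,
                                  unsigned int nr_vcpus)
    {
            unsigned int intid;

            gic_init(GIC_V3, nr_vcpus, gicd_base, gicr_base);
            gic_irq_enable(VTIMER_PPI);

            /* ...later, from the guest IRQ handler... */
            intid = gic_get_and_ack_irq();
            /* handle 'intid' here */
            gic_set_eoi(intid);
    }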
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
index c0273aefa63d..27d8e1bb5b36 100644
--- a/tools/testing/selftests/kvm/include/aarch64/processor.h
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -9,20 +9,24 @@
#include "kvm_util.h"
#include <linux/stringify.h>
+#include <linux/types.h>
+#include <asm/sysreg.h>
#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
-#define CPACR_EL1 3, 0, 1, 0, 2
-#define TCR_EL1 3, 0, 2, 0, 2
-#define MAIR_EL1 3, 0, 10, 2, 0
-#define MPIDR_EL1 3, 0, 0, 0, 5
-#define TTBR0_EL1 3, 0, 2, 0, 0
-#define SCTLR_EL1 3, 0, 1, 0, 0
-#define VBAR_EL1 3, 0, 12, 0, 0
-
-#define ID_AA64DFR0_EL1 3, 0, 0, 5, 0
+/*
+ * KVM_ARM64_SYS_REG(sys_reg_id): Helper macro to convert
+ * SYS_* register definitions in asm/sysreg.h to use in KVM
+ * calls such as get_reg() and set_reg().
+ */
+#define KVM_ARM64_SYS_REG(sys_reg_id) \
+ ARM64_SYS_REG(sys_reg_Op0(sys_reg_id), \
+ sys_reg_Op1(sys_reg_id), \
+ sys_reg_CRn(sys_reg_id), \
+ sys_reg_CRm(sys_reg_id), \
+ sys_reg_Op2(sys_reg_id))
/*
* Default MAIR
@@ -59,7 +63,7 @@ static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint
vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
}
-void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init);
+void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init);
void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
struct kvm_vcpu_init *init, void *guest_code);
@@ -118,18 +122,64 @@ void vm_install_exception_handler(struct kvm_vm *vm,
void vm_install_sync_handler(struct kvm_vm *vm,
int vector, int ec, handler_fn handler);
-#define write_sysreg(reg, val) \
-({ \
- u64 __val = (u64)(val); \
- asm volatile("msr " __stringify(reg) ", %x0" : : "rZ" (__val)); \
-})
+static inline void cpu_relax(void)
+{
+ asm volatile("yield" ::: "memory");
+}
-#define read_sysreg(reg) \
-({ u64 val; \
- asm volatile("mrs %0, "__stringify(reg) : "=r"(val) : : "memory");\
- val; \
+#define isb() asm volatile("isb" : : : "memory")
+#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
+#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
+
+#define dma_wmb() dmb(oshst)
+#define __iowmb() dma_wmb()
+
+#define dma_rmb() dmb(oshld)
+
+#define __iormb(v) \
+({ \
+ unsigned long tmp; \
+ \
+ dma_rmb(); \
+ \
+ /* \
+ * Courtesy of arch/arm64/include/asm/io.h: \
+ * Create a dummy control dependency from the IO read to any \
+ * later instructions. This ensures that a subsequent call \
+ * to udelay() will be ordered due to the ISB in __delay(). \
+ */ \
+ asm volatile("eor %0, %1, %1\n" \
+ "cbnz %0, ." \
+ : "=r" (tmp) : "r" ((unsigned long)(v)) \
+ : "memory"); \
})
-#define isb() asm volatile("isb" : : : "memory")
+static __always_inline void __raw_writel(u32 val, volatile void *addr)
+{
+ asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
+}
+
+static __always_inline u32 __raw_readl(const volatile void *addr)
+{
+ u32 val;
+ asm volatile("ldr %w0, [%1]" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define writel_relaxed(v,c) ((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
+#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writel(v,c) ({ __iowmb(); writel_relaxed((v),(c));})
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
+
+static inline void local_irq_enable(void)
+{
+ asm volatile("msr daifclr, #3" : : : "memory");
+}
+
+static inline void local_irq_disable(void)
+{
+ asm volatile("msr daifset, #3" : : : "memory");
+}
#endif /* SELFTEST_KVM_PROCESSOR_H */
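A host-side usage sketch for the KVM_ARM64_SYS_REG() helper above (the vm
pointer and vCPU id 0 are placeholders); SYS_MPIDR_EL1 comes from the newly
included <asm/sysreg.h>, and the call shape matches the psci_cpu_on_test
change earlier in this series:

    static uint64_t read_vcpu0_mpidr(struct kvm_vm *vm)
    {
            uint64_t mpidr;

            get_reg(vm, 0, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), &mpidr);
            return mpidr;
    }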
diff --git a/tools/testing/selftests/kvm/include/aarch64/spinlock.h b/tools/testing/selftests/kvm/include/aarch64/spinlock.h
new file mode 100644
index 000000000000..cf0984106d14
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/spinlock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef SELFTEST_KVM_ARM64_SPINLOCK_H
+#define SELFTEST_KVM_ARM64_SPINLOCK_H
+
+struct spinlock {
+ int v;
+};
+
+extern void spin_lock(struct spinlock *lock);
+extern void spin_unlock(struct spinlock *lock);
+
+#endif /* SELFTEST_KVM_ARM64_SPINLOCK_H */
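A small guest-side sketch of the intended usage, guarding one-time shared
initialization across vCPUs (much like the GIC library below does):

    static struct spinlock init_lock;       /* zero-initialized: unlocked */
    static bool initialized;

    static void init_once(void)
    {
            spin_lock(&init_lock);
            if (!initialized) {
                    /* one-time shared setup goes here */
                    initialized = true;
            }
            spin_unlock(&init_lock);
    }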
diff --git a/tools/testing/selftests/kvm/include/aarch64/vgic.h b/tools/testing/selftests/kvm/include/aarch64/vgic.h
new file mode 100644
index 000000000000..0ecfb253893c
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/vgic.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM Generic Interrupt Controller (GIC) host specific defines
+ */
+
+#ifndef SELFTEST_KVM_VGIC_H
+#define SELFTEST_KVM_VGIC_H
+
+#include <linux/kvm.h>
+
+#define REDIST_REGION_ATTR_ADDR(count, base, flags, index) \
+ (((uint64_t)(count) << 52) | \
+ ((uint64_t)((base) >> 16) << 16) | \
+ ((uint64_t)(flags) << 12) | \
+ index)
+
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
+ uint64_t gicd_base_gpa, uint64_t gicr_base_gpa);
+
+#endif /* SELFTEST_KVM_VGIC_H */
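For reference, a hedged host-side sketch of how the encoded attribute is
consumed (gic_fd, nr_vcpus and the 0x100000 GPA are placeholders; the call
shape mirrors vgic_init.c and vgic.c):

    /* Describe a redistributor region holding nr_vcpus redistributors at
     * GPA 0x100000, flags 0, region index 0, then register it with KVM.
     */
    uint64_t attr = REDIST_REGION_ATTR_ADDR(nr_vcpus, 0x100000, 0, 0);

    kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
                      KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &attr, true);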
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 1b3ef5757819..f6b3794f306b 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -411,4 +411,6 @@ uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
int vm_get_stats_fd(struct kvm_vm *vm);
int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid);
+uint32_t guest_get_vcpuid(void);
+
#endif /* SELFTEST_KVM_UTIL_H */
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 4fa1db32c05e..f8fddc84c0d3 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -97,6 +97,8 @@ struct vm_mem_backing_src_alias {
uint32_t flag;
};
+#define MIN_RUN_DELAY_NS 200000UL
+
bool thp_configured(void);
size_t get_trans_hugepagesz(void);
size_t get_def_hugetlb_pagesz(void);
@@ -104,6 +106,7 @@ const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i);
size_t get_backing_src_pagesz(uint32_t i);
void backing_src_help(const char *flag);
enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name);
+long get_run_delay(void);
/*
* Whether or not the given source type is shared memory (as opposed to
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic.c b/tools/testing/selftests/kvm/lib/aarch64/gic.c
new file mode 100644
index 000000000000..fff4fc27504d
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Generic Interrupt Controller (GIC) support
+ */
+
+#include <errno.h>
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+#include "kvm_util.h"
+
+#include <gic.h>
+#include "gic_private.h"
+#include "processor.h"
+#include "spinlock.h"
+
+static const struct gic_common_ops *gic_common_ops;
+static struct spinlock gic_lock;
+
+static void gic_cpu_init(unsigned int cpu, void *redist_base)
+{
+ gic_common_ops->gic_cpu_init(cpu, redist_base);
+}
+
+static void
+gic_dist_init(enum gic_type type, unsigned int nr_cpus, void *dist_base)
+{
+ const struct gic_common_ops *gic_ops = NULL;
+
+ spin_lock(&gic_lock);
+
+ /* Distributor initialization is needed only once per VM */
+ if (gic_common_ops) {
+ spin_unlock(&gic_lock);
+ return;
+ }
+
+ if (type == GIC_V3)
+ gic_ops = &gicv3_ops;
+
+ GUEST_ASSERT(gic_ops);
+
+ gic_ops->gic_init(nr_cpus, dist_base);
+ gic_common_ops = gic_ops;
+
+ /* Make sure that the initialized data is visible to all the vCPUs */
+ dsb(sy);
+
+ spin_unlock(&gic_lock);
+}
+
+void gic_init(enum gic_type type, unsigned int nr_cpus,
+ void *dist_base, void *redist_base)
+{
+ uint32_t cpu = guest_get_vcpuid();
+
+ GUEST_ASSERT(type < GIC_TYPE_MAX);
+ GUEST_ASSERT(dist_base);
+ GUEST_ASSERT(redist_base);
+ GUEST_ASSERT(nr_cpus);
+
+ gic_dist_init(type, nr_cpus, dist_base);
+ gic_cpu_init(cpu, redist_base);
+}
+
+void gic_irq_enable(unsigned int intid)
+{
+ GUEST_ASSERT(gic_common_ops);
+ gic_common_ops->gic_irq_enable(intid);
+}
+
+void gic_irq_disable(unsigned int intid)
+{
+ GUEST_ASSERT(gic_common_ops);
+ gic_common_ops->gic_irq_disable(intid);
+}
+
+unsigned int gic_get_and_ack_irq(void)
+{
+ uint64_t irqstat;
+ unsigned int intid;
+
+ GUEST_ASSERT(gic_common_ops);
+
+ irqstat = gic_common_ops->gic_read_iar();
+ intid = irqstat & GENMASK(23, 0);
+
+ return intid;
+}
+
+void gic_set_eoi(unsigned int intid)
+{
+ GUEST_ASSERT(gic_common_ops);
+ gic_common_ops->gic_write_eoir(intid);
+}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_private.h b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h
new file mode 100644
index 000000000000..d81d739433dc
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM Generic Interrupt Controller (GIC) private defines that are only
+ * shared among the GIC library code.
+ */
+
+#ifndef SELFTEST_KVM_GIC_PRIVATE_H
+#define SELFTEST_KVM_GIC_PRIVATE_H
+
+struct gic_common_ops {
+ void (*gic_init)(unsigned int nr_cpus, void *dist_base);
+ void (*gic_cpu_init)(unsigned int cpu, void *redist_base);
+ void (*gic_irq_enable)(unsigned int intid);
+ void (*gic_irq_disable)(unsigned int intid);
+ uint64_t (*gic_read_iar)(void);
+ void (*gic_write_eoir)(uint32_t irq);
+};
+
+extern const struct gic_common_ops gicv3_ops;
+
+#endif /* SELFTEST_KVM_GIC_PRIVATE_H */
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
new file mode 100644
index 000000000000..2dbf3339b62e
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Generic Interrupt Controller (GIC) v3 support
+ */
+
+#include <linux/sizes.h>
+
+#include "kvm_util.h"
+#include "processor.h"
+#include "delay.h"
+
+#include "gic_v3.h"
+#include "gic_private.h"
+
+struct gicv3_data {
+ void *dist_base;
+ void *redist_base[GICV3_MAX_CPUS];
+ unsigned int nr_cpus;
+ unsigned int nr_spis;
+};
+
+#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K)
+
+enum gicv3_intid_range {
+ SGI_RANGE,
+ PPI_RANGE,
+ SPI_RANGE,
+ INVALID_RANGE,
+};
+
+static struct gicv3_data gicv3_data;
+
+static void gicv3_gicd_wait_for_rwp(void)
+{
+ unsigned int count = 100000; /* 1s */
+
+ while (readl(gicv3_data.dist_base + GICD_CTLR) & GICD_CTLR_RWP) {
+ GUEST_ASSERT(count--);
+ udelay(10);
+ }
+}
+
+static void gicv3_gicr_wait_for_rwp(void *redist_base)
+{
+ unsigned int count = 100000; /* 1s */
+
+ while (readl(redist_base + GICR_CTLR) & GICR_CTLR_RWP) {
+ GUEST_ASSERT(count--);
+ udelay(10);
+ }
+}
+
+static enum gicv3_intid_range get_intid_range(unsigned int intid)
+{
+ switch (intid) {
+ case 0 ... 15:
+ return SGI_RANGE;
+ case 16 ... 31:
+ return PPI_RANGE;
+ case 32 ... 1019:
+ return SPI_RANGE;
+ }
+
+ /* We should not be reaching here */
+ GUEST_ASSERT(0);
+
+ return INVALID_RANGE;
+}
+
+static uint64_t gicv3_read_iar(void)
+{
+ uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1);
+
+ dsb(sy);
+ return irqstat;
+}
+
+static void gicv3_write_eoir(uint32_t irq)
+{
+ write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
+ isb();
+}
+
+static void
+gicv3_config_irq(unsigned int intid, unsigned int offset)
+{
+ uint32_t cpu = guest_get_vcpuid();
+ uint32_t mask = 1 << (intid % 32);
+ enum gicv3_intid_range intid_range = get_intid_range(intid);
+ void *reg;
+
+ /* We care about 'cpu' only for SGIs or PPIs */
+ if (intid_range == SGI_RANGE || intid_range == PPI_RANGE) {
+ GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
+
+ reg = sgi_base_from_redist(gicv3_data.redist_base[cpu]) +
+ offset;
+ writel(mask, reg);
+ gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu]);
+ } else if (intid_range == SPI_RANGE) {
+ reg = gicv3_data.dist_base + offset + (intid / 32) * 4;
+ writel(mask, reg);
+ gicv3_gicd_wait_for_rwp();
+ } else {
+ GUEST_ASSERT(0);
+ }
+}
+
+static void gicv3_irq_enable(unsigned int intid)
+{
+ gicv3_config_irq(intid, GICD_ISENABLER);
+}
+
+static void gicv3_irq_disable(unsigned int intid)
+{
+ gicv3_config_irq(intid, GICD_ICENABLER);
+}
+
+static void gicv3_enable_redist(void *redist_base)
+{
+ uint32_t val = readl(redist_base + GICR_WAKER);
+ unsigned int count = 100000; /* 1s */
+
+ val &= ~GICR_WAKER_ProcessorSleep;
+ writel(val, redist_base + GICR_WAKER);
+
+ /* Wait until the processor is 'active' */
+ while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
+ GUEST_ASSERT(count--);
+ udelay(10);
+ }
+}
+
+static inline void *gicr_base_cpu(void *redist_base, uint32_t cpu)
+{
+ /* Align all the redistributors sequentially */
+ return redist_base + cpu * SZ_64K * 2;
+}
+
+static void gicv3_cpu_init(unsigned int cpu, void *redist_base)
+{
+ void *sgi_base;
+ unsigned int i;
+ void *redist_base_cpu;
+
+ GUEST_ASSERT(cpu < gicv3_data.nr_cpus);
+
+ redist_base_cpu = gicr_base_cpu(redist_base, cpu);
+ sgi_base = sgi_base_from_redist(redist_base_cpu);
+
+ gicv3_enable_redist(redist_base_cpu);
+
+ /*
+ * Mark all the SGI and PPI interrupts as non-secure Group-1.
+ * Also, deactivate and disable them.
+ */
+ writel(~0, sgi_base + GICR_IGROUPR0);
+ writel(~0, sgi_base + GICR_ICACTIVER0);
+ writel(~0, sgi_base + GICR_ICENABLER0);
+
+ /* Set a default priority for all the SGIs and PPIs */
+ for (i = 0; i < 32; i += 4)
+ writel(GICD_INT_DEF_PRI_X4,
+ sgi_base + GICR_IPRIORITYR0 + i);
+
+ gicv3_gicr_wait_for_rwp(redist_base_cpu);
+
+ /* Enable the GIC system register (ICC_*) access */
+ write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE,
+ SYS_ICC_SRE_EL1);
+
+ /* Set a default priority threshold */
+ write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1);
+
+ /* Enable non-secure Group-1 interrupts */
+ write_sysreg_s(ICC_IGRPEN1_EL1_ENABLE, SYS_ICC_GRPEN1_EL1);
+
+ gicv3_data.redist_base[cpu] = redist_base_cpu;
+}
+
+static void gicv3_dist_init(void)
+{
+ void *dist_base = gicv3_data.dist_base;
+ unsigned int i;
+
+ /* Disable the distributor until we set things up */
+ writel(0, dist_base + GICD_CTLR);
+ gicv3_gicd_wait_for_rwp();
+
+ /*
+ * Mark all the SPI interrupts as non-secure Group-1.
+ * Also, deactivate and disable them.
+ */
+ for (i = 32; i < gicv3_data.nr_spis; i += 32) {
+ writel(~0, dist_base + GICD_IGROUPR + i / 8);
+ writel(~0, dist_base + GICD_ICACTIVER + i / 8);
+ writel(~0, dist_base + GICD_ICENABLER + i / 8);
+ }
+
+ /* Set a default priority for all the SPIs */
+ for (i = 32; i < gicv3_data.nr_spis; i += 4)
+ writel(GICD_INT_DEF_PRI_X4,
+ dist_base + GICD_IPRIORITYR + i);
+
+ /* Wait for the settings to sync-in */
+ gicv3_gicd_wait_for_rwp();
+
+ /* Finally, enable the distributor globally with ARE */
+ writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A |
+ GICD_CTLR_ENABLE_G1, dist_base + GICD_CTLR);
+ gicv3_gicd_wait_for_rwp();
+}
+
+static void gicv3_init(unsigned int nr_cpus, void *dist_base)
+{
+ GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS);
+
+ gicv3_data.nr_cpus = nr_cpus;
+ gicv3_data.dist_base = dist_base;
+ gicv3_data.nr_spis = GICD_TYPER_SPIS(
+ readl(gicv3_data.dist_base + GICD_TYPER));
+ if (gicv3_data.nr_spis > 1020)
+ gicv3_data.nr_spis = 1020;
+
+ /*
+ * Initialize only the distributor for now.
+ * The redistributor and CPU interfaces are initialized
+ * later for every PE.
+ */
+ gicv3_dist_init();
+}
+
+const struct gic_common_ops gicv3_ops = {
+ .gic_init = gicv3_init,
+ .gic_cpu_init = gicv3_cpu_init,
+ .gic_irq_enable = gicv3_irq_enable,
+ .gic_irq_disable = gicv3_irq_disable,
+ .gic_read_iar = gicv3_read_iar,
+ .gic_write_eoir = gicv3_write_eoir,
+};
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h
new file mode 100644
index 000000000000..b51536d469a6
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ARM Generic Interrupt Controller (GIC) v3 specific defines
+ */
+
+#ifndef SELFTEST_KVM_GICV3_H
+#define SELFTEST_KVM_GICV3_H
+
+#include <asm/sysreg.h>
+
+/*
+ * Distributor registers
+ */
+#define GICD_CTLR 0x0000
+#define GICD_TYPER 0x0004
+#define GICD_IGROUPR 0x0080
+#define GICD_ISENABLER 0x0100
+#define GICD_ICENABLER 0x0180
+#define GICD_ICACTIVER 0x0380
+#define GICD_IPRIORITYR 0x0400
+
+/*
+ * The assumption is that the guest runs in a non-secure mode.
+ * The following bits of GICD_CTLR are defined accordingly.
+ */
+#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_nASSGIreq (1U << 8)
+#define GICD_CTLR_ARE_NS (1U << 4)
+#define GICD_CTLR_ENABLE_G1A (1U << 1)
+#define GICD_CTLR_ENABLE_G1 (1U << 0)
+
+#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32)
+#define GICD_INT_DEF_PRI_X4 0xa0a0a0a0
+
+/*
+ * Redistributor registers
+ */
+#define GICR_CTLR 0x000
+#define GICR_WAKER 0x014
+
+#define GICR_CTLR_RWP (1U << 3)
+
+#define GICR_WAKER_ProcessorSleep (1U << 1)
+#define GICR_WAKER_ChildrenAsleep (1U << 2)
+
+/*
+ * Redistributor registers, offsets from SGI base
+ */
+#define GICR_IGROUPR0 GICD_IGROUPR
+#define GICR_ISENABLER0 GICD_ISENABLER
+#define GICR_ICENABLER0 GICD_ICENABLER
+#define GICR_ICACTIVER0 GICD_ICACTIVER
+#define GICR_IPRIORITYR0 GICD_IPRIORITYR
+
+/* CPU interface registers */
+#define SYS_ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
+#define SYS_ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
+#define SYS_ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
+#define SYS_ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
+#define SYS_ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_PMR_DEF_PRIO 0xf0
+
+#define ICC_SRE_EL1_SRE (1U << 0)
+
+#define ICC_IGRPEN1_EL1_ENABLE (1U << 0)
+
+#define GICV3_MAX_CPUS 512
+
+#endif /* SELFTEST_KVM_GICV3_H */
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
index 632b74d6b3ca..b4eeeafd2a70 100644
--- a/tools/testing/selftests/kvm/lib/aarch64/processor.c
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -212,7 +212,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
}
}
-void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
+void aarch64_vcpu_setup(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_vcpu_init *init)
{
struct kvm_vcpu_init default_init = { .target = -1, };
uint64_t sctlr_el1, tcr_el1;
@@ -232,10 +232,10 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini
* Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
* registers, which the variable argument list macros do.
*/
- set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);
+ set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20);
- get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
- get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);
+ get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1);
+ get_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1);
switch (vm->mode) {
case VM_MODE_P52V48_4K:
@@ -273,10 +273,11 @@ void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *ini
tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
- set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
- set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
- set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
- set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
+ set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1);
+ set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1);
+ set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1);
+ set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), vm->pgd);
+ set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpuid);
}
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
@@ -362,7 +363,7 @@ void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
{
extern char vectors;
- set_reg(vm, vcpuid, ARM64_SYS_REG(VBAR_EL1), (uint64_t)&vectors);
+ set_reg(vm, vcpuid, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors);
}
void route_exception(struct ex_regs *regs, int vector)
@@ -426,3 +427,8 @@ void vm_install_exception_handler(struct kvm_vm *vm, int vector,
assert(vector < VECTOR_NUM);
handlers->exception_handlers[vector][0] = handler;
}
+
+uint32_t guest_get_vcpuid(void)
+{
+ return read_sysreg(tpidr_el1);
+}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/spinlock.c b/tools/testing/selftests/kvm/lib/aarch64/spinlock.c
new file mode 100644
index 000000000000..a076e780be5d
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/spinlock.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM64 Spinlock support
+ */
+#include <stdint.h>
+
+#include "spinlock.h"
+
+void spin_lock(struct spinlock *lock)
+{
+ int val, res;
+
+ asm volatile(
+ "1: ldaxr %w0, [%2]\n"
+ " cbnz %w0, 1b\n"
+ " mov %w0, #1\n"
+ " stxr %w1, %w0, [%2]\n"
+ " cbnz %w1, 1b\n"
+ : "=&r" (val), "=&r" (res)
+ : "r" (&lock->v)
+ : "memory");
+}
+
+void spin_unlock(struct spinlock *lock)
+{
+ asm volatile("stlr wzr, [%0]\n" : : "r" (&lock->v) : "memory");
+}
diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
new file mode 100644
index 000000000000..b9b271ff520d
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Generic Interrupt Controller (GIC) v3 host support
+ */
+
+#include <linux/kvm.h>
+#include <linux/sizes.h>
+#include <asm/kvm.h>
+
+#include "kvm_util.h"
+#include "../kvm_util_internal.h"
+#include "vgic.h"
+
+/*
+ * vGIC-v3 default host setup
+ *
+ * Input args:
+ * vm - KVM VM
+ * nr_vcpus - Number of vCPUs supported by this VM
+ * gicd_base_gpa - Guest Physical Address of the Distributor region
+ * gicr_base_gpa - Guest Physical Address of the Redistributor region
+ *
+ * Output args: None
+ *
+ * Return: GIC file-descriptor or negative error code upon failure
+ *
+ * The function creates a vGIC-v3 device and maps the distributor and
+ * redistributor regions of the guest. Since it depends on the number of
+ * vCPUs for the VM, it must be called after all the vCPUs have been created.
+ */
+int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus,
+ uint64_t gicd_base_gpa, uint64_t gicr_base_gpa)
+{
+ int gic_fd;
+ uint64_t redist_attr;
+ struct list_head *iter;
+ unsigned int nr_gic_pages, nr_vcpus_created = 0;
+
+ TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be zero\n");
+
+ /*
+ * Make sure that the caller is in fact calling this
+ * function after all the vCPUs are added.
+ */
+ list_for_each(iter, &vm->vcpus)
+ nr_vcpus_created++;
+ TEST_ASSERT(nr_vcpus == nr_vcpus_created,
+ "Number of vCPUs requested (%u) doesn't match the number created for the VM (%u)\n",
+ nr_vcpus, nr_vcpus_created);
+
+ /* Distributor setup */
+ gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3, false);
+ kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa, true);
+ nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE);
+ virt_map(vm, gicd_base_gpa, gicd_base_gpa, nr_gic_pages);
+
+ /* Redistributor setup */
+ redist_attr = REDIST_REGION_ATTR_ADDR(nr_vcpus, gicr_base_gpa, 0, 0);
+ kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
+ KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &redist_attr, true);
+ nr_gic_pages = vm_calc_num_guest_pages(vm->mode,
+ KVM_VGIC_V3_REDIST_SIZE * nr_vcpus);
+ virt_map(vm, gicr_base_gpa, gicr_base_gpa, nr_gic_pages);
+
+ kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+ KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
+
+ return gic_fd;
+}
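
A hedged usage sketch for the helper above; the vCPU count and the distributor/redistributor guest addresses are placeholders chosen for illustration, not values required by the library:

#define NR_TEST_VCPUS	2
#define GICD_BASE_GPA	0x08000000ULL	/* illustrative guest addresses */
#define GICR_BASE_GPA	0x080a0000ULL

static int create_vm_gic(struct kvm_vm *vm)
{
	int gic_fd;

	/* Only valid once every vCPU has been added to @vm. */
	gic_fd = vgic_v3_setup(vm, NR_TEST_VCPUS, GICD_BASE_GPA, GICR_BASE_GPA);
	TEST_ASSERT(gic_fd >= 0, "Failed to create a vgic-v3 device");

	return gic_fd;
}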
diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c
index e487f798e095..b72429108993 100644
--- a/tools/testing/selftests/kvm/lib/test_util.c
+++ b/tools/testing/selftests/kvm/lib/test_util.c
@@ -11,6 +11,7 @@
#include <stdlib.h>
#include <time.h>
#include <sys/stat.h>
+#include <sys/syscall.h>
#include <linux/mman.h>
#include "linux/kernel.h"
@@ -129,13 +130,16 @@ size_t get_trans_hugepagesz(void)
{
size_t size;
FILE *f;
+ int ret;
TEST_ASSERT(thp_configured(), "THP is not configured in host kernel");
f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r");
TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size");
- fscanf(f, "%ld", &size);
+ ret = fscanf(f, "%ld", &size);
+ TEST_ASSERT(ret == 1, "Error reading transparent_hugepage/hpage_pmd_size");
fclose(f);
return size;
@@ -309,3 +313,19 @@ enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name)
TEST_FAIL("Unknown backing src type: %s", type_name);
return -1;
}
+
+long get_run_delay(void)
+{
+ char path[64];
+ long val[2];
+ FILE *fp;
+
+ sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
+ fp = fopen(path, "r");
+ /* Return MIN_RUN_DELAY_NS upon failure just to be safe */
+ if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2)
+ val[1] = MIN_RUN_DELAY_NS;
+ fclose(fp);
+
+ return val[1];
+}
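
A hedged sketch of how a caller might use the now-shared helper; it assumes MIN_RUN_DELAY_NS moved into the shared headers along with get_run_delay(), and the contend callback is a stand-in for whatever the test runs to compete for the CPU:

static void assert_thread_waited(void (*contend)(void))
{
	long before = get_run_delay();

	contend();	/* e.g. a busy loop pinned to this thread's CPU */

	/* The second schedstat field only grows while runnable-but-waiting. */
	TEST_ASSERT(get_run_delay() - before >= MIN_RUN_DELAY_NS,
		    "Thread was never made to wait for the CPU");
}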
diff --git a/tools/testing/selftests/kvm/memslot_perf_test.c b/tools/testing/selftests/kvm/memslot_perf_test.c
index d6e381e01db7..1727f75e0c2c 100644
--- a/tools/testing/selftests/kvm/memslot_perf_test.c
+++ b/tools/testing/selftests/kvm/memslot_perf_test.c
@@ -127,43 +127,54 @@ static bool verbose;
pr_info(__VA_ARGS__); \
} while (0)
+static void check_mmio_access(struct vm_data *vm, struct kvm_run *run)
+{
+ TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
+ TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
+ TEST_ASSERT(run->mmio.len == 8,
+ "Unexpected exit mmio size = %u", run->mmio.len);
+ TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
+ run->mmio.phys_addr <= vm->mmio_gpa_max,
+ "Unexpected exit mmio address = 0x%llx",
+ run->mmio.phys_addr);
+}
+
static void *vcpu_worker(void *data)
{
struct vm_data *vm = data;
struct kvm_run *run;
struct ucall uc;
- uint64_t cmd;
run = vcpu_state(vm->vm, VCPU_ID);
while (1) {
vcpu_run(vm->vm, VCPU_ID);
- if (run->exit_reason == KVM_EXIT_IO) {
- cmd = get_ucall(vm->vm, VCPU_ID, &uc);
- if (cmd != UCALL_SYNC)
- break;
-
+ switch (get_ucall(vm->vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
+ TEST_ASSERT(uc.args[1] == 0,
+ "Unexpected sync ucall, got %lx",
+ (ulong)uc.args[1]);
sem_post(&vcpu_ready);
continue;
- }
-
- if (run->exit_reason != KVM_EXIT_MMIO)
+ case UCALL_NONE:
+ if (run->exit_reason == KVM_EXIT_MMIO)
+ check_mmio_access(vm, run);
+ else
+ goto done;
break;
-
- TEST_ASSERT(vm->mmio_ok, "Unexpected mmio exit");
- TEST_ASSERT(run->mmio.is_write, "Unexpected mmio read");
- TEST_ASSERT(run->mmio.len == 8,
- "Unexpected exit mmio size = %u", run->mmio.len);
- TEST_ASSERT(run->mmio.phys_addr >= vm->mmio_gpa_min &&
- run->mmio.phys_addr <= vm->mmio_gpa_max,
- "Unexpected exit mmio address = 0x%llx",
- run->mmio.phys_addr);
+ case UCALL_ABORT:
+ TEST_FAIL("%s at %s:%ld, val = %lu",
+ (const char *)uc.args[0],
+ __FILE__, uc.args[1], uc.args[2]);
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_FAIL("Unknown ucall %lu", uc.cmd);
+ }
}
- if (run->exit_reason == KVM_EXIT_IO && cmd == UCALL_ABORT)
- TEST_FAIL("%s at %s:%ld, val = %lu", (const char *)uc.args[0],
- __FILE__, uc.args[1], uc.args[2]);
-
+done:
return NULL;
}
@@ -268,6 +279,7 @@ static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
TEST_ASSERT(data->hva_slots, "malloc() fail");
data->vm = vm_create_default(VCPU_ID, mempages, guest_code);
+ ucall_init(data->vm, NULL);
pr_info_v("Adding slots 1..%i, each slot with %"PRIu64" pages + %"PRIu64" extra pages last\n",
max_mem_slots - 1, data->pages_per_slot, rempages);
diff --git a/tools/testing/selftests/kvm/steal_time.c b/tools/testing/selftests/kvm/steal_time.c
index aafaa8e38b7c..62f2eb9ee3d5 100644
--- a/tools/testing/selftests/kvm/steal_time.c
+++ b/tools/testing/selftests/kvm/steal_time.c
@@ -10,7 +10,6 @@
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
-#include <sys/syscall.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>
@@ -20,7 +19,6 @@
#define NR_VCPUS 4
#define ST_GPA_BASE (1 << 30)
-#define MIN_RUN_DELAY_NS 200000UL
static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];
@@ -217,20 +215,6 @@ static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
#endif
-static long get_run_delay(void)
-{
- char path[64];
- long val[2];
- FILE *fp;
-
- sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
- fp = fopen(path, "r");
- fscanf(fp, "%ld %ld ", &val[0], &val[1]);
- fclose(fp);
-
- return val[1];
-}
-
static void *do_steal_time(void *arg)
{
struct timespec ts, stop;
diff --git a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
index e6480fd5c4bd..8039e1eff938 100644
--- a/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
+++ b/tools/testing/selftests/kvm/x86_64/mmio_warning_test.c
@@ -82,7 +82,8 @@ int get_warnings_count(void)
FILE *f;
f = popen("dmesg | grep \"WARNING:\" | wc -l", "r");
- fscanf(f, "%d", &warnings);
+ if (fscanf(f, "%d", &warnings) < 1)
+ warnings = 0;
fclose(f);
return warnings;
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index 117bf49a3d79..eda0d2a51224 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -14,7 +14,6 @@
#include <stdint.h>
#include <time.h>
#include <sched.h>
-#include <sys/syscall.h>
#define VCPU_ID 5
@@ -98,20 +97,6 @@ static void guest_code(void)
GUEST_DONE();
}
-static long get_run_delay(void)
-{
- char path[64];
- long val[2];
- FILE *fp;
-
- sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
- fp = fopen(path, "r");
- fscanf(fp, "%ld %ld ", &val[0], &val[1]);
- fclose(fp);
-
- return val[1];
-}
-
static int cmp_timespec(struct timespec *a, struct timespec *b)
{
if (a->tv_sec > b->tv_sec)
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index fa2ac0e56b43..fe7ee2b0f29c 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -48,6 +48,7 @@ ARCH ?= $(SUBARCH)
# When local build is done, headers are installed in the default
# INSTALL_HDR_PATH usr/include.
.PHONY: khdr
+.NOTPARALLEL:
khdr:
ifndef KSFT_KHDR_INSTALL_DONE
ifeq (1,$(DEFAULT_INSTALL_HDR_PATH))
diff --git a/tools/testing/selftests/net/af_unix/Makefile b/tools/testing/selftests/net/af_unix/Makefile
index cfc7f4f97fd1..df341648f818 100644
--- a/tools/testing/selftests/net/af_unix/Makefile
+++ b/tools/testing/selftests/net/af_unix/Makefile
@@ -1,5 +1,2 @@
-##TEST_GEN_FILES := test_unix_oob
-TEST_PROGS := test_unix_oob
+TEST_GEN_PROGS := test_unix_oob
include ../../lib.mk
-
-all: $(TEST_PROGS)
diff --git a/tools/testing/selftests/net/af_unix/test_unix_oob.c b/tools/testing/selftests/net/af_unix/test_unix_oob.c
index 0f3e3763f4f8..3dece8b29253 100644
--- a/tools/testing/selftests/net/af_unix/test_unix_oob.c
+++ b/tools/testing/selftests/net/af_unix/test_unix_oob.c
@@ -271,8 +271,9 @@ main(int argc, char **argv)
read_oob(pfd, &oob);
if (!signal_recvd || len != 127 || oob != '%' || atmark != 1) {
- fprintf(stderr, "Test 3 failed, sigurg %d len %d OOB %c ",
- "atmark %d\n", signal_recvd, len, oob, atmark);
+ fprintf(stderr,
+ "Test 3 failed, sigurg %d len %d OOB %c atmark %d\n",
+ signal_recvd, len, oob, atmark);
die(1);
}
diff --git a/tools/testing/selftests/netfilter/nft_nat_zones.sh b/tools/testing/selftests/netfilter/nft_nat_zones.sh
new file mode 100755
index 000000000000..b9ab37380f33
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat_zones.sh
@@ -0,0 +1,309 @@
+#!/bin/bash
+#
+# Test connection tracking zone and NAT source port reallocation support.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# Don't increase this too much: 2000 clients should work
+# just fine, but the script can then take several minutes on
+# KASAN/debug builds.
+maxclients=100
+
+have_iperf=1
+ret=0
+
+# client1---.
+# veth1-.
+# |
+# NAT Gateway --veth0--> Server
+# | |
+# veth2-' |
+# client2---' |
+# .... |
+# clientX----vethX---'
+
+# All clients share the same IP address.
+# NAT Gateway uses policy routing and conntrack zones to isolate client
+# namespaces. Each client connects to Server, each with colliding tuples:
+# clientsaddr:10000 -> serveraddr:dport
+# NAT Gateway is supposed to do port reallocation for each of the
+# connections.
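+#
+# Illustrative example (not executed by this script): with distinct zones,
+# two otherwise identical tuples can coexist in the conntrack table, e.g.:
+#   conntrack -I -s 10.1.0.3 -d 10.3.0.99 --protonum 6 --timeout 3600 \
+#             --state ESTABLISHED --sport 10000 --dport 5201 --zone 1
+#   conntrack -I -s 10.1.0.3 -d 10.3.0.99 --protonum 6 --timeout 3600 \
+#             --state ESTABLISHED --sport 10000 --dport 5201 --zone 2
+# Without the differing zones the second insert would clash with the first.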
+
+sfx=$(mktemp -u "XXXXXXXX")
+gw="ns-gw-$sfx"
+cl1="ns-cl1-$sfx"
+cl2="ns-cl2-$sfx"
+srv="ns-srv-$sfx"
+
+v4gc1=$(sysctl -n net.ipv4.neigh.default.gc_thresh1 2>/dev/null)
+v4gc2=$(sysctl -n net.ipv4.neigh.default.gc_thresh2 2>/dev/null)
+v4gc3=$(sysctl -n net.ipv4.neigh.default.gc_thresh3 2>/dev/null)
+v6gc1=$(sysctl -n net.ipv6.neigh.default.gc_thresh1 2>/dev/null)
+v6gc2=$(sysctl -n net.ipv6.neigh.default.gc_thresh2 2>/dev/null)
+v6gc3=$(sysctl -n net.ipv6.neigh.default.gc_thresh3 2>/dev/null)
+
+cleanup()
+{
+ ip netns del $gw
+ ip netns del $srv
+ for i in $(seq 1 $maxclients); do
+ ip netns del ns-cl$i-$sfx 2>/dev/null
+ done
+
+ sysctl -q net.ipv4.neigh.default.gc_thresh1=$v4gc1 2>/dev/null
+ sysctl -q net.ipv4.neigh.default.gc_thresh2=$v4gc2 2>/dev/null
+ sysctl -q net.ipv4.neigh.default.gc_thresh3=$v4gc3 2>/dev/null
+ sysctl -q net.ipv6.neigh.default.gc_thresh1=$v6gc1 2>/dev/null
+ sysctl -q net.ipv6.neigh.default.gc_thresh2=$v6gc2 2>/dev/null
+ sysctl -q net.ipv6.neigh.default.gc_thresh3=$v6gc3 2>/dev/null
+}
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+conntrack -V > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without conntrack tool"
+ exit $ksft_skip
+fi
+
+iperf3 -v >/dev/null 2>&1
+if [ $? -ne 0 ];then
+ have_iperf=0
+fi
+
+ip netns add "$gw"
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace $gw"
+ exit $ksft_skip
+fi
+ip -net "$gw" link set lo up
+
+trap cleanup EXIT
+
+ip netns add "$srv"
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create server netns $srv"
+ exit $ksft_skip
+fi
+
+ip link add veth0 netns "$gw" type veth peer name eth0 netns "$srv"
+ip -net "$gw" link set veth0 up
+ip -net "$srv" link set lo up
+ip -net "$srv" link set eth0 up
+
+sysctl -q net.ipv6.neigh.default.gc_thresh1=512 2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv6.neigh.default.gc_thresh3=4096 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh1=512 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh2=1024 2>/dev/null
+sysctl -q net.ipv4.neigh.default.gc_thresh3=4096 2>/dev/null
+
+for i in $(seq 1 $maxclients);do
+ cl="ns-cl$i-$sfx"
+
+ ip netns add "$cl"
+ if [ $? -ne 0 ];then
+ echo "SKIP: Could not create client netns $cl"
+ exit $ksft_skip
+ fi
+ ip link add veth$i netns "$gw" type veth peer name eth0 netns "$cl" > /dev/null 2>&1
+ if [ $? -ne 0 ];then
+ echo "SKIP: No virtual ethernet pair device support in kernel"
+ exit $ksft_skip
+ fi
+done
+
+for i in $(seq 1 $maxclients);do
+ cl="ns-cl$i-$sfx"
+ echo netns exec "$cl" ip link set lo up
+ echo netns exec "$cl" ip link set eth0 up
+ echo netns exec "$cl" sysctl -q net.ipv4.tcp_syn_retries=2
+ echo netns exec "$gw" ip link set veth$i up
+ echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.arp_ignore=2
+ echo netns exec "$gw" sysctl -q net.ipv4.conf.veth$i.rp_filter=0
+
+ # clients share the same IP address.
+ echo netns exec "$cl" ip addr add 10.1.0.3/24 dev eth0
+ echo netns exec "$cl" ip addr add dead:1::3/64 dev eth0
+ echo netns exec "$cl" ip route add default via 10.1.0.2 dev eth0
+ echo netns exec "$cl" ip route add default via dead:1::2 dev eth0
+
+ # NB: same addresses on client-facing interfaces.
+ echo netns exec "$gw" ip addr add 10.1.0.2/24 dev veth$i
+ echo netns exec "$gw" ip addr add dead:1::2/64 dev veth$i
+
+ # gw: policy routing
+ echo netns exec "$gw" ip route add 10.1.0.0/24 dev veth$i table $((1000+i))
+ echo netns exec "$gw" ip route add dead:1::0/64 dev veth$i table $((1000+i))
+ echo netns exec "$gw" ip route add 10.3.0.0/24 dev veth0 table $((1000+i))
+ echo netns exec "$gw" ip route add dead:3::0/64 dev veth0 table $((1000+i))
+ echo netns exec "$gw" ip rule add fwmark $i lookup $((1000+i))
+done | ip -batch /dev/stdin
+
+ip -net "$gw" addr add 10.3.0.1/24 dev veth0
+ip -net "$gw" addr add dead:3::1/64 dev veth0
+
+ip -net "$srv" addr add 10.3.0.99/24 dev eth0
+ip -net "$srv" addr add dead:3::99/64 dev eth0
+
+ip netns exec $gw nft -f /dev/stdin<<EOF
+table inet raw {
+ map iiftomark {
+ type ifname : mark
+ }
+
+ map iiftozone {
+ typeof iifname : ct zone
+ }
+
+ set inicmp {
+ flags dynamic
+ type ipv4_addr . ifname . ipv4_addr
+ }
+ set inflows {
+ flags dynamic
+ type ipv4_addr . inet_service . ifname . ipv4_addr . inet_service
+ }
+
+ set inflows6 {
+ flags dynamic
+ type ipv6_addr . inet_service . ifname . ipv6_addr . inet_service
+ }
+
+ chain prerouting {
+ type filter hook prerouting priority -64000; policy accept;
+ ct original zone set meta iifname map @iiftozone
+ meta mark set meta iifname map @iiftomark
+
+ tcp flags & (syn|ack) == ack add @inflows { ip saddr . tcp sport . meta iifname . ip daddr . tcp dport counter }
+ add @inflows6 { ip6 saddr . tcp sport . meta iifname . ip6 daddr . tcp dport counter }
+ ip protocol icmp add @inicmp { ip saddr . meta iifname . ip daddr counter }
+ }
+
+ chain nat_postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ ct mark set meta mark meta oifname veth0 masquerade
+ }
+
+ chain mangle_prerouting {
+ type filter hook prerouting priority -100; policy accept;
+ ct direction reply meta mark set ct mark
+ }
+}
+EOF
+
+( echo add element inet raw iiftomark \{
+ for i in $(seq 1 $((maxclients-1))); do
+ echo \"veth$i\" : $i,
+ done
+ echo \"veth$maxclients\" : $maxclients \}
+ echo add element inet raw iiftozone \{
+ for i in $(seq 1 $((maxclients-1))); do
+ echo \"veth$i\" : $i,
+ done
+ echo \"veth$maxclients\" : $maxclients \}
+) | ip netns exec $gw nft -f /dev/stdin
+
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.conf.all.forwarding=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv4.conf.all.rp_filter=0 >/dev/null
+
+# Useful for debugging: allows using 'ping' from the clients to the gateway.
+ip netns exec "$gw" sysctl -q net.ipv4.fwmark_reflect=1 > /dev/null
+ip netns exec "$gw" sysctl -q net.ipv6.fwmark_reflect=1 > /dev/null
+
+for i in $(seq 1 $maxclients); do
+ cl="ns-cl$i-$sfx"
+ ip netns exec $cl ping -i 0.5 -q -c 3 10.3.0.99 > /dev/null 2>&1 &
+ if [ $? -ne 0 ]; then
+ echo FAIL: Ping failure from $cl 1>&2
+ ret=1
+ break
+ fi
+done
+
+wait
+
+for i in $(seq 1 $maxclients); do
+ ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" | grep -q "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 counter packets 3 bytes 252 }"
+ if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: counter icmp mismatch for veth$i" 1>&2
+ ip netns exec $gw nft get element inet raw inicmp "{ 10.1.0.3 . \"veth$i\" . 10.3.0.99 }" 1>&2
+ break
+ fi
+done
+
+ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" | grep -q "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: counter icmp mismatch for veth0: { 10.3.0.99 . \"veth0\" . 10.3.0.1 counter packets $((3 * $maxclients)) bytes $((252 * $maxclients)) }"
+ ip netns exec $gw nft get element inet raw inicmp "{ 10.3.0.99 . \"veth0\" . 10.3.0.1 }" 1>&2
+fi
+
+if [ $ret -eq 0 ]; then
+ echo "PASS: ping test from all $maxclients namespaces"
+fi
+
+if [ $have_iperf -eq 0 ];then
+ echo "SKIP: iperf3 not installed"
+ if [ $ret -ne 0 ];then
+ exit $ret
+ fi
+ exit $ksft_skip
+fi
+
+ip netns exec $srv iperf3 -s > /dev/null 2>&1 &
+iperfpid=$!
+sleep 1
+
+for i in $(seq 1 $maxclients); do
+ if [ $ret -ne 0 ]; then
+ break
+ fi
+ cl="ns-cl$i-$sfx"
+ ip netns exec $cl iperf3 -c 10.3.0.99 --cport 10000 -n 1 > /dev/null
+ if [ $? -ne 0 ]; then
+ echo FAIL: Failure to connect for $cl 1>&2
+ ip netns exec $gw conntrack -S 1>&2
+ ret=1
+ fi
+done
+if [ $ret -eq 0 ];then
+ echo "PASS: iperf3 connections for all $maxclients net namespaces"
+fi
+
+kill $iperfpid
+wait
+
+for i in $(seq 1 $maxclients); do
+ ip netns exec $gw nft get element inet raw inflows "{ 10.1.0.3 . 10000 . \"veth$i\" . 10.3.0.99 . 5201 }" > /dev/null
+ if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: can't find expected tcp entry for veth$i" 1>&2
+ break
+ fi
+done
+if [ $ret -eq 0 ];then
+ echo "PASS: Found client connection for all $maxclients net namespaces"
+fi
+
+ip netns exec $gw nft get element inet raw inflows "{ 10.3.0.99 . 5201 . \"veth0\" . 10.3.0.1 . 10000 }" > /dev/null
+if [ $? -ne 0 ];then
+ ret=1
+ echo "FAIL: cannot find return entry on veth0" 1>&2
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh
new file mode 100755
index 000000000000..ac646376eb01
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_zones_many.sh
@@ -0,0 +1,156 @@
+#!/bin/bash
+
+# Test insertion speed for packets with identical addresses/ports
+# that are all placed in distinct conntrack zones.
+
+sfx=$(mktemp -u "XXXXXXXX")
+ns="ns-$sfx"
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+zones=20000
+have_ct_tool=0
+ret=0
+
+cleanup()
+{
+ ip netns del $ns
+}
+
+ip netns add $ns
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not create net namespace $ns"
+ exit $ksft_skip
+fi
+
+trap cleanup EXIT
+
+conntrack -V > /dev/null 2>&1
+if [ $? -eq 0 ];then
+ have_ct_tool=1
+fi
+
+ip -net "$ns" link set lo up
+
+test_zones() {
+ local max_zones=$1
+
+ip netns exec $ns sysctl -q net.netfilter.nf_conntrack_udp_timeout=3600
+ip netns exec $ns nft -f /dev/stdin<<EOF
+flush ruleset
+table inet raw {
+ map rndzone {
+ typeof numgen inc mod $max_zones : ct zone
+ }
+
+ chain output {
+ type filter hook output priority -64000; policy accept;
+ udp dport 12345 ct zone set numgen inc mod 65536 map @rndzone
+ }
+}
+EOF
+ (
+ echo "add element inet raw rndzone {"
+ for i in $(seq 1 $max_zones);do
+ echo -n "$i : $i"
+ if [ $i -lt $max_zones ]; then
+ echo ","
+ else
+ echo "}"
+ fi
+ done
+ ) | ip netns exec $ns nft -f /dev/stdin
+
+ local i=0
+ local j=0
+ local outerstart=$(date +%s%3N)
+ local stop=$outerstart
+
+ while [ $i -lt $max_zones ]; do
+ local start=$(date +%s%3N)
+ i=$((i + 10000))
+ j=$((j + 1))
+ dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null
+ if [ $? -ne 0 ] ;then
+ ret=1
+ break
+ fi
+
+ stop=$(date +%s%3N)
+ local duration=$((stop-start))
+ echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)"
+ done
+
+ if [ $have_ct_tool -eq 1 ]; then
+ local count=$(ip netns exec "$ns" conntrack -C)
+ local duration=$((stop-outerstart))
+
+ if [ $count -eq $max_zones ]; then
+ echo "PASS: inserted $count entries from packet path in $duration ms total"
+ else
+ ip netns exec $ns conntrack -S 1>&2
+ echo "FAIL: inserted $count entries from packet path in $duration ms total, expected $max_zones entries"
+ ret=1
+ fi
+ fi
+
+ if [ $ret -ne 0 ];then
+ echo "FAIL: insert $max_zones entries from packet path" 1>&2
+ fi
+}
+
+test_conntrack_tool() {
+ local max_zones=$1
+
+ ip netns exec $ns conntrack -F >/dev/null 2>/dev/null
+
+ local outerstart=$(date +%s%3N)
+ local start=$(date +%s%3N)
+ local stop=$start
+ local i=0
+ while [ $i -lt $max_zones ]; do
+ i=$((i + 1))
+ ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+ --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i >/dev/null 2>&1
+ if [ $? -ne 0 ];then
+ ip netns exec "$ns" conntrack -I -s 1.1.1.1 -d 2.2.2.2 --protonum 6 \
+ --timeout 3600 --state ESTABLISHED --sport 12345 --dport 1000 --zone $i > /dev/null
+ echo "FAIL: conntrack -I returned an error"
+ ret=1
+ break
+ fi
+
+ if [ $((i%10000)) -eq 0 ];then
+ stop=$(date +%s%3N)
+
+ local duration=$((stop-start))
+ echo "PASS: added 10000 entries in $duration ms (now $i total)"
+ start=$stop
+ fi
+ done
+
+ local count=$(ip netns exec "$ns" conntrack -C)
+ local duration=$((stop-outerstart))
+
+ if [ $count -eq $max_zones ]; then
+ echo "PASS: inserted $count entries via ctnetlink in $duration ms"
+ else
+ ip netns exec $ns conntrack -S 1>&2
+ echo "FAIL: inserted $count entries via ctnetlink in $duration ms, expected $max_zones entries"
+ ret=1
+ fi
+}
+
+test_zones $zones
+
+if [ $have_ct_tool -eq 1 ];then
+ test_conntrack_tool $zones
+else
+ echo "SKIP: Could not run ctnetlink insertion test without conntrack tool"
+ if [ $ret -eq 0 ];then
+ exit $ksft_skip
+ fi
+fi
+
+exit $ret
diff --git a/tools/usb/testusb.c b/tools/usb/testusb.c
index ee8208b2f946..69c3ead25313 100644
--- a/tools/usb/testusb.c
+++ b/tools/usb/testusb.c
@@ -265,12 +265,6 @@ nomem:
}
entry->ifnum = ifnum;
-
- /* FIXME update USBDEVFS_CONNECTINFO so it tells about high speed etc */
-
- fprintf(stderr, "%s speed\t%s\t%u\n",
- speed(entry->speed), entry->name, entry->ifnum);
-
entry->next = testdevs;
testdevs = entry;
return 0;
@@ -299,6 +293,14 @@ static void *handle_testdev (void *arg)
return 0;
}
+ status = ioctl(fd, USBDEVFS_GET_SPEED, NULL);
+ if (status < 0)
+ fprintf(stderr, "USBDEVFS_GET_SPEED failed %d\n", status);
+ else
+ dev->speed = status;
+ fprintf(stderr, "%s speed\t%s\t%u\n",
+ speed(dev->speed), dev->name, dev->ifnum);
+
restart:
for (i = 0; i < TEST_CASES; i++) {
if (dev->test != -1 && dev->test != i)
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 0517c744b04e..f62f10c988db 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -1331,7 +1331,7 @@ int main(int argc, char *argv[])
if (opt_list && opt_list_mapcnt)
kpagecount_fd = checked_open(PROC_KPAGECOUNT, O_RDONLY);
- if (opt_mark_idle && opt_file)
+ if (opt_mark_idle)
page_idle_fd = checked_open(SYS_KERNEL_MM_PAGE_IDLE, O_RDWR);
if (opt_list && opt_pid)