-rw-r--r--  Documentation/filesystems/overlayfs.txt  16
-rw-r--r--  MAINTAINERS  5
-rw-r--r--  Makefile  5
-rw-r--r--  arch/arc/boot/dts/hsdk.dts  45
-rw-r--r--  arch/arc/configs/hsdk_defconfig  3
-rw-r--r--  arch/arc/include/asm/cmpxchg.h  14
-rw-r--r--  arch/arc/mm/fault.c  9
-rw-r--r--  arch/arc/mm/tlb.c  13
-rw-r--r--  arch/arm64/Makefile  1
-rw-r--r--  arch/arm64/include/asm/arch_timer.h  8
-rw-r--r--  arch/arm64/include/asm/smp.h  6
-rw-r--r--  arch/arm64/include/asm/smp_plat.h  5
-rw-r--r--  arch/arm64/include/asm/thread_info.h  2
-rw-r--r--  arch/arm64/kernel/cpufeature.c  1
-rw-r--r--  arch/nds32/include/asm/bitfield.h  2
-rw-r--r--  arch/nds32/include/asm/fpu.h  2
-rw-r--r--  arch/nds32/include/asm/fpuemu.h  12
-rw-r--r--  arch/nds32/include/asm/syscalls.h  2
-rw-r--r--  arch/nds32/include/uapi/asm/fp_udfiex_crtl.h  16
-rw-r--r--  arch/nds32/include/uapi/asm/sigcontext.h  24
-rw-r--r--  arch/nds32/include/uapi/asm/udftrap.h  13
-rw-r--r--  arch/nds32/include/uapi/asm/unistd.h  4
-rw-r--r--  arch/nds32/kernel/fpu.c  15
-rw-r--r--  arch/nds32/kernel/sys_nds32.c  26
-rw-r--r--  arch/nds32/math-emu/Makefile  4
-rw-r--r--  arch/nds32/math-emu/fd2si.c  30
-rw-r--r--  arch/nds32/math-emu/fd2siz.c  30
-rw-r--r--  arch/nds32/math-emu/fd2ui.c  30
-rw-r--r--  arch/nds32/math-emu/fd2uiz.c  30
-rw-r--r--  arch/nds32/math-emu/fpuemu.c  57
-rw-r--r--  arch/nds32/math-emu/fs2si.c  29
-rw-r--r--  arch/nds32/math-emu/fs2siz.c  29
-rw-r--r--  arch/nds32/math-emu/fs2ui.c  29
-rw-r--r--  arch/nds32/math-emu/fs2uiz.c  30
-rw-r--r--  arch/nds32/math-emu/fsi2d.c  22
-rw-r--r--  arch/nds32/math-emu/fsi2s.c  22
-rw-r--r--  arch/nds32/math-emu/fui2d.c  22
-rw-r--r--  arch/nds32/math-emu/fui2s.c  22
-rw-r--r--  arch/parisc/Kconfig  4
-rw-r--r--  arch/parisc/configs/712_defconfig  1
-rw-r--r--  arch/parisc/configs/a500_defconfig  1
-rw-r--r--  arch/parisc/configs/b180_defconfig  1
-rw-r--r--  arch/parisc/configs/c3000_defconfig  1
-rw-r--r--  arch/parisc/configs/c8000_defconfig  1
-rw-r--r--  arch/parisc/configs/default_defconfig  1
-rw-r--r--  arch/parisc/configs/generic-32bit_defconfig  1
-rw-r--r--  arch/parisc/include/asm/special_insns.h  24
-rw-r--r--  arch/parisc/kernel/alternative.c  3
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S  21
-rw-r--r--  arch/parisc/math-emu/cnv_float.h  8
-rw-r--r--  arch/sparc/kernel/mdesc.c  2
-rw-r--r--  arch/sparc/kernel/perf_event.c  4
-rw-r--r--  arch/sparc/mm/ultra.S  4
-rw-r--r--  arch/x86/kernel/cpu/Makefile  5
-rw-r--r--  arch/x86/kernel/cpu/intel_epb.c  22
-rw-r--r--  arch/x86/lib/insn-eval.c  47
-rw-r--r--  arch/x86/power/cpu.c  10
-rw-r--r--  arch/x86/power/hibernate.c  33
-rw-r--r--  arch/xtensa/kernel/setup.c  3
-rw-r--r--  block/bfq-cgroup.c  6
-rw-r--r--  block/blk-cgroup.c  2
-rw-r--r--  block/blk-core.c  13
-rw-r--r--  block/blk-mq-sched.c  30
-rw-r--r--  block/blk-mq-sched.h  1
-rw-r--r--  block/blk-sysfs.c  2
-rw-r--r--  block/blk.h  10
-rw-r--r--  block/elevator.c  2
-rw-r--r--  crypto/hmac.c  4
-rw-r--r--  crypto/jitterentropy-kcapi.c  2
-rw-r--r--  drivers/block/aoe/aoeblk.c  16
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  1
-rw-r--r--  drivers/block/rsxx/core.c  1
-rw-r--r--  drivers/dma-buf/udmabuf.c  1
-rw-r--r--  drivers/dma/dma-jz4780.c  32
-rw-r--r--  drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c  3
-rw-r--r--  drivers/dma/fsl-qdma.c  4
-rw-r--r--  drivers/dma/mediatek/mtk-cqdma.c  4
-rw-r--r--  drivers/dma/sprd-dma.c  49
-rw-r--r--  drivers/dma/tegra210-adma.c  57
-rw-r--r--  drivers/fpga/dfl-afu-dma-region.c  2
-rw-r--r--  drivers/fpga/dfl.c  22
-rw-r--r--  drivers/fpga/stratix10-soc.c  6
-rw-r--r--  drivers/fpga/zynqmp-fpga.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c  15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c  12
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c  31
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h  1
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_component.c  8
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c  4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_crtc.c  2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.c  6
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_dev.h  8
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c  4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h  10
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_plane.c  4
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c  14
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c  13
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  22
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c  38
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  49
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  25
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h  1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h  3
-rw-r--r--  drivers/gpu/drm/i915/intel_workarounds.c  6
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h  16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/firmware.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c  4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c  2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c  56
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h  22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c  50
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h  3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c  33
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c  36
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h  12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c  22
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c  38
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c  51
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c  2
-rw-r--r--  drivers/hwmon/hwmon.c  2
-rw-r--r--  drivers/hwmon/pmbus/pmbus_core.c  37
-rw-r--r--  drivers/i2c/busses/i2c-xiic.c  5
-rw-r--r--  drivers/infiniband/core/device.c  49
-rw-r--r--  drivers/infiniband/core/rdma_core.h  2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c  30
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_cq.c  2
-rw-r--r--  drivers/infiniband/core/uverbs_std_types_mr.c  2
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c  1
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c  1
-rw-r--r--  drivers/infiniband/hw/hfi1/user_exp_rcv.c  3
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c  2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  1
-rw-r--r--  drivers/infiniband/hw/mlx5/cmd.c  9
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  2
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.c  2
-rw-r--r--  drivers/infiniband/sw/rdmavt/mr.c  2
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c  3
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  18
-rw-r--r--  drivers/memstick/core/mspro_block.c  13
-rw-r--r--  drivers/misc/genwqe/card_dev.c  2
-rw-r--r--  drivers/misc/genwqe/card_utils.c  4
-rw-r--r--  drivers/misc/habanalabs/context.c  6
-rw-r--r--  drivers/misc/habanalabs/debugfs.c  65
-rw-r--r--  drivers/misc/habanalabs/device.c  2
-rw-r--r--  drivers/misc/habanalabs/goya/goya.c  3
-rw-r--r--  drivers/misc/habanalabs/goya/goyaP.h  1
-rw-r--r--  drivers/misc/habanalabs/goya/goya_coresight.c  31
-rw-r--r--  drivers/misc/habanalabs/habanalabs.h  2
-rw-r--r--  drivers/misc/habanalabs/memory.c  6
-rw-r--r--  drivers/misc/habanalabs/mmu.c  8
-rw-r--r--  drivers/misc/lkdtm/bugs.c  23
-rw-r--r--  drivers/misc/lkdtm/core.c  6
-rw-r--r--  drivers/misc/lkdtm/lkdtm.h  2
-rw-r--r--  drivers/misc/lkdtm/usercopy.c  10
-rw-r--r--  drivers/mmc/core/queue.c  2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c  6
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c  2
-rw-r--r--  drivers/mmc/host/sdhci.c  24
-rw-r--r--  drivers/mmc/host/sdhci_am654.c  2
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c  3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c  2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c  32
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c  14
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c  4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c  4
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  15
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c  2
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c  2
-rw-r--r--  drivers/net/phy/phylink.c  13
-rw-r--r--  drivers/net/phy/sfp.c  24
-rw-r--r--  drivers/nvme/host/core.c  3
-rw-r--r--  drivers/nvme/host/pci.c  6
-rw-r--r--  drivers/nvme/host/rdma.c  152
-rw-r--r--  drivers/nvme/host/tcp.c  57
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c  1
-rw-r--r--  drivers/parisc/ccio-dma.c  6
-rw-r--r--  drivers/parisc/sba_iommu.c  5
-rw-r--r--  drivers/parport/share.c  2
-rw-r--r--  drivers/s390/net/qeth_core_main.c  22
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  2
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  32
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c  6
-rw-r--r--  drivers/scsi/ufs/ufshcd.c  3
-rw-r--r--  drivers/vhost/net.c  41
-rw-r--r--  drivers/vhost/scsi.c  21
-rw-r--r--  drivers/vhost/vhost.c  20
-rw-r--r--  drivers/vhost/vhost.h  5
-rw-r--r--  drivers/vhost/vsock.c  28
-rw-r--r--  drivers/virtio/Kconfig  8
-rw-r--r--  drivers/w1/slaves/w1_ds2408.c  2
-rw-r--r--  fs/adfs/adfs.h  14
-rw-r--r--  fs/adfs/dir.c  137
-rw-r--r--  fs/adfs/dir_f.c  43
-rw-r--r--  fs/adfs/dir_fplus.c  24
-rw-r--r--  fs/fuse/file.c  43
-rw-r--r--  fs/gfs2/glock.c  4
-rw-r--r--  fs/gfs2/incore.h  2
-rw-r--r--  fs/gfs2/log.c  4
-rw-r--r--  fs/gfs2/lops.c  33
-rw-r--r--  fs/gfs2/main.c  1
-rw-r--r--  fs/gfs2/super.c  2
-rw-r--r--  fs/nfs/nfs4proc.c  32
-rw-r--r--  fs/overlayfs/file.c  9
-rw-r--r--  fs/overlayfs/inode.c  48
-rw-r--r--  fs/overlayfs/namei.c  8
-rw-r--r--  fs/overlayfs/overlayfs.h  3
-rw-r--r--  fs/overlayfs/ovl_entry.h  6
-rw-r--r--  fs/overlayfs/super.c  169
-rw-r--r--  fs/overlayfs/util.c  12
-rw-r--r--  fs/pstore/platform.c  7
-rw-r--r--  fs/pstore/ram.c  36
-rw-r--r--  fs/xfs/scrub/ialloc.c  3
-rw-r--r--  fs/xfs/xfs_log.c  11
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h  8
-rw-r--r--  include/linux/cgroup-defs.h  3
-rw-r--r--  include/linux/cpu.h  4
-rw-r--r--  include/linux/dsa/sja1105.h  12
-rw-r--r--  include/linux/mm.h  11
-rw-r--r--  include/linux/rcupdate.h  6
-rw-r--r--  include/linux/suspend.h  31
-rw-r--r--  include/math-emu/op-2.h  17
-rw-r--r--  include/math-emu/op-common.h  11
-rw-r--r--  include/net/ip6_fib.h  3
-rw-r--r--  include/net/tls.h  4
-rw-r--r--  include/rdma/ib_verbs.h  1
-rw-r--r--  include/uapi/linux/fuse.h  7
-rw-r--r--  include/uapi/misc/habanalabs.h  22
-rw-r--r--  init/Kconfig  17
-rw-r--r--  kernel/Makefile  4
-rw-r--r--  kernel/cgroup/cgroup.c  33
-rw-r--r--  kernel/cpu.c  4
-rwxr-xr-x  kernel/gen_kheaders.sh (renamed from kernel/gen_ikh_data.sh)  17
-rw-r--r--  kernel/kheaders.c  40
-rw-r--r--  kernel/power/hibernate.c  9
-rw-r--r--  kernel/power/suspend.c  6
-rw-r--r--  kernel/signal.c  11
-rw-r--r--  lib/lockref.c  3
-rw-r--r--  lib/test_firmware.c  14
-rw-r--r--  net/core/dev.c  6
-rw-r--r--  net/core/ethtool.c  5
-rw-r--r--  net/core/fib_rules.c  6
-rw-r--r--  net/core/pktgen.c  11
-rw-r--r--  net/dsa/tag_sja1105.c  10
-rw-r--r--  net/ipv4/route.c  24
-rw-r--r--  net/ipv4/udp.c  3
-rw-r--r--  net/ipv6/raw.c  25
-rw-r--r--  net/packet/af_packet.c  2
-rw-r--r--  net/rds/ib.c  2
-rw-r--r--  net/rds/ib_rdma.c  10
-rw-r--r--  net/rds/ib_recv.c  3
-rw-r--r--  net/sctp/sm_make_chunk.c  13
-rw-r--r--  net/sctp/sm_sideeffect.c  5
-rw-r--r--  net/sunrpc/clnt.c  30
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c  3
-rw-r--r--  net/tls/tls_device.c  26
-rw-r--r--  samples/pidfd/pidfd-metadata.c  4
-rw-r--r--  scripts/Kbuild.include  7
-rwxr-xr-x  scripts/checkstack.pl  2
-rw-r--r--  scripts/kconfig/tests/err_recursive_inc/expected_stderr  6
-rw-r--r--  scripts/package/Makefile  2
-rw-r--r--  tools/testing/selftests/cgroup/test_core.c  7
-rw-r--r--  tools/testing/selftests/cgroup/test_memcontrol.c  4
-rwxr-xr-x  tools/testing/selftests/net/forwarding/router_broadcast.sh  5
-rw-r--r--  tools/testing/selftests/pidfd/pidfd_test.c  4
-rw-r--r--  tools/testing/selftests/vm/Makefile  6
-rw-r--r--  tools/testing/selftests/vm/userfaultfd.c  2
-rw-r--r--  tools/virtio/linux/kernel.h  2
278 files changed, 2756 insertions, 1166 deletions
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index eef7d9d259e8..1da2f1668f08 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -336,8 +336,20 @@ the copied layers will fail the verification of the lower root file handle.
Non-standard behavior
---------------------
-Overlayfs can now act as a POSIX compliant filesystem with the following
-features turned on:
+The current version of overlayfs can act as a mostly POSIX compliant
+filesystem.
+
+This is the list of cases that overlayfs doesn't currently handle:
+
+a) POSIX mandates updating st_atime for reads. This is currently not
+done in the case when the file resides on a lower layer.
+
+b) If a file residing on a lower layer is opened for read-only and then
+memory mapped with MAP_SHARED, then subsequent changes to the file are not
+reflected in the memory mapping.
+
+The following options allow overlayfs to act more like a standards
+compliant filesystem:
1) "redirect_dir"
diff --git a/MAINTAINERS b/MAINTAINERS
index a6954776a37e..57f496cff999 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13057,7 +13057,6 @@ F: Documentation/devicetree/bindings/net/qcom,dwmac.txt
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Alok Chauhan <alokc@codeaurora.org>
-M: Karthikeyan Ramasubramanian <kramasub@codeaurora.org>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Supported
@@ -14995,7 +14994,7 @@ S: Odd Fixes
F: drivers/net/ethernet/adaptec/starfire*
STEC S1220 SKD DRIVER
-M: Bart Van Assche <bart.vanassche@wdc.com>
+M: Damien Le Moal <Damien.LeMoal@wdc.com>
L: linux-block@vger.kernel.org
S: Maintained
F: drivers/block/skd*[ch]
@@ -17312,7 +17311,7 @@ F: Documentation/ABI/stable/sysfs-hypervisor-xen
F: Documentation/ABI/testing/sysfs-hypervisor-xen
XEN NETWORK BACKEND DRIVER
-M: Wei Liu <wei.liu2@citrix.com>
+M: Wei Liu <wei.liu@kernel.org>
M: Paul Durrant <paul.durrant@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 004d67a4405f..d27e1326cc03 100644
--- a/Makefile
+++ b/Makefile
@@ -1228,9 +1228,8 @@ kselftest-clean:
PHONY += kselftest-merge
kselftest-merge:
$(if $(wildcard $(objtree)/.config),, $(error No .config exists, config your kernel first!))
- $(Q)$(CONFIG_SHELL) $(srctree)/scripts/kconfig/merge_config.sh \
- -m $(objtree)/.config \
- $(srctree)/tools/testing/selftests/*/config
+ $(Q)find $(srctree)/tools/testing/selftests -name config | \
+ xargs $(srctree)/scripts/kconfig/merge_config.sh -m $(objtree)/.config
+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
# ---------------------------------------------------------------------------
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 7425bb0f2d1b..acfbed41b020 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -11,7 +11,6 @@
*/
/dts-v1/;
-#include <dt-bindings/net/ti-dp83867.h>
#include <dt-bindings/reset/snps,hsdk-reset.h>
/ {
@@ -167,6 +166,24 @@
#clock-cells = <0>;
};
+ gpu_core_clk: gpu-core-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ gpu_dma_clk: gpu-dma-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <400000000>;
+ #clock-cells = <0>;
+ };
+
+ gpu_cfg_clk: gpu-cfg-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <200000000>;
+ #clock-cells = <0>;
+ };
+
dmac_core_clk: dmac-core-clk {
compatible = "fixed-clock";
clock-frequency = <400000000>;
@@ -187,6 +204,7 @@
interrupt-names = "macirq";
phy-mode = "rgmii";
snps,pbl = <32>;
+ snps,multicast-filter-bins = <256>;
clocks = <&gmacclk>;
clock-names = "stmmaceth";
phy-handle = <&phy0>;
@@ -195,15 +213,15 @@
mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
dma-coherent;
+ tx-fifo-depth = <4096>;
+ rx-fifo-depth = <4096>;
+
mdio {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
phy0: ethernet-phy@0 {
reg = <0>;
- ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
- ti,tx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
- ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
};
};
};
@@ -237,6 +255,14 @@
dma-coherent;
};
+ creg_gpio: gpio@14b0 {
+ compatible = "snps,creg-gpio-hsdk";
+ reg = <0x14b0 0x4>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ngpios = <2>;
+ };
+
gpio: gpio@3000 {
compatible = "snps,dw-apb-gpio";
reg = <0x3000 0x20>;
@@ -252,6 +278,17 @@
};
};
+ gpu_3d: gpu@90000 {
+ compatible = "vivante,gc";
+ reg = <0x90000 0x4000>;
+ clocks = <&gpu_dma_clk>,
+ <&gpu_cfg_clk>,
+ <&gpu_core_clk>,
+ <&gpu_core_clk>;
+ clock-names = "bus", "reg", "core", "shader";
+ interrupts = <28>;
+ };
+
dmac: dmac@80000 {
compatible = "snps,axi-dma-1.01a";
reg = <0x80000 0x400>;
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 0e5fd29ed238..c8fb5d60c53f 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -49,10 +49,12 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_DWAPB=y
+CONFIG_GPIO_SNPS_CREG=y
# CONFIG_HWMON is not set
CONFIG_DRM=y
# CONFIG_DRM_FBDEV_EMULATION is not set
CONFIG_DRM_UDL=y
+CONFIG_DRM_ETNAVIV=y
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB_EHCI_HCD=y
@@ -64,7 +66,6 @@ CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_DW=y
-# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index d819de1c5d10..3ea4112c8302 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -92,8 +92,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
#endif /* CONFIG_ARC_HAS_LLSC */
-#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
- (unsigned long)(o), (unsigned long)(n)))
+#define cmpxchg(ptr, o, n) ({ \
+ (typeof(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)); \
+})
/*
* atomic_cmpxchg is same as cmpxchg
@@ -198,8 +201,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
return __xchg_bad_pointer();
}
-#define xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
- sizeof(*(ptr))))
+#define xchg(ptr, with) ({ \
+ (typeof(*(ptr)))__xchg((unsigned long)(with), \
+ (ptr), \
+ sizeof(*(ptr))); \
+})
#endif /* CONFIG_ARC_PLAT_EZNPS */
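The hunks above only rewrap the macro bodies as GNU C statement expressions;
behavior is unchanged. A standalone sketch of the ({ ... }) form being
adopted (illustrative, not kernel code):

/* The value of a ({ ... }) block is its last expression, so the macro can
 * still be used inside a larger expression while gaining local variables
 * and cleaner multi-line formatting. */
#define swap_in(ptr, val) ({			\
	typeof(*(ptr)) __old = *(ptr);		\
	*(ptr) = (val);				\
	__old;					\
})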
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 8df1638259f3..6836095251ed 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -66,7 +66,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- int si_code = 0;
+ int si_code = SEGV_MAPERR;
int ret;
vm_fault_t fault;
int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
@@ -81,16 +81,14 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
* only copy the information from the master page table,
* nothing more.
*/
- if (address >= VMALLOC_START) {
+ if (address >= VMALLOC_START && !user_mode(regs)) {
ret = handle_kernel_vaddr_fault(address);
if (unlikely(ret))
- goto bad_area_nosemaphore;
+ goto no_context;
else
return;
}
- si_code = SEGV_MAPERR;
-
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -198,7 +196,6 @@ good_area:
bad_area:
up_read(&mm->mmap_sem);
-bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.fault_address = address;
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 4097764fea23..fa18c00b0cfd 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -911,9 +911,11 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
struct pt_regs *regs)
{
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
- unsigned int pd0[mmu->ways];
unsigned long flags;
- int set;
+ int set, n_ways = mmu->ways;
+
+ n_ways = min(n_ways, 4);
+ BUG_ON(mmu->ways > 4);
local_irq_save(flags);
@@ -921,9 +923,10 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
for (set = 0; set < mmu->sets; set++) {
int is_valid, way;
+ unsigned int pd0[4];
/* read out all the ways of current set */
- for (way = 0, is_valid = 0; way < mmu->ways; way++) {
+ for (way = 0, is_valid = 0; way < n_ways; way++) {
write_aux_reg(ARC_REG_TLBINDEX,
SET_WAY_TO_IDX(mmu, set, way));
write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
@@ -937,14 +940,14 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
continue;
/* Scan the set for duplicate ways: needs a nested loop */
- for (way = 0; way < mmu->ways - 1; way++) {
+ for (way = 0; way < n_ways - 1; way++) {
int n;
if (!pd0[way])
continue;
- for (n = way + 1; n < mmu->ways; n++) {
+ for (n = way + 1; n < n_ways; n++) {
if (pd0[way] != pd0[n])
continue;
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index b025304bde46..8fbd583b18e1 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -51,6 +51,7 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS += -Wno-psabi
KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b7bca1ae09e6..50b3ab7ded4f 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -193,7 +193,7 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl)
: "=r" (tmp) : "r" (_val)); \
} while (0)
-static inline u64 __arch_counter_get_cntpct_stable(void)
+static __always_inline u64 __arch_counter_get_cntpct_stable(void)
{
u64 cnt;
@@ -203,7 +203,7 @@ static inline u64 __arch_counter_get_cntpct_stable(void)
return cnt;
}
-static inline u64 __arch_counter_get_cntpct(void)
+static __always_inline u64 __arch_counter_get_cntpct(void)
{
u64 cnt;
@@ -213,7 +213,7 @@ static inline u64 __arch_counter_get_cntpct(void)
return cnt;
}
-static inline u64 __arch_counter_get_cntvct_stable(void)
+static __always_inline u64 __arch_counter_get_cntvct_stable(void)
{
u64 cnt;
@@ -223,7 +223,7 @@ static inline u64 __arch_counter_get_cntvct_stable(void)
return cnt;
}
-static inline u64 __arch_counter_get_cntvct(void)
+static __always_inline u64 __arch_counter_get_cntvct(void)
{
u64 cnt;
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 18553f399e08..eae2d6c01262 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -53,6 +53,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
*/
#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
+/*
+ * Logical CPU mapping.
+ */
+extern u64 __cpu_logical_map[NR_CPUS];
+#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
+
struct seq_file;
/*
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index af58dcdefb21..7a495403a18a 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -37,11 +37,6 @@ static inline u32 mpidr_hash_size(void)
}
/*
- * Logical CPU mapping.
- */
-extern u64 __cpu_logical_map[NR_CPUS];
-#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
-/*
* Retrieve logical cpu index corresponding to a given MPIDR.Aff*
* - mpidr: MPIDR.Aff* bits to be used for the look-up
*
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index eb3ef73e07cf..f1d032be628a 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -75,7 +75,7 @@ void arch_release_task_struct(struct task_struct *tsk);
* TIF_SYSCALL_TRACE - syscall trace active
* TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace
* TIF_SYSCALL_AUDIT - syscall auditing
- * TIF_SECOMP - syscall secure computing
+ * TIF_SECCOMP - syscall secure computing
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index ca27e08e3d8a..80babf451519 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -830,6 +830,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
read_sysreg_case(SYS_ID_AA64PFR0_EL1);
read_sysreg_case(SYS_ID_AA64PFR1_EL1);
+ read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR0_EL1);
read_sysreg_case(SYS_ID_AA64DFR1_EL1);
read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
diff --git a/arch/nds32/include/asm/bitfield.h b/arch/nds32/include/asm/bitfield.h
index e75212c76b20..b02a58e71f80 100644
--- a/arch/nds32/include/asm/bitfield.h
+++ b/arch/nds32/include/asm/bitfield.h
@@ -937,7 +937,7 @@
#define FPCSR_mskDNIT ( 0x1 << FPCSR_offDNIT )
#define FPCSR_mskRIT ( 0x1 << FPCSR_offRIT )
#define FPCSR_mskALL (FPCSR_mskIVO | FPCSR_mskDBZ | FPCSR_mskOVF | FPCSR_mskUDF | FPCSR_mskIEX)
-#define FPCSR_mskALLE_NO_UDFE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskIEXE)
+#define FPCSR_mskALLE_NO_UDF_IEXE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE)
#define FPCSR_mskALLE (FPCSR_mskIVOE | FPCSR_mskDBZE | FPCSR_mskOVFE | FPCSR_mskUDFE | FPCSR_mskIEXE)
#define FPCSR_mskALLT (FPCSR_mskIVOT | FPCSR_mskDBZT | FPCSR_mskOVFT | FPCSR_mskUDFT | FPCSR_mskIEXT |FPCSR_mskDNIT | FPCSR_mskRIT)
diff --git a/arch/nds32/include/asm/fpu.h b/arch/nds32/include/asm/fpu.h
index 019f1bcfc5ee..8294ed4aaa2c 100644
--- a/arch/nds32/include/asm/fpu.h
+++ b/arch/nds32/include/asm/fpu.h
@@ -36,7 +36,7 @@ extern int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu);
* enabled by default and the kernel will re-execute it by fpu emulator
* when getting underflow exception.
*/
-#define FPCSR_INIT FPCSR_mskUDFE
+#define FPCSR_INIT (FPCSR_mskUDFE | FPCSR_mskIEXE)
#else
#define FPCSR_INIT 0x0UL
#endif
diff --git a/arch/nds32/include/asm/fpuemu.h b/arch/nds32/include/asm/fpuemu.h
index c4bd0c7faa75..63e7ef5f7969 100644
--- a/arch/nds32/include/asm/fpuemu.h
+++ b/arch/nds32/include/asm/fpuemu.h
@@ -13,6 +13,12 @@ void fsubs(void *ft, void *fa, void *fb);
void fmuls(void *ft, void *fa, void *fb);
void fdivs(void *ft, void *fa, void *fb);
void fs2d(void *ft, void *fa);
+void fs2si(void *ft, void *fa);
+void fs2si_z(void *ft, void *fa);
+void fs2ui(void *ft, void *fa);
+void fs2ui_z(void *ft, void *fa);
+void fsi2s(void *ft, void *fa);
+void fui2s(void *ft, void *fa);
void fsqrts(void *ft, void *fa);
void fnegs(void *ft, void *fa);
int fcmps(void *ft, void *fa, void *fb, int cop);
@@ -26,6 +32,12 @@ void fmuld(void *ft, void *fa, void *fb);
void fdivd(void *ft, void *fa, void *fb);
void fsqrtd(void *ft, void *fa);
void fd2s(void *ft, void *fa);
+void fd2si(void *ft, void *fa);
+void fd2si_z(void *ft, void *fa);
+void fd2ui(void *ft, void *fa);
+void fd2ui_z(void *ft, void *fa);
+void fsi2d(void *ft, void *fa);
+void fui2d(void *ft, void *fa);
void fnegd(void *ft, void *fa);
int fcmpd(void *ft, void *fa, void *fb, int cop);
diff --git a/arch/nds32/include/asm/syscalls.h b/arch/nds32/include/asm/syscalls.h
index f3b16f602cb5..4e7216082a67 100644
--- a/arch/nds32/include/asm/syscalls.h
+++ b/arch/nds32/include/asm/syscalls.h
@@ -7,7 +7,7 @@
asmlinkage long sys_cacheflush(unsigned long addr, unsigned long len, unsigned int op);
asmlinkage long sys_fadvise64_64_wrapper(int fd, int advice, loff_t offset, loff_t len);
asmlinkage long sys_rt_sigreturn_wrapper(void);
-asmlinkage long sys_udftrap(int option);
+asmlinkage long sys_fp_udfiex_crtl(int cmd, int act);
#include <asm-generic/syscalls.h>
diff --git a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
new file mode 100644
index 000000000000..d54a5d6c6538
--- /dev/null
+++ b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2005-2019 Andes Technology Corporation */
+#ifndef _FP_UDF_IEX_CRTL_H
+#define _FP_UDF_IEX_CRTL_H
+
+/*
+ * The cmd list of sys_fp_udfiex_crtl()
+ */
+/* Disable UDF or IEX trap based on the content of parameter act */
+#define DISABLE_UDF_IEX_TRAP 0
+/* Enable UDF or IEX trap based on the content of parameter act */
+#define ENABLE_UDF_IEX_TRAP 1
+/* Get current status of UDF and IEX trap */
+#define GET_UDF_IEX_TRAP 2
+
+#endif /* _FP_UDF_IEX_CRTL_H */
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h
index 628ff6b75825..dc89af7ddcc3 100644
--- a/arch/nds32/include/uapi/asm/sigcontext.h
+++ b/arch/nds32/include/uapi/asm/sigcontext.h
@@ -13,14 +13,24 @@ struct fpu_struct {
unsigned long long fd_regs[32];
unsigned long fpcsr;
/*
- * UDF_trap is used to recognize whether underflow trap is enabled
- * or not. When UDF_trap == 1, this process will be traped and then
- * get a SIGFPE signal when encountering an underflow exception.
- * UDF_trap is only modified through setfputrap syscall. Therefore,
- * UDF_trap needn't be saved or loaded to context in each context
- * switch.
+ * When CONFIG_SUPPORT_DENORMAL_ARITHMETIC is defined, the kernel prevents
+ * hardware from treating a denormalized output as an underflow case and
+ * rounding it to a normal number. It does this by enabling the UDF and
+ * IEX traps in the fpcsr register so that it can step in and emulate the
+ * calculation. As a consequence, the UDF and IEX trap enable bits in
+ * $fpcsr lose their original purpose.
+ *
+ * UDF_IEX_trap replaces the UDF and IEX trap enable bits in $fpcsr as the
+ * way to control trapping of underflow and inexact exceptions. Its bit
+ * field layout matches $fpcsr: the 10th bit enables UDF exception
+ * trapping and the 11th bit enables IEX exception trapping.
+ *
+ * UDF_IEX_trap is only modified through fp_udfiex_crtl syscall.
+ * Therefore, UDF_IEX_trap needn't be saved and restored in each
+ * context switch.
*/
- unsigned long UDF_trap;
+ unsigned long UDF_IEX_trap;
};
struct zol_struct {
diff --git a/arch/nds32/include/uapi/asm/udftrap.h b/arch/nds32/include/uapi/asm/udftrap.h
deleted file mode 100644
index 433f79d679c0..000000000000
--- a/arch/nds32/include/uapi/asm/udftrap.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (C) 2005-2018 Andes Technology Corporation */
-#ifndef _ASM_SETFPUTRAP
-#define _ASM_SETFPUTRAP
-
-/*
- * Options for setfputrap system call
- */
-#define DISABLE_UDFTRAP 0 /* disable underflow exception trap */
-#define ENABLE_UDFTRAP 1 /* enable undeflos exception trap */
-#define GET_UDFTRAP 2 /* only get undeflos exception trap status */
-
-#endif /* _ASM_CACHECTL */
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index c691735017ed..a0b2f7b9c0f2 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -11,6 +11,6 @@
/* Additional NDS32 specific syscalls. */
#define __NR_cacheflush (__NR_arch_specific_syscall)
-#define __NR_udftrap (__NR_arch_specific_syscall + 1)
+#define __NR_fp_udfiex_crtl (__NR_arch_specific_syscall + 1)
__SYSCALL(__NR_cacheflush, sys_cacheflush)
-__SYSCALL(__NR_udftrap, sys_udftrap)
+__SYSCALL(__NR_fp_udfiex_crtl, sys_fp_udfiex_crtl)
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index fddd40c7a16f..cf0b8760f261 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -14,7 +14,7 @@ const struct fpu_struct init_fpuregs = {
.fd_regs = {[0 ... 31] = sNAN64},
.fpcsr = FPCSR_INIT,
#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
- .UDF_trap = 0
+ .UDF_IEX_trap = 0
#endif
};
@@ -178,7 +178,7 @@ inline void do_fpu_context_switch(struct pt_regs *regs)
/* First time FPU user. */
load_fpu(&init_fpuregs);
#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
- current->thread.fpu.UDF_trap = init_fpuregs.UDF_trap;
+ current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
#endif
set_used_math();
}
@@ -206,7 +206,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
unsigned int fpcsr;
int si_code = 0, si_signo = SIGFPE;
#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
- unsigned long redo_except = FPCSR_mskDNIT|FPCSR_mskUDFT;
+ unsigned long redo_except = FPCSR_mskDNIT|FPCSR_mskUDFT|FPCSR_mskIEXT;
#else
unsigned long redo_except = FPCSR_mskDNIT;
#endif
@@ -215,21 +215,18 @@ inline void handle_fpu_exception(struct pt_regs *regs)
fpcsr = current->thread.fpu.fpcsr;
if (fpcsr & redo_except) {
-#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
- if (fpcsr & FPCSR_mskUDFT)
- current->thread.fpu.fpcsr &= ~FPCSR_mskIEX;
-#endif
si_signo = do_fpuemu(regs, &current->thread.fpu);
fpcsr = current->thread.fpu.fpcsr;
- if (!si_signo)
+ if (!si_signo) {
+ current->thread.fpu.fpcsr &= ~(redo_except);
goto done;
+ }
} else if (fpcsr & FPCSR_mskRIT) {
if (!user_mode(regs))
do_exit(SIGILL);
si_signo = SIGILL;
}
-
switch (si_signo) {
case SIGFPE:
fill_sigfpe_signo(fpcsr, &si_code);
diff --git a/arch/nds32/kernel/sys_nds32.c b/arch/nds32/kernel/sys_nds32.c
index 0835277636ce..cb2d1e219bb3 100644
--- a/arch/nds32/kernel/sys_nds32.c
+++ b/arch/nds32/kernel/sys_nds32.c
@@ -6,8 +6,8 @@
#include <asm/cachectl.h>
#include <asm/proc-fns.h>
-#include <asm/udftrap.h>
#include <asm/fpu.h>
+#include <asm/fp_udfiex_crtl.h>
SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
unsigned long, prot, unsigned long, flags,
@@ -51,31 +51,33 @@ SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache)
return 0;
}
-SYSCALL_DEFINE1(udftrap, int, option)
+SYSCALL_DEFINE2(fp_udfiex_crtl, unsigned int, cmd, unsigned int, act)
{
#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
- int old_udftrap;
+ int old_udf_iex;
if (!used_math()) {
load_fpu(&init_fpuregs);
- current->thread.fpu.UDF_trap = init_fpuregs.UDF_trap;
+ current->thread.fpu.UDF_IEX_trap = init_fpuregs.UDF_IEX_trap;
set_used_math();
}
- old_udftrap = current->thread.fpu.UDF_trap;
- switch (option) {
- case DISABLE_UDFTRAP:
- current->thread.fpu.UDF_trap = 0;
+ old_udf_iex = current->thread.fpu.UDF_IEX_trap;
+ act &= (FPCSR_mskUDFE | FPCSR_mskIEXE);
+
+ switch (cmd) {
+ case DISABLE_UDF_IEX_TRAP:
+ current->thread.fpu.UDF_IEX_trap &= ~act;
break;
- case ENABLE_UDFTRAP:
- current->thread.fpu.UDF_trap = FPCSR_mskUDFE;
+ case ENABLE_UDF_IEX_TRAP:
+ current->thread.fpu.UDF_IEX_trap |= act;
break;
- case GET_UDFTRAP:
+ case GET_UDF_IEX_TRAP:
break;
default:
return -EINVAL;
}
- return old_udftrap;
+ return old_udf_iex;
#else
return -ENOTSUPP;
#endif
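For context, a hedged user-space sketch of how the reworked syscall might be
invoked on an nds32 toolchain; the wrapper name is invented here, the command
macros come from the new uapi header above, and the fallback syscall number
merely mirrors the unistd.h change (normally it should come from the
installed headers):

#include <unistd.h>
#include <sys/syscall.h>
#include <asm/fp_udfiex_crtl.h>		/* DISABLE/ENABLE/GET_UDF_IEX_TRAP */

#ifndef __NR_fp_udfiex_crtl
#define __NR_fp_udfiex_crtl (__NR_arch_specific_syscall + 1)	/* assumption */
#endif

static long fp_udfiex_crtl(unsigned int cmd, unsigned int act)
{
	return syscall(__NR_fp_udfiex_crtl, cmd, act);
}

int main(void)
{
	/* Query the current UDF/IEX trap enable bits without changing them. */
	long cur = fp_udfiex_crtl(GET_UDF_IEX_TRAP, 0);

	/* Re-enable the same bits; a round trip that should be a no-op. */
	if (cur >= 0)
		fp_udfiex_crtl(ENABLE_UDF_IEX_TRAP, (unsigned int)cur);
	return 0;
}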
diff --git a/arch/nds32/math-emu/Makefile b/arch/nds32/math-emu/Makefile
index 14fa01f4574a..3bed7e5d5d05 100644
--- a/arch/nds32/math-emu/Makefile
+++ b/arch/nds32/math-emu/Makefile
@@ -5,4 +5,6 @@
obj-y := fpuemu.o \
fdivd.o fmuld.o fsubd.o faddd.o fs2d.o fsqrtd.o fcmpd.o fnegs.o \
- fdivs.o fmuls.o fsubs.o fadds.o fd2s.o fsqrts.o fcmps.o fnegd.o
+ fd2si.o fd2ui.o fd2siz.o fd2uiz.o fsi2d.o fui2d.o \
+ fdivs.o fmuls.o fsubs.o fadds.o fd2s.o fsqrts.o fcmps.o fnegd.o \
+ fs2si.o fs2ui.o fs2siz.o fs2uiz.o fsi2s.o fui2s.o
diff --git a/arch/nds32/math-emu/fd2si.c b/arch/nds32/math-emu/fd2si.c
new file mode 100644
index 000000000000..fae3e16a0a10
--- /dev/null
+++ b/arch/nds32/math-emu/fd2si.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/double.h>
+
+void fd2si(void *ft, void *fa)
+{
+ int r;
+
+ FP_DECL_D(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_DP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_ROUND_D(r, A, 32, 1);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(int *)ft = r;
+ }
+
+}
diff --git a/arch/nds32/math-emu/fd2siz.c b/arch/nds32/math-emu/fd2siz.c
new file mode 100644
index 000000000000..92fe6774f112
--- /dev/null
+++ b/arch/nds32/math-emu/fd2siz.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/double.h>
+
+void fd2si_z(void *ft, void *fa)
+{
+ int r;
+
+ FP_DECL_D(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_DP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_D(r, A, 32, 1);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(int *)ft = r;
+ }
+
+}
diff --git a/arch/nds32/math-emu/fd2ui.c b/arch/nds32/math-emu/fd2ui.c
new file mode 100644
index 000000000000..a0423b699aa4
--- /dev/null
+++ b/arch/nds32/math-emu/fd2ui.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/double.h>
+
+void fd2ui(void *ft, void *fa)
+{
+ unsigned int r;
+
+ FP_DECL_D(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_DP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(unsigned int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_ROUND_D(r, A, 32, 0);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(unsigned int *)ft = r;
+ }
+
+}
diff --git a/arch/nds32/math-emu/fd2uiz.c b/arch/nds32/math-emu/fd2uiz.c
new file mode 100644
index 000000000000..8ae17cfce90d
--- /dev/null
+++ b/arch/nds32/math-emu/fd2uiz.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/double.h>
+
+void fd2ui_z(void *ft, void *fa)
+{
+ unsigned int r;
+
+ FP_DECL_D(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_DP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(unsigned int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_D(r, A, 32, 0);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(unsigned int *)ft = r;
+ }
+
+}
diff --git a/arch/nds32/math-emu/fpuemu.c b/arch/nds32/math-emu/fpuemu.c
index 75cf1643fa78..46558a15c0dc 100644
--- a/arch/nds32/math-emu/fpuemu.c
+++ b/arch/nds32/math-emu/fpuemu.c
@@ -113,6 +113,30 @@ static int fpu_emu(struct fpu_struct *fpu_reg, unsigned long insn)
func.b = fs2d;
ftype = S1D;
break;
+ case fs2si_op:
+ func.b = fs2si;
+ ftype = S1S;
+ break;
+ case fs2si_z_op:
+ func.b = fs2si_z;
+ ftype = S1S;
+ break;
+ case fs2ui_op:
+ func.b = fs2ui;
+ ftype = S1S;
+ break;
+ case fs2ui_z_op:
+ func.b = fs2ui_z;
+ ftype = S1S;
+ break;
+ case fsi2s_op:
+ func.b = fsi2s;
+ ftype = S1S;
+ break;
+ case fui2s_op:
+ func.b = fui2s;
+ ftype = S1S;
+ break;
case fsqrts_op:
func.b = fsqrts;
ftype = S1S;
@@ -182,6 +206,30 @@ static int fpu_emu(struct fpu_struct *fpu_reg, unsigned long insn)
func.b = fd2s;
ftype = D1S;
break;
+ case fd2si_op:
+ func.b = fd2si;
+ ftype = D1S;
+ break;
+ case fd2si_z_op:
+ func.b = fd2si_z;
+ ftype = D1S;
+ break;
+ case fd2ui_op:
+ func.b = fd2ui;
+ ftype = D1S;
+ break;
+ case fd2ui_z_op:
+ func.b = fd2ui_z;
+ ftype = D1S;
+ break;
+ case fsi2d_op:
+ func.b = fsi2d;
+ ftype = D1S;
+ break;
+ case fui2d_op:
+ func.b = fui2d;
+ ftype = D1S;
+ break;
case fsqrtd_op:
func.b = fsqrtd;
ftype = D1D;
@@ -305,16 +353,16 @@ static int fpu_emu(struct fpu_struct *fpu_reg, unsigned long insn)
* If an exception is required, generate a tidy SIGFPE exception.
*/
#if IS_ENABLED(CONFIG_SUPPORT_DENORMAL_ARITHMETIC)
- if (((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE_NO_UDFE) ||
- ((fpu_reg->fpcsr & FPCSR_mskUDF) && (fpu_reg->UDF_trap)))
+ if (((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE_NO_UDF_IEXE)
+ || ((fpu_reg->fpcsr << 5) & (fpu_reg->UDF_IEX_trap))) {
#else
- if ((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE)
+ if ((fpu_reg->fpcsr << 5) & fpu_reg->fpcsr & FPCSR_mskALLE) {
#endif
return SIGFPE;
+ }
return 0;
}
-
int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu)
{
unsigned long insn = 0, addr = regs->ipc;
@@ -336,6 +384,7 @@ int do_fpuemu(struct pt_regs *regs, struct fpu_struct *fpu)
if (NDS32Insn_OPCODE(insn) != cop0_op)
return SIGILL;
+
switch (NDS32Insn_OPCODE_COP0(insn)) {
case fs1_op:
case fs2_op:
diff --git a/arch/nds32/math-emu/fs2si.c b/arch/nds32/math-emu/fs2si.c
new file mode 100644
index 000000000000..b4931d60980e
--- /dev/null
+++ b/arch/nds32/math-emu/fs2si.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+
+void fs2si(void *ft, void *fa)
+{
+ int r;
+
+ FP_DECL_S(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_SP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_ROUND_S(r, A, 32, 1);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(int *)ft = r;
+ }
+}
diff --git a/arch/nds32/math-emu/fs2siz.c b/arch/nds32/math-emu/fs2siz.c
new file mode 100644
index 000000000000..1c2b99ce3e38
--- /dev/null
+++ b/arch/nds32/math-emu/fs2siz.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+
+void fs2si_z(void *ft, void *fa)
+{
+ int r;
+
+ FP_DECL_S(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_SP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(int *)ft = (A_s == 0) ? 0x7fffffff : 0x80000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_S(r, A, 32, 1);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(int *)ft = r;
+ }
+}
diff --git a/arch/nds32/math-emu/fs2ui.c b/arch/nds32/math-emu/fs2ui.c
new file mode 100644
index 000000000000..c337f0384d06
--- /dev/null
+++ b/arch/nds32/math-emu/fs2ui.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+
+void fs2ui(void *ft, void *fa)
+{
+ unsigned int r;
+
+ FP_DECL_S(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_SP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(unsigned int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_ROUND_S(r, A, 32, 0);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(unsigned int *)ft = r;
+ }
+}
diff --git a/arch/nds32/math-emu/fs2uiz.c b/arch/nds32/math-emu/fs2uiz.c
new file mode 100644
index 000000000000..22c5e4768044
--- /dev/null
+++ b/arch/nds32/math-emu/fs2uiz.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+
+void fs2ui_z(void *ft, void *fa)
+{
+ unsigned int r;
+
+ FP_DECL_S(A);
+ FP_DECL_EX;
+
+ FP_UNPACK_SP(A, fa);
+
+ if (A_c == FP_CLS_INF) {
+ *(unsigned int *)ft = (A_s == 0) ? 0xffffffff : 0x00000000;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else if (A_c == FP_CLS_NAN) {
+ *(unsigned int *)ft = 0xffffffff;
+ __FPU_FPCSR |= FP_EX_INVALID;
+ } else {
+ FP_TO_INT_S(r, A, 32, 0);
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+ *(unsigned int *)ft = r;
+ }
+
+}
diff --git a/arch/nds32/math-emu/fsi2d.c b/arch/nds32/math-emu/fsi2d.c
new file mode 100644
index 000000000000..6b04cec0c5c5
--- /dev/null
+++ b/arch/nds32/math-emu/fsi2d.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/double.h>
+
+void fsi2d(void *ft, void *fa)
+{
+ int a = *(int *)fa;
+
+ FP_DECL_D(R);
+ FP_DECL_EX;
+
+ FP_FROM_INT_D(R, a, 32, int);
+
+ FP_PACK_DP(ft, R);
+
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+
+}
diff --git a/arch/nds32/math-emu/fsi2s.c b/arch/nds32/math-emu/fsi2s.c
new file mode 100644
index 000000000000..689864a5df90
--- /dev/null
+++ b/arch/nds32/math-emu/fsi2s.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+
+void fsi2s(void *ft, void *fa)
+{
+ int a = *(int *)fa;
+
+ FP_DECL_S(R);
+ FP_DECL_EX;
+
+ FP_FROM_INT_S(R, a, 32, int);
+
+ FP_PACK_SP(ft, R);
+
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+
+}
diff --git a/arch/nds32/math-emu/fui2d.c b/arch/nds32/math-emu/fui2d.c
new file mode 100644
index 000000000000..9689d33a8d50
--- /dev/null
+++ b/arch/nds32/math-emu/fui2d.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/double.h>
+
+void fui2d(void *ft, void *fa)
+{
+ unsigned int a = *(unsigned int *)fa;
+
+ FP_DECL_D(R);
+ FP_DECL_EX;
+
+ FP_FROM_INT_D(R, a, 32, int);
+
+ FP_PACK_DP(ft, R);
+
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+
+}
diff --git a/arch/nds32/math-emu/fui2s.c b/arch/nds32/math-emu/fui2s.c
new file mode 100644
index 000000000000..f70f0762547d
--- /dev/null
+++ b/arch/nds32/math-emu/fui2s.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2019 Andes Technology Corporation
+#include <linux/uaccess.h>
+
+#include <asm/sfp-machine.h>
+#include <math-emu/soft-fp.h>
+#include <math-emu/single.h>
+
+void fui2s(void *ft, void *fa)
+{
+ unsigned int a = *(unsigned int *)fa;
+
+ FP_DECL_S(R);
+ FP_DECL_EX;
+
+ FP_FROM_INT_S(R, a, 32, int);
+
+ FP_PACK_SP(ft, R);
+
+ __FPU_FPCSR |= FP_CUR_EXCEPTIONS;
+
+}
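All of the new fs2si/fd2si-family helpers above follow one policy: saturate
on infinity, store an all-ones pattern on NaN, and otherwise convert through
the soft-fp rounding (or, for the _z variants, truncating) macros while
accumulating exception flags. A plain-C illustration of that policy, not the
soft-fp implementation itself:

#include <math.h>
#include <stdint.h>

/* Illustrative only: mirrors the saturation/NaN handling of the emulator's
 * single-to-signed-int conversion; the in-kernel code uses the FP_TO_INT_*
 * macros and raises FPCSR exception bits instead of relying on C casts. */
static int32_t float_to_s32_truncating(float a)
{
	if (isnan(a))
		return -1;				/* 0xffffffff */
	if (isinf(a) || a >= 2147483648.0f)
		return a > 0 ? INT32_MAX : INT32_MIN;	/* saturate */
	if (a < -2147483648.0f)
		return INT32_MIN;
	return (int32_t)a;				/* C casts truncate toward zero */
}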
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 09407ed1aacd..4860efa91d7b 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -36,7 +36,6 @@ config PARISC
select GENERIC_STRNCPY_FROM_USER
select SYSCTL_ARCH_UNALIGN_ALLOW
select SYSCTL_EXCEPTION_TRACE
- select ARCH_DISCARD_MEMBLOCK
select HAVE_MOD_ARCH_SPECIFIC
select VIRT_TO_BUS
select MODULES_USE_ELF_RELA
@@ -195,7 +194,8 @@ config PREFETCH
config MLONGCALLS
bool "Enable the -mlong-calls compiler option for big kernels"
- default y
+ default y if !MODULES || UBSAN || FTRACE
+ default n
depends on PA8X00
help
If you configure the kernel to include many drivers built-in instead
diff --git a/arch/parisc/configs/712_defconfig b/arch/parisc/configs/712_defconfig
index ccc109761f44..d3e3d94e90c3 100644
--- a/arch/parisc/configs/712_defconfig
+++ b/arch/parisc/configs/712_defconfig
@@ -34,7 +34,6 @@ CONFIG_INET_DIAG=m
CONFIG_NETFILTER=y
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/a500_defconfig b/arch/parisc/configs/a500_defconfig
index 5acb93dcaabf..a8859496b0b9 100644
--- a/arch/parisc/configs/a500_defconfig
+++ b/arch/parisc/configs/a500_defconfig
@@ -70,7 +70,6 @@ CONFIG_IP_DCCP=m
# CONFIG_IP_DCCP_CCID3 is not set
CONFIG_LLC2=m
CONFIG_NET_PKTGEN=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/b180_defconfig b/arch/parisc/configs/b180_defconfig
index 83ffd161aec5..0cae9664bf67 100644
--- a/arch/parisc/configs/b180_defconfig
+++ b/arch/parisc/configs/b180_defconfig
@@ -24,7 +24,6 @@ CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_BOOTP=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
diff --git a/arch/parisc/configs/c3000_defconfig b/arch/parisc/configs/c3000_defconfig
index 8d41a73bd71b..6c29b841735c 100644
--- a/arch/parisc/configs/c3000_defconfig
+++ b/arch/parisc/configs/c3000_defconfig
@@ -32,7 +32,6 @@ CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
CONFIG_NET_PKTGEN=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
index 900b00084953..507f0644fcf8 100644
--- a/arch/parisc/configs/c8000_defconfig
+++ b/arch/parisc/configs/c8000_defconfig
@@ -57,7 +57,6 @@ CONFIG_IP_DCCP=m
CONFIG_TIPC=m
CONFIG_LLC2=m
CONFIG_DNS_RESOLVER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/default_defconfig
index 52c9050a7c5c..6a91cc2623e8 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/default_defconfig
@@ -44,7 +44,6 @@ CONFIG_INET6_AH=y
CONFIG_INET6_ESP=y
CONFIG_INET6_IPCOMP=y
CONFIG_LLC2=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/configs/generic-32bit_defconfig b/arch/parisc/configs/generic-32bit_defconfig
index a8f9bbef0975..18b072a47a10 100644
--- a/arch/parisc/configs/generic-32bit_defconfig
+++ b/arch/parisc/configs/generic-32bit_defconfig
@@ -47,7 +47,6 @@ CONFIG_INET_ESP=m
CONFIG_INET_DIAG=m
CONFIG_LLC2=m
# CONFIG_WIRELESS is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h
index 3d4dd68e181b..a303ae9a77f4 100644
--- a/arch/parisc/include/asm/special_insns.h
+++ b/arch/parisc/include/asm/special_insns.h
@@ -2,6 +2,30 @@
#ifndef __PARISC_SPECIAL_INSNS_H
#define __PARISC_SPECIAL_INSNS_H
+#define lpa(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n\t" \
+ "lpa %%r0(%1),%0" \
+ : "=r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
+})
+
+#define lpa_user(va) ({ \
+ unsigned long pa; \
+ __asm__ __volatile__( \
+ "copy %%r0,%0\n\t" \
+ "lpa %%r0(%%sr3,%1),%0" \
+ : "=r" (pa) \
+ : "r" (va) \
+ : "memory" \
+ ); \
+ pa; \
+})
+
#define mfctl(reg) ({ \
unsigned long cr; \
__asm__ __volatile__( \
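A sketch of how the new lpa() helper might be called; the wrapper below is
hypothetical, and the idea that a failed translation leaves the pre-zeroed
result (hence the leading copy from %r0) is inferred from the asm above
rather than stated by the patch:

/* Hypothetical caller: translate a kernel virtual address to physical. */
static unsigned long kva_to_pa_example(void *kva)
{
	unsigned long pa = lpa((unsigned long)kva);

	return pa;	/* 0 is assumed to mean "no translation" */
}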
diff --git a/arch/parisc/kernel/alternative.c b/arch/parisc/kernel/alternative.c
index bf2274e01a96..ca1f5ca0540a 100644
--- a/arch/parisc/kernel/alternative.c
+++ b/arch/parisc/kernel/alternative.c
@@ -56,7 +56,8 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
* time IO-PDIR is changed in Ike/Astro.
*/
if ((cond & ALT_COND_NO_IOC_FDC) &&
- (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC))
+ ((boot_cpu_data.cpu_type <= pcxw_) ||
+ (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)))
continue;
/* Want to replace pdtlb by a pdtlb,l instruction? */
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index c3b1b9c24ede..cd33b4feacb1 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -35,6 +35,15 @@ OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
+#define EXIT_TEXT_SECTIONS() .exit.text : { EXIT_TEXT }
+#if !defined(CONFIG_64BIT) || defined(CONFIG_MLONGCALLS)
+#define MLONGCALL_KEEP(x)
+#define MLONGCALL_DISCARD(x) x
+#else
+#define MLONGCALL_KEEP(x) x
+#define MLONGCALL_DISCARD(x)
+#endif
+
ENTRY(parisc_kernel_start)
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
@@ -47,15 +56,11 @@ SECTIONS
__init_begin = .;
HEAD_TEXT_SECTION
- INIT_TEXT_SECTION(8)
+ MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(PAGE_SIZE)
- /* we have to discard exit text and such at runtime, not link time */
- .exit.text :
- {
- EXIT_TEXT
- }
+ MLONGCALL_DISCARD(EXIT_TEXT_SECTIONS())
.exit.data :
{
EXIT_DATA
@@ -73,11 +78,12 @@ SECTIONS
_text = .; /* Text and read-only data */
_stext = .;
+ MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT
+ LOCK_TEXT
SCHED_TEXT
CPUIDLE_TEXT
- LOCK_TEXT
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
@@ -92,6 +98,7 @@ SECTIONS
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
}
+ MLONGCALL_KEEP(EXIT_TEXT_SECTIONS())
. = ALIGN(PAGE_SIZE);
_etext = .;
/* End of text section */
diff --git a/arch/parisc/math-emu/cnv_float.h b/arch/parisc/math-emu/cnv_float.h
index bfcd834f42d0..ef783a383c5a 100644
--- a/arch/parisc/math-emu/cnv_float.h
+++ b/arch/parisc/math-emu/cnv_float.h
@@ -47,19 +47,19 @@
((exponent < (SGL_P - 1)) ? \
(Sall(sgl_value) << (SGL_EXP_LENGTH + 1 + exponent)) : FALSE)
-#define Int_isinexact_to_sgl(int_value) (int_value << 33 - SGL_EXP_LENGTH)
+#define Int_isinexact_to_sgl(int_value) ((int_value << 33 - SGL_EXP_LENGTH) != 0)
#define Sgl_roundnearest_from_int(int_value,sgl_value) \
if (int_value & 1<<(SGL_EXP_LENGTH - 2)) /* round bit */ \
- if ((int_value << 34 - SGL_EXP_LENGTH) || Slow(sgl_value)) \
+ if (((int_value << 34 - SGL_EXP_LENGTH) != 0) || Slow(sgl_value)) \
Sall(sgl_value)++
#define Dint_isinexact_to_sgl(dint_valueA,dint_valueB) \
- ((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) || Dintp2(dint_valueB))
+ (((Dintp1(dint_valueA) << 33 - SGL_EXP_LENGTH) != 0) || Dintp2(dint_valueB))
#define Sgl_roundnearest_from_dint(dint_valueA,dint_valueB,sgl_value) \
if (Dintp1(dint_valueA) & 1<<(SGL_EXP_LENGTH - 2)) \
- if ((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) || \
+ if (((Dintp1(dint_valueA) << 34 - SGL_EXP_LENGTH) != 0) || \
Dintp2(dint_valueB) || Slow(sgl_value)) Sall(sgl_value)++
#define Dint_isinexact_to_dbl(dint_value) \
diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c
index 9a26b442f820..8e645ddac58e 100644
--- a/arch/sparc/kernel/mdesc.c
+++ b/arch/sparc/kernel/mdesc.c
@@ -356,6 +356,8 @@ static int get_vdev_port_node_info(struct mdesc_handle *md, u64 node,
node_info->vdev_port.id = *idp;
node_info->vdev_port.name = kstrdup_const(name, GFP_KERNEL);
+ if (!node_info->vdev_port.name)
+ return -1;
node_info->vdev_port.parent_cfg_hdl = *parent_cfg_hdlp;
return 0;
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 6de7c684c29f..a58ae9c42803 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -891,6 +891,10 @@ static int sparc_perf_event_set_period(struct perf_event *event,
s64 period = hwc->sample_period;
int ret = 0;
+ /* The period may have been changed by PERF_EVENT_IOC_PERIOD */
+ if (unlikely(period != hwc->last_period))
+ left = period - (hwc->last_period - left);
+
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index d245f89d1395..d220b6848746 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -587,7 +587,7 @@ xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
- add %g2, 1, %g2
+ sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
@@ -751,7 +751,7 @@ __cheetah_xcall_flush_tlb_kernel_range: /* 44 insns */
sub %g7, %g1, %g3
srlx %g3, 18, %g2
brnz,pn %g2, 2f
- add %g2, 1, %g2
+ sethi %hi(PAGE_SIZE), %g2
sub %g3, %g2, %g3
or %g1, 0x20, %g1 ! Nucleus
1: stxa %g0, [%g1 + %g3] ASI_DMMU_DEMAP
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 1796d2bdcaaa..5102bf7c8192 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -28,7 +28,10 @@ obj-y += cpuid-deps.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
-obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o intel_epb.o
+ifdef CONFIG_CPU_SUP_INTEL
+obj-y += intel.o intel_pconfig.o
+obj-$(CONFIG_PM) += intel_epb.o
+endif
obj-$(CONFIG_CPU_SUP_AMD) += amd.o
obj-$(CONFIG_CPU_SUP_HYGON) += hygon.o
obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o
diff --git a/arch/x86/kernel/cpu/intel_epb.c b/arch/x86/kernel/cpu/intel_epb.c
index ebb14a26f117..f4dd73396f28 100644
--- a/arch/x86/kernel/cpu/intel_epb.c
+++ b/arch/x86/kernel/cpu/intel_epb.c
@@ -97,7 +97,6 @@ static void intel_epb_restore(void)
wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, (epb & ~EPB_MASK) | val);
}
-#ifdef CONFIG_PM
static struct syscore_ops intel_epb_syscore_ops = {
.suspend = intel_epb_save,
.resume = intel_epb_restore,
@@ -194,25 +193,6 @@ static int intel_epb_offline(unsigned int cpu)
return 0;
}
-static inline void register_intel_ebp_syscore_ops(void)
-{
- register_syscore_ops(&intel_epb_syscore_ops);
-}
-#else /* !CONFIG_PM */
-static int intel_epb_online(unsigned int cpu)
-{
- intel_epb_restore();
- return 0;
-}
-
-static int intel_epb_offline(unsigned int cpu)
-{
- return intel_epb_save();
-}
-
-static inline void register_intel_ebp_syscore_ops(void) {}
-#endif
-
static __init int intel_epb_init(void)
{
int ret;
@@ -226,7 +206,7 @@ static __init int intel_epb_init(void)
if (ret < 0)
goto err_out_online;
- register_intel_ebp_syscore_ops();
+ register_syscore_ops(&intel_epb_syscore_ops);
return 0;
err_out_online:
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index cf00ab6c6621..306c3a0902ba 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -557,7 +557,8 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
}
/**
- * get_desc() - Obtain pointer to a segment descriptor
+ * get_desc() - Obtain contents of a segment descriptor
+ * @out: Segment descriptor contents on success
* @sel: Segment selector
*
* Given a segment selector, obtain a pointer to the segment descriptor.
@@ -565,18 +566,18 @@ static int get_reg_offset_16(struct insn *insn, struct pt_regs *regs,
*
* Returns:
*
- * Pointer to segment descriptor on success.
+ * True on success, false on failure.
*
* NULL on error.
*/
-static struct desc_struct *get_desc(unsigned short sel)
+static bool get_desc(struct desc_struct *out, unsigned short sel)
{
struct desc_ptr gdt_desc = {0, 0};
unsigned long desc_base;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
if ((sel & SEGMENT_TI_MASK) == SEGMENT_LDT) {
- struct desc_struct *desc = NULL;
+ bool success = false;
struct ldt_struct *ldt;
/* Bits [15:3] contain the index of the desired entry. */
@@ -584,12 +585,14 @@ static struct desc_struct *get_desc(unsigned short sel)
mutex_lock(&current->active_mm->context.lock);
ldt = current->active_mm->context.ldt;
- if (ldt && sel < ldt->nr_entries)
- desc = &ldt->entries[sel];
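+ /*
+ * Copy the descriptor out while context.lock is held: once the
+ * lock is dropped the LDT may be freed, so a pointer into it must
+ * not escape this function.
+ */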
+ if (ldt && sel < ldt->nr_entries) {
+ *out = ldt->entries[sel];
+ success = true;
+ }
mutex_unlock(&current->active_mm->context.lock);
- return desc;
+ return success;
}
#endif
native_store_gdt(&gdt_desc);
@@ -604,9 +607,10 @@ static struct desc_struct *get_desc(unsigned short sel)
desc_base = sel & ~(SEGMENT_RPL_MASK | SEGMENT_TI_MASK);
if (desc_base > gdt_desc.size)
- return NULL;
+ return false;
- return (struct desc_struct *)(gdt_desc.address + desc_base);
+ *out = *(struct desc_struct *)(gdt_desc.address + desc_base);
+ return true;
}
/**
@@ -628,7 +632,7 @@ static struct desc_struct *get_desc(unsigned short sel)
*/
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
short sel;
sel = get_segment_selector(regs, seg_reg_idx);
@@ -666,11 +670,10 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
if (!sel)
return -1L;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return -1L;
- return get_desc_base(desc);
+ return get_desc_base(&desc);
}
/**
@@ -692,7 +695,7 @@ unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx)
*/
static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
unsigned long limit;
short sel;
@@ -706,8 +709,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
if (!sel)
return 0;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return 0;
/*
@@ -716,8 +718,8 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
* not tested when checking the segment limits. In practice,
* this means that the segment ends in (limit << 12) + 0xfff.
*/
- limit = get_desc_limit(desc);
- if (desc->g)
+ limit = get_desc_limit(&desc);
+ if (desc.g)
limit = (limit << 12) + 0xfff;
return limit;
@@ -741,7 +743,7 @@ static unsigned long get_seg_limit(struct pt_regs *regs, int seg_reg_idx)
*/
int insn_get_code_seg_params(struct pt_regs *regs)
{
- struct desc_struct *desc;
+ struct desc_struct desc;
short sel;
if (v8086_mode(regs))
@@ -752,8 +754,7 @@ int insn_get_code_seg_params(struct pt_regs *regs)
if (sel < 0)
return sel;
- desc = get_desc(sel);
- if (!desc)
+ if (!get_desc(&desc, sel))
return -EINVAL;
/*
@@ -761,10 +762,10 @@ int insn_get_code_seg_params(struct pt_regs *regs)
* determines whether a segment contains data or code. If this is a data
* segment, return error.
*/
- if (!(desc->type & BIT(3)))
+ if (!(desc.type & BIT(3)))
return -EINVAL;
- switch ((desc->l << 1) | desc->d) {
+ switch ((desc.l << 1) | desc.d) {
case 0: /*
* Legacy mode. CS.L=0, CS.D=0. Address and operand size are
* both 16-bit.
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0bc07a9bc27b..24b079e94bc2 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -298,7 +298,17 @@ int hibernate_resume_nonboot_cpu_disable(void)
* address in its instruction pointer may not be possible to resolve
* any more at that point (the page tables used by it previously may
* have been overwritten by hibernate image data).
+ *
+ * First, make sure that we wake up all the potentially disabled SMT
+ * threads which have been initially brought up and then put into
+ * mwait/cpuidle sleep.
+ * Those will be put into a proper (not interfering with hibernation
+ * resume) sleep state afterwards, and the resumed kernel will decide
+ * for itself what to do with them.
*/
+ ret = cpuhp_smt_enable();
+ if (ret)
+ return ret;
smp_ops.play_dead = resume_play_dead;
ret = disable_nonboot_cpus();
smp_ops.play_dead = play_dead;
diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
index 4845b8c7be7f..fc413717a45f 100644
--- a/arch/x86/power/hibernate.c
+++ b/arch/x86/power/hibernate.c
@@ -11,6 +11,7 @@
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
+#include <linux/cpu.h>
#include <crypto/hash.h>
@@ -245,3 +246,35 @@ out:
__flush_tlb_all();
return 0;
}
+
+int arch_resume_nosmt(void)
+{
+ int ret = 0;
+ /*
+ * We reached this while coming out of hibernation. This means
+ * that SMT siblings are sleeping in hlt, as mwait is not safe
+ * against control transition during resume (see comment in
+ * hibernate_resume_nonboot_cpu_disable()).
+ *
+ * If the resumed kernel has SMT disabled, we have to take all the
+ * SMT siblings out of hlt, and offline them again so that they
+ * end up in mwait proper.
+ *
+ * Called with hotplug disabled.
+ */
+ cpu_hotplug_enable();
+ if (cpu_smt_control == CPU_SMT_DISABLED ||
+ cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
+ enum cpuhp_smt_control old = cpu_smt_control;
+
+ ret = cpuhp_smt_enable();
+ if (ret)
+ goto out;
+ ret = cpuhp_smt_disable(old);
+ if (ret)
+ goto out;
+ }
+out:
+ cpu_hotplug_disable();
+ return ret;
+}
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index c0ec24349421..176cb46bcf12 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -310,7 +310,8 @@ extern char _SecondaryResetVector_text_start;
extern char _SecondaryResetVector_text_end;
#endif
-static inline int mem_reserve(unsigned long start, unsigned long end)
+static inline int __init_memblock mem_reserve(unsigned long start,
+ unsigned long end)
{
return memblock_reserve(start, end - start);
}
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index b3796a40a61a..59f46904cb11 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -1046,7 +1046,8 @@ struct blkcg_policy blkcg_policy_bfq = {
struct cftype bfq_blkcg_legacy_files[] = {
{
.name = "bfq.weight",
- .flags = CFTYPE_NOT_ON_ROOT,
+ .link_name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
.seq_show = bfq_io_show_weight,
.write_u64 = bfq_io_set_weight_legacy,
},
@@ -1166,7 +1167,8 @@ struct cftype bfq_blkcg_legacy_files[] = {
struct cftype bfq_blkg_files[] = {
{
.name = "bfq.weight",
- .flags = CFTYPE_NOT_ON_ROOT,
+ .link_name = "weight",
+ .flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
.seq_show = bfq_io_show_weight,
.write = bfq_io_set_weight,
},
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b97b479e4f64..1f7127b03490 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -881,7 +881,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
blkg_free(new_blkg);
} else {
blkg = blkg_create(pos, q, new_blkg);
- if (unlikely(IS_ERR(blkg))) {
+ if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_unlock;
}
diff --git a/block/blk-core.c b/block/blk-core.c
index ee1b35fe8572..8340f69670d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -320,6 +320,19 @@ void blk_cleanup_queue(struct request_queue *q)
if (queue_is_mq(q))
blk_mq_exit_queue(q);
+ /*
+ * In theory, the request pool of sched_tags belongs to the request
+ * queue. However, the current implementation requires the tag_set for
+ * freeing requests, so free the pool now.
+ *
+ * The queue has already been frozen, so there can't be any in-queue
+ * requests and it is safe to free them now.
+ */
+ mutex_lock(&q->sysfs_lock);
+ if (q->elevator)
+ blk_mq_sched_free_requests(q);
+ mutex_unlock(&q->sysfs_lock);
+
percpu_ref_exit(&q->q_usage_counter);
/* @q is and will stay empty, shutdown and put */
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 74c6bb871f7e..500cb04901cc 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -475,14 +475,18 @@ static int blk_mq_sched_alloc_tags(struct request_queue *q,
return ret;
}
+/* Called in the queue's release handler; the tagset has already gone away. */
static void blk_mq_sched_tags_teardown(struct request_queue *q)
{
- struct blk_mq_tag_set *set = q->tag_set;
struct blk_mq_hw_ctx *hctx;
int i;
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_sched_free_tags(set, hctx, i);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags) {
+ blk_mq_free_rq_map(hctx->sched_tags);
+ hctx->sched_tags = NULL;
+ }
+ }
}
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
@@ -523,6 +527,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
ret = e->ops.init_hctx(hctx, i);
if (ret) {
eq = q->elevator;
+ blk_mq_sched_free_requests(q);
blk_mq_exit_sched(q, eq);
kobject_put(&eq->kobj);
return ret;
@@ -534,11 +539,30 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
return 0;
err:
+ blk_mq_sched_free_requests(q);
blk_mq_sched_tags_teardown(q);
q->elevator = NULL;
return ret;
}
+/*
+ * Called from either blk_cleanup_queue() or elevator_switch(); the
+ * tag_set is required for freeing requests.
+ */
+void blk_mq_sched_free_requests(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ lockdep_assert_held(&q->sysfs_lock);
+ WARN_ON(!q->elevator);
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (hctx->sched_tags)
+ blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i);
+ }
+}
+
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e)
{
struct blk_mq_hw_ctx *hctx;
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index c7bdb52367ac..3cf92cbbd8ac 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -28,6 +28,7 @@ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+void blk_mq_sched_free_requests(struct request_queue *q);
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 75b5281cc577..977c659dcd18 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -850,7 +850,7 @@ static void blk_exit_queue(struct request_queue *q)
*/
if (q->elevator) {
ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
+ __elevator_exit(q, q->elevator);
q->elevator = NULL;
}
diff --git a/block/blk.h b/block/blk.h
index 91b3581b7c7a..7814aa207153 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -6,6 +6,7 @@
#include <linux/blk-mq.h>
#include <xen/xen.h>
#include "blk-mq.h"
+#include "blk-mq-sched.h"
/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT (5 * HZ)
@@ -176,10 +177,17 @@ void blk_insert_flush(struct request *rq);
int elevator_init_mq(struct request_queue *q);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
-void elevator_exit(struct request_queue *, struct elevator_queue *);
+void __elevator_exit(struct request_queue *, struct elevator_queue *);
int elv_register_queue(struct request_queue *q);
void elv_unregister_queue(struct request_queue *q);
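+/*
+ * Free the scheduler's requests while q->tag_set is still available,
+ * then tear down the elevator itself.
+ */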
+static inline void elevator_exit(struct request_queue *q,
+ struct elevator_queue *e)
+{
+ blk_mq_sched_free_requests(q);
+ __elevator_exit(q, e);
+}
+
struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
#ifdef CONFIG_FAIL_IO_TIMEOUT
diff --git a/block/elevator.c b/block/elevator.c
index ec55d5fc0b3e..2f17d66d0e61 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -178,7 +178,7 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-void elevator_exit(struct request_queue *q, struct elevator_queue *e)
+void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
{
mutex_lock(&e->sysfs_lock);
if (e->type->ops.exit_sched)
diff --git a/crypto/hmac.c b/crypto/hmac.c
index f03cb32147cc..8b2a212eb0ad 100644
--- a/crypto/hmac.c
+++ b/crypto/hmac.c
@@ -152,8 +152,10 @@ static int hmac_init_tfm(struct crypto_tfm *tfm)
parent->descsize = sizeof(struct shash_desc) +
crypto_shash_descsize(hash);
- if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE))
+ if (WARN_ON(parent->descsize > HASH_MAX_DESCSIZE)) {
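+ /* Free the child shash so it is not leaked on this error path. */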
+ crypto_free_shash(hash);
return -EINVAL;
+ }
ctx->hash = hash;
return 0;
diff --git a/crypto/jitterentropy-kcapi.c b/crypto/jitterentropy-kcapi.c
index 6ea1a270b8dc..787dccca3715 100644
--- a/crypto/jitterentropy-kcapi.c
+++ b/crypto/jitterentropy-kcapi.c
@@ -198,7 +198,7 @@ static void __exit jent_mod_exit(void)
crypto_unregister_rng(&jent_alg);
}
-subsys_initcall(jent_mod_init);
+module_init(jent_mod_init);
module_exit(jent_mod_exit);
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index e2c6aae2d636..bd19f8af950b 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -196,7 +196,6 @@ static const struct file_operations aoe_debugfs_fops = {
static void
aoedisk_add_debugfs(struct aoedev *d)
{
- struct dentry *entry;
char *p;
if (aoe_debugfs_dir == NULL)
@@ -207,15 +206,8 @@ aoedisk_add_debugfs(struct aoedev *d)
else
p++;
BUG_ON(*p == '\0');
- entry = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
- &aoe_debugfs_fops);
- if (IS_ERR_OR_NULL(entry)) {
- pr_info("aoe: cannot create debugfs file for %s\n",
- d->gd->disk_name);
- return;
- }
- BUG_ON(d->debugfs);
- d->debugfs = entry;
+ d->debugfs = debugfs_create_file(p, 0444, aoe_debugfs_dir, d,
+ &aoe_debugfs_fops);
}
void
aoedisk_rm_debugfs(struct aoedev *d)
@@ -472,10 +464,6 @@ aoeblk_init(void)
if (buf_pool_cache == NULL)
return -ENOMEM;
aoe_debugfs_dir = debugfs_create_dir("aoe", NULL);
- if (IS_ERR_OR_NULL(aoe_debugfs_dir)) {
- pr_info("aoe: cannot create debugfs directory\n");
- aoe_debugfs_dir = NULL;
- }
return 0;
}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index bacfdac7161c..a14b09ab3a41 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3676,6 +3676,7 @@ skip_create_disk:
blk_queue_physical_block_size(dd->queue, 4096);
blk_queue_max_hw_sectors(dd->queue, 0xffff);
blk_queue_max_segment_size(dd->queue, 0x400000);
+ dma_set_max_seg_size(&dd->pdev->dev, 0x400000);
blk_queue_io_min(dd->queue, 4096);
/* Set the capacity of the device in 512 byte sectors. */
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index de9b2d2f8654..76b73ddf8fd7 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -767,7 +767,6 @@ static int rsxx_pci_probe(struct pci_dev *dev,
goto failed_enable;
pci_set_master(dev);
- dma_set_max_seg_size(&dev->dev, RSXX_HW_BLK_SIZE);
st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
if (st) {
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index cd57747286f2..9635897458a0 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -77,6 +77,7 @@ static void unmap_udmabuf(struct dma_buf_attachment *at,
struct sg_table *sg,
enum dma_data_direction direction)
{
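+ /* Undo the DMA mapping of the scatterlist before freeing it. */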
+ dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
sg_free_table(sg);
kfree(sg);
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 7204fdeff6c5..263bee76ef0d 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -662,10 +662,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
return status;
}
-static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
- struct jz4780_dma_chan *jzchan)
+static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+ struct jz4780_dma_chan *jzchan)
{
uint32_t dcs;
+ bool ack = true;
spin_lock(&jzchan->vchan.lock);
@@ -688,12 +689,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
if (jzchan->desc->type == DMA_CYCLIC) {
vchan_cyclic_callback(&jzchan->desc->vdesc);
- } else {
+
+ jz4780_dma_begin(jzchan);
+ } else if (dcs & JZ_DMA_DCS_TT) {
vchan_cookie_complete(&jzchan->desc->vdesc);
jzchan->desc = NULL;
- }
- jz4780_dma_begin(jzchan);
+ jz4780_dma_begin(jzchan);
+ } else {
+ /* False positive - continue the transfer */
+ ack = false;
+ jz4780_dma_chn_writel(jzdma, jzchan->id,
+ JZ_DMA_REG_DCS,
+ JZ_DMA_DCS_CTE);
+ }
}
} else {
dev_err(&jzchan->vchan.chan.dev->device,
@@ -701,21 +710,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
}
spin_unlock(&jzchan->vchan.lock);
+
+ return ack;
}
static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
{
struct jz4780_dma_dev *jzdma = data;
+ unsigned int nb_channels = jzdma->soc_data->nb_channels;
uint32_t pending, dmac;
int i;
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
- if (!(pending & (1<<i)))
- continue;
-
- jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+ for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+ if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
+ pending &= ~BIT(i);
}
/* Clear halt and address error status of all channels. */
@@ -724,7 +734,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
/* Clear interrupt pending status. */
- jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index b2ac1d2c5b86..a1ce307c502f 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
return vchan_tx_prep(&chan->vc, &first->vd, flags);
err_desc_get:
- axi_desc_put(first);
+ if (first)
+ axi_desc_put(first);
return NULL;
}
diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c
index aa1d0ae3d207..60b062c3647b 100644
--- a/drivers/dma/fsl-qdma.c
+++ b/drivers/dma/fsl-qdma.c
@@ -701,10 +701,8 @@ static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
- if (intr) {
+ if (intr)
dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
- return IRQ_NONE;
- }
qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
return IRQ_HANDLED;
diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c
index 814853842e29..723b11c190b3 100644
--- a/drivers/dma/mediatek/mtk-cqdma.c
+++ b/drivers/dma/mediatek/mtk-cqdma.c
@@ -225,7 +225,7 @@ static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
- return mtk_cqdma_poll_engine_done(pc, false);
+ return mtk_cqdma_poll_engine_done(pc, true);
}
static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
@@ -671,7 +671,7 @@ static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
/* wait for the completion of flush operation */
- if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0)
+ if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
/* clear the flush bit and interrupt flag */
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 48431e2da987..baac476c8622 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -62,6 +62,8 @@
/* SPRD_DMA_GLB_2STAGE_GRP register definition */
#define SPRD_DMA_GLB_2STAGE_EN BIT(24)
#define SPRD_DMA_GLB_CHN_INT_MASK GENMASK(23, 20)
+#define SPRD_DMA_GLB_DEST_INT BIT(22)
+#define SPRD_DMA_GLB_SRC_INT BIT(20)
#define SPRD_DMA_GLB_LIST_DONE_TRG BIT(19)
#define SPRD_DMA_GLB_TRANS_DONE_TRG BIT(18)
#define SPRD_DMA_GLB_BLOCK_DONE_TRG BIT(17)
@@ -135,6 +137,7 @@
/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK GENMASK(7, 0)
+#define SPRD_DMA_INT_TYPE_MASK GENMASK(7, 0)
/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP 0
@@ -190,6 +193,7 @@ struct sprd_dma_chn {
u32 dev_id;
enum sprd_dma_chn_mode chn_mode;
enum sprd_dma_trg_mode trg_mode;
+ enum sprd_dma_int_type int_type;
struct sprd_dma_desc *cur_desc;
};
@@ -429,6 +433,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_SRC_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
break;
@@ -436,6 +443,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_SRC_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
break;
@@ -443,6 +453,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
SPRD_DMA_GLB_DEST_CHN_MASK;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_DEST_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
break;
@@ -450,6 +463,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
SPRD_DMA_GLB_DEST_CHN_MASK;
val |= SPRD_DMA_GLB_2STAGE_EN;
+ if (schan->int_type != SPRD_DMA_NO_INT)
+ val |= SPRD_DMA_GLB_DEST_INT;
+
sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
break;
@@ -510,7 +526,9 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
sprd_dma_set_uid(schan);
sprd_dma_enable_chn(schan);
- if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+ if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
+ schan->chn_mode != SPRD_DMA_DST_CHN0 &&
+ schan->chn_mode != SPRD_DMA_DST_CHN1)
sprd_dma_soft_request(schan);
}
@@ -552,12 +570,17 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
schan = &sdev->channels[i];
spin_lock(&schan->vc.lock);
+
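+ /* Nothing to do if the channel has no descriptor in flight. */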
+ sdesc = schan->cur_desc;
+ if (!sdesc) {
+ spin_unlock(&schan->vc.lock);
+ return IRQ_HANDLED;
+ }
+
int_type = sprd_dma_get_int_type(schan);
req_type = sprd_dma_get_req_type(schan);
sprd_dma_clear_int(schan);
- sdesc = schan->cur_desc;
-
/* cyclic mode schedule callback */
cyclic = schan->linklist.phy_addr ? true : false;
if (cyclic == true) {
@@ -625,7 +648,7 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
else
pos = 0;
} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
- struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+ struct sprd_dma_desc *sdesc = schan->cur_desc;
if (sdesc->dir == DMA_DEV_TO_MEM)
pos = sprd_dma_get_dst_addr(schan);
@@ -771,7 +794,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
hw->frg_len = temp;
- hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+ hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
@@ -904,6 +927,16 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
schan->linklist.virt_addr = 0;
}
+ /*
+ * Set channel mode, interrupt mode and trigger mode for 2-stage
+ * transfer.
+ */
+ schan->chn_mode =
+ (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
+ schan->trg_mode =
+ (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
+ schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;
+
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc)
return NULL;
@@ -937,12 +970,6 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
}
- /* Set channel mode and trigger mode for 2-stage transfer */
- schan->chn_mode =
- (flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
- schan->trg_mode =
- (flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
-
ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
dir, flags, slave_cfg);
if (ret) {
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index d51550dd91c7..2805853e963f 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -42,10 +42,14 @@
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define ADMA_CH_FIFO_CTRL 0x2c
-#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val) (((val) & 0xf) << 24)
-#define ADMA_CH_FIFO_CTRL_STARV_THRES(val) (((val) & 0xf) << 16)
-#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT 8
-#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT 0
+#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0xf) << 24)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0xf) << 16)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0xf) << 8)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0xf)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val) (((val) & 0x1f) << 24)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val) (((val) & 0x1f) << 16)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val) (((val) & 0x1f) << 8)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val) ((val) & 0x1f)
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@@ -60,8 +64,15 @@
#define TEGRA_ADMA_BURST_COMPLETE_TIME 20
-#define ADMA_CH_FIFO_CTRL_DEFAULT (ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
- ADMA_CH_FIFO_CTRL_STARV_THRES(1))
+#define TEGRA210_FIFO_CTRL_DEFAULT (TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+ TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+ TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+ TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
+
+#define TEGRA186_FIFO_CTRL_DEFAULT (TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+ TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+ TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+ TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
#define ADMA_CH_REG_FIELD_VAL(val, mask, shift) (((val) & mask) << shift)
@@ -73,7 +84,8 @@ struct tegra_adma;
* @global_int_clear: Register offset of DMA global interrupt clear.
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
- * @ch_base_offset: Reister offset of DMA channel registers.
+ * @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
* @ch_req_mask: Mask for Tx or Rx channel select.
* @ch_req_max: Maximum number of Tx or Rx channels available.
* @ch_reg_size: Size of DMA channel register space.
@@ -86,6 +98,7 @@ struct tegra_adma_chip_data {
unsigned int ch_req_tx_shift;
unsigned int ch_req_rx_shift;
unsigned int ch_base_offset;
+ unsigned int ch_fifo_ctrl;
unsigned int ch_req_mask;
unsigned int ch_req_max;
unsigned int ch_reg_size;
@@ -589,7 +602,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
- ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
+ ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
return tegra_adma_request_alloc(tdc, direction);
@@ -773,6 +786,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
.ch_base_offset = 0,
+ .ch_fifo_ctrl = TEGRA210_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
@@ -786,6 +800,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
.ch_base_offset = 0x10000,
+ .ch_fifo_ctrl = TEGRA186_FIFO_CTRL_DEFAULT,
.ch_req_mask = 0x1f,
.ch_req_max = 20,
.ch_reg_size = 0x100,
@@ -834,16 +849,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
return PTR_ERR(tdma->ahub_clk);
}
- pm_runtime_enable(&pdev->dev);
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
- goto rpm_disable;
-
- ret = tegra_adma_init(tdma);
- if (ret)
- goto rpm_put;
-
INIT_LIST_HEAD(&tdma->dma_dev.channels);
for (i = 0; i < tdma->nr_channels; i++) {
struct tegra_adma_chan *tdc = &tdma->channels[i];
@@ -862,6 +867,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->tdma = tdma;
}
+ pm_runtime_enable(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ goto rpm_disable;
+
+ ret = tegra_adma_init(tdma);
+ if (ret)
+ goto rpm_put;
+
dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
@@ -905,13 +920,13 @@ static int tegra_adma_probe(struct platform_device *pdev)
dma_remove:
dma_async_device_unregister(&tdma->dma_dev);
-irq_dispose:
- while (--i >= 0)
- irq_dispose_mapping(tdma->channels[i].irq);
rpm_put:
pm_runtime_put_sync(&pdev->dev);
rpm_disable:
pm_runtime_disable(&pdev->dev);
+irq_dispose:
+ while (--i >= 0)
+ irq_dispose_mapping(tdma->channels[i].irq);
return ret;
}
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index c438722bf4e1..dcd80b088c7b 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -399,7 +399,7 @@ int afu_dma_map_region(struct dfl_feature_platform_data *pdata,
region->pages[0], 0,
region->length,
DMA_BIDIRECTIONAL);
- if (dma_mapping_error(&pdata->dev->dev, region->iova)) {
+ if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
dev_err(&pdata->dev->dev, "failed to map for dma\n");
ret = -EFAULT;
goto unpin_pages;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index 2c09e502e721..4b66aaa32b5a 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -40,6 +40,13 @@ enum dfl_fpga_devt_type {
DFL_FPGA_DEVT_MAX,
};
+static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
+
+static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
+ "dfl-fme-pdata",
+ "dfl-port-pdata",
+};
+
/**
* dfl_dev_info - dfl feature device information.
* @name: name string of the feature platform device.
@@ -315,7 +322,7 @@ static void dfl_chardev_uinit(void)
for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
if (MAJOR(dfl_chrdevs[i].devt)) {
unregister_chrdev_region(dfl_chrdevs[i].devt,
- MINORMASK);
+ MINORMASK + 1);
dfl_chrdevs[i].devt = MKDEV(0, 0);
}
}
@@ -325,8 +332,8 @@ static int dfl_chardev_init(void)
int i, ret;
for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
- ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0, MINORMASK,
- dfl_chrdevs[i].name);
+ ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
+ MINORMASK + 1, dfl_chrdevs[i].name);
if (ret)
goto exit;
}
@@ -443,11 +450,16 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
struct platform_device *fdev = binfo->feature_dev;
struct dfl_feature_platform_data *pdata;
struct dfl_feature_info *finfo, *p;
+ enum dfl_id_type type;
int ret, index = 0;
if (!fdev)
return 0;
+ type = feature_dev_id_type(fdev);
+ if (WARN_ON_ONCE(type >= DFL_ID_MAX))
+ return -EINVAL;
+
/*
* we do not need to care for the memory which is associated with
* the platform device. After calling platform_device_unregister(),
@@ -463,6 +475,8 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
pdata->num = binfo->feature_num;
pdata->dfl_cdev = binfo->cdev;
mutex_init(&pdata->lock);
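+ /*
+ * Give FME and port pdata locks separate lockdep classes so that
+ * nesting them is not reported as recursive locking.
+ */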
+ lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
+ dfl_pdata_key_strings[type]);
/*
* the count should be initialized to 0 to make sure
@@ -497,7 +511,7 @@ static int build_info_commit_dev(struct build_feature_devs_info *binfo)
ret = platform_device_add(binfo->feature_dev);
if (!ret) {
- if (feature_dev_id_type(binfo->feature_dev) == PORT_ID)
+ if (type == PORT_ID)
dfl_fpga_cdev_add_port_dev(binfo->cdev,
binfo->feature_dev);
else
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 13851b3d1c56..215d33789c74 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -507,12 +507,16 @@ static int __init s10_init(void)
if (!fw_np)
return -ENODEV;
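+ /*
+ * of_find_matching_node() drops a reference on the node it starts
+ * from, so take an extra reference on fw_np here; it is dropped
+ * again once of_platform_populate() has run.
+ */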
+ of_node_get(fw_np);
np = of_find_matching_node(fw_np, s10_of_match);
- if (!np)
+ if (!np) {
+ of_node_put(fw_np);
return -ENODEV;
+ }
of_node_put(np);
ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL);
+ of_node_put(fw_np);
if (ret)
return ret;
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index f7cbaadf49ab..b8a88d21d038 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -47,7 +47,7 @@ static int zynqmp_fpga_ops_write(struct fpga_manager *mgr,
char *kbuf;
int ret;
- if (!eemi_ops || !eemi_ops->fpga_load)
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_load)
return -ENXIO;
priv = mgr->priv;
@@ -81,7 +81,7 @@ static enum fpga_mgr_states zynqmp_fpga_ops_state(struct fpga_manager *mgr)
const struct zynqmp_eemi_ops *eemi_ops = zynqmp_pm_get_eemi_ops();
u32 status;
- if (!eemi_ops || !eemi_ops->fpga_get_status)
+ if (IS_ERR_OR_NULL(eemi_ops) || !eemi_ops->fpga_get_status)
return FPGA_MGR_STATE_UNKNOWN;
eemi_ops->fpga_get_status(&status);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index cc8ad3831982..f4ac632a87b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1589,6 +1589,7 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
int r = 0;
int i;
+ uint32_t smu_version;
if (adev->asic_type >= CHIP_VEGA10) {
for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1614,16 +1615,9 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
}
}
}
+ r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
- if (r) {
- pr_err("firmware loading failed\n");
- return r;
- }
- }
-
- return 0;
+ return r;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 34471dbaa872..039cfa2ec89d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -2490,6 +2490,21 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
}
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+{
+ int r = -EINVAL;
+
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
+ r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
+ if (r) {
+ pr_err("smu firmware loading failed\n");
+ return r;
+ }
+ *smu_version = adev->pm.fw_version;
+ }
+ return r;
+}
+
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index f21a7716b90e..7ff0e7621fff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -34,6 +34,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index c021b114c8a4..f7189e22f6b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -1072,7 +1072,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- uint32_t rptr = amdgpu_ring_get_rptr(ring);
+ uint32_t rptr;
unsigned i;
int r, timeout = adev->usec_timeout;
@@ -1084,6 +1084,8 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
if (r)
return r;
+ rptr = amdgpu_ring_get_rptr(ring);
+
amdgpu_ring_write(ring, VCE_CMD_END);
amdgpu_ring_commit(ring);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index ba67d1023264..b610e3b30d95 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -28,6 +28,7 @@
#include "soc15.h"
#include "soc15d.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_pm.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -96,6 +97,7 @@ MODULE_FIRMWARE("amdgpu/raven2_me.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec.bin");
MODULE_FIRMWARE("amdgpu/raven2_mec2.bin");
MODULE_FIRMWARE("amdgpu/raven2_rlc.bin");
+MODULE_FIRMWARE("amdgpu/raven_kicker_rlc.bin");
static const struct soc15_reg_golden golden_settings_gc_9_0[] =
{
@@ -588,7 +590,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_RAVEN:
if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
break;
- if ((adev->gfx.rlc_fw_version < 531) ||
+ if ((adev->gfx.rlc_fw_version != 106 &&
+ adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
!adev->gfx.rlc.is_rlc_v2_1)
@@ -612,6 +615,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
unsigned int i = 0;
uint16_t version_major;
uint16_t version_minor;
+ uint32_t smu_version;
DRM_DEBUG("\n");
@@ -682,6 +686,12 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
(((adev->pdev->revision >= 0xC8) && (adev->pdev->revision <= 0xCF)) ||
((adev->pdev->revision >= 0xD8) && (adev->pdev->revision <= 0xDF))))
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc_am4.bin", chip_name);
+ else if (!strcmp(chip_name, "raven") && (amdgpu_pm_load_smu_firmware(adev, &smu_version) == 0) &&
+ (smu_version >= 0x41e2b))
+ /*
+ * The SMC is loaded by the SBIOS on APUs, so the SMU version can be
+ * read directly.
+ */
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_kicker_rlc.bin", chip_name);
else
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index bcb1a93c0b4c..ab7c5c3004ee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4232,8 +4232,7 @@ static void dm_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *old_state =
drm_atomic_get_old_plane_state(new_state->state, plane);
- if (plane->state->fb != new_state->fb)
- drm_atomic_set_fb_for_plane(plane->state, new_state->fb);
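+ /*
+ * Swap the framebuffers instead of assigning, so that the helper's
+ * cleanup on new_state releases the old framebuffer.
+ */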
+ swap(plane->state->fb, new_state->fb);
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 6cd6497c6fc2..f1d326caf69e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -92,6 +92,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr)
hwmgr_set_user_specify_caps(hwmgr);
hwmgr->fan_ctrl_is_in_default_mode = true;
hwmgr_init_workload_prority(hwmgr);
+ hwmgr->gfxoff_state_changed_by_workload = false;
switch (hwmgr->chip_family) {
case AMDGPU_FAMILY_CI:
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 9a595f7525e6..e32ae9d3373c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1258,21 +1258,46 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
return size;
}
+static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
+{
+ struct amdgpu_device *adev = hwmgr->adev;
+ if ((adev->asic_type == CHIP_RAVEN) &&
+ (adev->rev_id != 0x15d8) &&
+ (hwmgr->smu_version >= 0x41e2b))
+ return true;
+ else
+ return false;
+}
+
static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
int workload_type = 0;
+ int result = 0;
if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
pr_err("Invalid power profile mode %ld\n", input[size]);
return -EINVAL;
}
- hwmgr->power_profile_mode = input[size];
+ if (hwmgr->power_profile_mode == input[size])
+ return 0;
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type =
- conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
+ conv_power_profile_to_pplib_workload(input[size]);
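+ /*
+ * On Raven1-refresh parts GFXOFF is disabled around the workload
+ * change message and re-enabled below once it has been sent.
+ */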
+ if (workload_type &&
+ smu10_is_raven1_refresh(hwmgr) &&
+ !hwmgr->gfxoff_state_changed_by_workload) {
+ smu10_gfx_off_control(hwmgr, false);
+ hwmgr->gfxoff_state_changed_by_workload = true;
+ }
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
1 << workload_type);
+ if (!result)
+ hwmgr->power_profile_mode = input[size];
+ if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
+ smu10_gfx_off_control(hwmgr, true);
+ hwmgr->gfxoff_state_changed_by_workload = false;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index bac3d85e3b82..c92999aac07c 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -782,6 +782,7 @@ struct pp_hwmgr {
uint32_t workload_mask;
uint32_t workload_prority[Workload_Policy_Max];
uint32_t workload_setting[Workload_Policy_Max];
+ bool gfxoff_state_changed_by_workload;
};
int hwmgr_early_init(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
index 031e5f305a3c..6bab816ed8e7 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -245,7 +245,7 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
}
-static struct komeda_component_funcs d71_layer_funcs = {
+static const struct komeda_component_funcs d71_layer_funcs = {
.update = d71_layer_update,
.disable = d71_layer_disable,
.dump_register = d71_layer_dump,
@@ -391,7 +391,7 @@ static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
}
-static struct komeda_component_funcs d71_compiz_funcs = {
+static const struct komeda_component_funcs d71_compiz_funcs = {
.update = d71_compiz_update,
.disable = d71_component_disable,
.dump_register = d71_compiz_dump,
@@ -467,7 +467,7 @@ static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
}
-static struct komeda_component_funcs d71_improc_funcs = {
+static const struct komeda_component_funcs d71_improc_funcs = {
.update = d71_improc_update,
.disable = d71_component_disable,
.dump_register = d71_improc_dump,
@@ -580,7 +580,7 @@ static void d71_timing_ctrlr_dump(struct komeda_component *c,
seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
}
-static struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+static const struct komeda_component_funcs d71_timing_ctrlr_funcs = {
.update = d71_timing_ctrlr_update,
.disable = d71_timing_ctrlr_disable,
.dump_register = d71_timing_ctrlr_dump,
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 34506ef7ad40..3a7248d42376 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -502,7 +502,7 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
table->n_formats = ARRAY_SIZE(d71_format_caps_table);
}
-static struct komeda_dev_funcs d71_chip_funcs = {
+static const struct komeda_dev_funcs d71_chip_funcs = {
.init_format_table = d71_init_fmt_tbl,
.enum_resources = d71_enum_resources,
.cleanup = d71_cleanup,
@@ -514,7 +514,7 @@ static struct komeda_dev_funcs d71_chip_funcs = {
.flush = d71_flush,
};
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 62fad59f5a6a..284ce079d8c4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -350,7 +350,7 @@ static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
return true;
}
-static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+static const struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
.atomic_check = komeda_crtc_atomic_check,
.atomic_flush = komeda_crtc_atomic_flush,
.atomic_enable = komeda_crtc_atomic_enable,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index ca3599e4a4d3..b67030a9f056 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,6 +8,7 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -249,6 +250,9 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
+ dev->dma_parms = &mdev->dma_parms;
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
if (err) {
DRM_ERROR("create sysfs group failed.\n");
@@ -269,7 +273,7 @@ err_cleanup:
void komeda_dev_destroy(struct komeda_dev *mdev)
{
struct device *dev = mdev->dev;
- struct komeda_dev_funcs *funcs = mdev->funcs;
+ const struct komeda_dev_funcs *funcs = mdev->funcs;
int i;
sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 29e03c4e1ffc..973fd5e0eb98 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -60,7 +60,7 @@ struct komeda_chip_info {
struct komeda_product_data {
u32 product_id;
- struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
+ const struct komeda_dev_funcs *(*identify)(u32 __iomem *reg,
struct komeda_chip_info *info);
};
@@ -149,6 +149,8 @@ struct komeda_dev {
struct device *dev;
/** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
+ /** @dma_parms: the dma parameters of komeda */
+ struct device_dma_parameters dma_parms;
/** @chip: the basic chip information */
struct komeda_chip_info chip;
@@ -173,7 +175,7 @@ struct komeda_dev {
struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
/** @funcs: chip funcs to access to HW */
- struct komeda_dev_funcs *funcs;
+ const struct komeda_dev_funcs *funcs;
/**
* @chip_data:
*
@@ -192,7 +194,7 @@ komeda_product_match(struct komeda_dev *mdev, u32 target)
return MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id) == target;
}
-struct komeda_dev_funcs *
+const struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
struct komeda_dev *komeda_dev_create(struct device *dev);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index c379439c6194..a130b62fa6d1 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -12,7 +12,7 @@
/** komeda_pipeline_add - Add a pipeline to &komeda_dev */
struct komeda_pipeline *
komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
- struct komeda_pipeline_funcs *funcs)
+ const struct komeda_pipeline_funcs *funcs)
{
struct komeda_pipeline *pipe;
@@ -130,7 +130,7 @@ komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id)
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
- struct komeda_component_funcs *funcs,
+ const struct komeda_component_funcs *funcs,
u8 max_active_inputs, u32 supported_inputs,
u8 max_active_outputs, u32 __iomem *reg,
const char *name_fmt, ...)
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index b1f813a349a4..bae8a32b81a6 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -124,7 +124,7 @@ struct komeda_component {
/**
* @funcs: chip functions to access HW
*/
- struct komeda_component_funcs *funcs;
+ const struct komeda_component_funcs *funcs;
};
/**
@@ -346,8 +346,8 @@ struct komeda_pipeline {
struct komeda_improc *improc;
/** @ctrlr: timing controller */
struct komeda_timing_ctrlr *ctrlr;
- /** @funcs: chip pipeline functions */
- struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
+ /** @funcs: chip private pipeline functions */
+ const struct komeda_pipeline_funcs *funcs;
/** @of_node: pipeline dt node */
struct device_node *of_node;
@@ -397,7 +397,7 @@ struct komeda_pipeline_state {
/* pipeline APIs */
struct komeda_pipeline *
komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
- struct komeda_pipeline_funcs *funcs);
+ const struct komeda_pipeline_funcs *funcs);
void komeda_pipeline_destroy(struct komeda_dev *mdev,
struct komeda_pipeline *pipe);
int komeda_assemble_pipelines(struct komeda_dev *mdev);
@@ -411,7 +411,7 @@ void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
size_t comp_sz, u32 id, u32 hw_id,
- struct komeda_component_funcs *funcs,
+ const struct komeda_component_funcs *funcs,
u8 max_active_inputs, u32 supported_inputs,
u8 max_active_outputs, u32 __iomem *reg,
const char *name_fmt, ...);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 07ed0cc1bc44..c97062bdd69b 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -55,7 +55,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
struct komeda_plane_state *kplane_st = to_kplane_st(state);
struct komeda_layer *layer = kplane->layer;
struct drm_crtc_state *crtc_st;
- struct komeda_crtc *kcrtc;
struct komeda_crtc_state *kcrtc_st;
struct komeda_data_flow_cfg dflow;
int err;
@@ -64,7 +63,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
return 0;
crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
- if (!crtc_st->enable) {
+ if (IS_ERR(crtc_st) || !crtc_st->enable) {
DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
return -EINVAL;
}
@@ -73,7 +72,6 @@ komeda_plane_atomic_check(struct drm_plane *plane,
if (!crtc_st->active)
return 0;
- kcrtc = to_kcrtc(state->crtc);
kcrtc_st = to_kcrtc_st(crtc_st);
err = komeda_plane_init_data_flow(state, &dflow);
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0b2b62f8fa3c..a3efa28436ea 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -186,20 +186,20 @@ static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable_unprepare(hdlcd->clk);
}
-static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- struct drm_display_mode *mode = &state->adjusted_mode;
long rate, clk_rate = mode->clock * 1000;
rate = clk_round_rate(hdlcd->clk, clk_rate);
- if (rate != clk_rate) {
+ /* 0.1% seems a close enough tolerance for the TDA19988 on Juno */
+ if (abs(rate - clk_rate) * 1000 > clk_rate) {
/* clock required by mode not supported by hardware */
- return -EINVAL;
+ return MODE_NOCLOCK;
}
- return 0;
+ return MODE_OK;
}
static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -220,7 +220,7 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
- .atomic_check = hdlcd_crtc_atomic_check,
+ .mode_valid = hdlcd_crtc_mode_valid,
.atomic_begin = hdlcd_crtc_atomic_begin,
.atomic_enable = hdlcd_crtc_atomic_enable,
.atomic_disable = hdlcd_crtc_atomic_disable,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 3ecf8ddc5130..af1992f06a1d 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -188,6 +188,7 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm->dev_private;
+ int loop = 5;
malidp->event = malidp->crtc.state->event;
malidp->crtc.state->event = NULL;
@@ -202,8 +203,18 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
drm_crtc_vblank_get(&malidp->crtc);
/* only set config_valid if the CRTC is enabled */
- if (malidp_set_and_wait_config_valid(drm) < 0)
+ if (malidp_set_and_wait_config_valid(drm) < 0) {
+ /*
+ * Retry setting config valid (CVAL) up to 5 more times
+ * before giving up.
+ */
+ while (loop--) {
+ if (!malidp_set_and_wait_config_valid(drm))
+ break;
+ }
DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ }
+
} else if (malidp->event) {
/* CRTC inactive means vblank IRQ is disabled, send event directly */
spin_lock_irq(&drm->event_lock);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2e0cb4246cbd..22a5c617f670 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1607,15 +1607,6 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
- /*
- * FIXME: Since prepare_fb and cleanup_fb are always called on
- * the new_plane_state for async updates we need to block framebuffer
- * changes. This prevents use of a fb that's been cleaned up and
- * double cleanups from occuring.
- */
- if (old_plane_state->fb != new_plane_state->fb)
- return -EINVAL;
-
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
@@ -1646,6 +1637,8 @@ EXPORT_SYMBOL(drm_atomic_helper_async_check);
* drm_atomic_async_check() succeeds. Async commits are not supposed to swap
* the states like normal sync commits, but just do in-place changes on the
* current state.
+ *
+ * TODO: Implement full swap instead of doing in-place changes.
*/
void drm_atomic_helper_async_commit(struct drm_device *dev,
struct drm_atomic_state *state)
@@ -1656,6 +1649,9 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
int i;
for_each_new_plane_in_state(state, plane, plane_state, i) {
+ struct drm_framebuffer *new_fb = plane_state->fb;
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
funcs = plane->helper_private;
funcs->atomic_async_update(plane, plane_state);
@@ -1664,11 +1660,17 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
* plane->state in-place, make sure at least common
* properties have been properly updated.
*/
- WARN_ON_ONCE(plane->state->fb != plane_state->fb);
+ WARN_ON_ONCE(plane->state->fb != new_fb);
WARN_ON_ONCE(plane->state->crtc_x != plane_state->crtc_x);
WARN_ON_ONCE(plane->state->crtc_y != plane_state->crtc_y);
WARN_ON_ONCE(plane->state->src_x != plane_state->src_x);
WARN_ON_ONCE(plane->state->src_y != plane_state->src_y);
+
+ /*
+ * Make sure the FBs have been swapped, so that cleanup on the
+ * new_state actually releases the old FB.
+ */
+ WARN_ON_ONCE(plane_state->fb != old_fb);
}
}
EXPORT_SYMBOL(drm_atomic_helper_async_commit);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 5cb59c0b4bbe..de5347725564 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2530,7 +2530,7 @@ static const struct cmd_info cmd_info[] = {
0, 12, NULL},
{"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
- 0, 20, NULL},
+ 0, 12, NULL},
};
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 244ad1729764..53115bdae12b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -53,13 +53,19 @@ static int preallocated_oos_pages = 8192;
*/
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
- if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
- && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
- gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
- addr, size);
- return false;
- }
- return true;
+ if (size == 0)
+ return vgpu_gmadr_is_valid(vgpu, addr);
+
+ if (vgpu_gmadr_is_aperture(vgpu, addr) &&
+ vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
+ return true;
+ else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
+ vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
+ return true;
+
+ gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
+ addr, size);
+ return false;
}
/* translate a guest gmadr to host gmadr */
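The rewritten intel_gvt_ggtt_validate_range() only accepts a range that lies entirely inside one GM window (aperture or hidden); checking the two endpoints independently, as before, allowed a range to straddle the gap between the windows. A standalone C sketch of the stricter check, with an invented window layout for illustration:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct gm_window { uint64_t base, size; };

    static bool in_window(const struct gm_window *w, uint64_t addr)
    {
            return addr >= w->base && addr < w->base + w->size;
    }

    /* Valid only if [addr, addr + size) fits entirely inside one window. */
    static bool range_is_valid(const struct gm_window *aperture,
                               const struct gm_window *hidden,
                               uint64_t addr, uint32_t size)
    {
            if (size == 0)
                    return in_window(aperture, addr) || in_window(hidden, addr);

            if (in_window(aperture, addr) && in_window(aperture, addr + size - 1))
                    return true;
            if (in_window(hidden, addr) && in_window(hidden, addr + size - 1))
                    return true;
            return false;
    }

    int main(void)
    {
            struct gm_window aperture = { 0x00000000, 0x10000000 };
            struct gm_window hidden   = { 0x20000000, 0x10000000 };

            /* Straddles the gap between the windows: rejected. */
            printf("%d\n", range_is_valid(&aperture, &hidden, 0x0ff00000, 0x200000));
            /* Entirely inside the hidden window: accepted. */
            printf("%d\n", range_is_valid(&aperture, &hidden, 0x20000000, 0x1000));
            return 0;
    }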
@@ -942,7 +948,16 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
- cur_pt_type = get_next_pt_type(e->type) + 1;
+ cur_pt_type = get_next_pt_type(e->type);
+
+ if (!gtt_type_is_pt(cur_pt_type) ||
+ !gtt_type_is_pt(cur_pt_type + 1)) {
+ WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ return -EINVAL;
+ }
+
+ cur_pt_type += 1;
+
if (ops->get_pfn(e) ==
vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
return 0;
@@ -1102,6 +1117,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
err_free_spt:
ppgtt_free_spt(spt);
+ spt = NULL;
err:
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
spt, we->val64, we->type);
@@ -2183,7 +2199,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
unsigned long gma, gfn;
- struct intel_gvt_gtt_entry e, m;
+ struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
+ struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
dma_addr_t dma_addr;
int ret;
struct intel_gvt_partial_pte *partial_pte, *pos, *n;
@@ -2250,7 +2267,8 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
- m = e;
+ m.val64 = e.val64;
+ m.type = e.type;
/* one PTE update may be issued in multiple writes and the
* first write may not construct a valid gfn
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index e09bd6e0cc4d..a6ade66349bd 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -464,6 +464,8 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0x2690),
_MMIO(0x2694),
_MMIO(0x2698),
+ _MMIO(0x2754),
+ _MMIO(0x28a0),
_MMIO(0x4de0),
_MMIO(0x4de4),
_MMIO(0x4dfc),
@@ -1690,8 +1692,22 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
int ret;
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
+ if (data & _MASKED_BIT_ENABLE(1)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
+ if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ data & _MASKED_BIT_ENABLE(2)) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
+
/* when PPGTT mode enabled, we will check if guest has called
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
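The ring-mode handler above relies on the masked-register convention, where the upper 16 bits of a write select which of the lower 16 bits take effect, so _MASKED_BIT_ENABLE(b) sets both the mask and the value bit; unsupported enables are stripped before the write is committed, and the vGPU is dropped into failsafe mode if the guest asked for them. A small C sketch of that filtering, with the macro re-derived here as an assumption rather than copied from the i915 headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Masked-register convention (assumed here): the high 16 bits of a write
     * select which of the low 16 bits actually take effect. */
    #define MASKED_BIT_ENABLE(b)    (((uint32_t)(b) << 16) | (uint32_t)(b))

    /* Strip a masked-bit enable from a guest write before committing it, but
     * remember whether the guest tried to set it. */
    static uint32_t filter_write(uint32_t data, uint32_t bit, int *guest_tried)
    {
            *guest_tried = !!(data & MASKED_BIT_ENABLE(bit));
            return data & ~MASKED_BIT_ENABLE(bit);
    }

    int main(void)
    {
            int tried;
            uint32_t v = filter_write(MASKED_BIT_ENABLE(1) | 0x4, 1, &tried);

            printf("committed 0x%x, guest set unsupported bit: %d\n", v, tried);
            return 0;
    }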
@@ -1773,6 +1789,21 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
return 0;
}
+static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ u32 data = *(u32 *)p_data;
+
+ (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+ write_vreg(vgpu, offset, p_data, bytes);
+
+ if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+ return 0;
+}
+
#define MMIO_F(reg, s, f, am, rm, d, r, w) do { \
ret = new_mmio_info(gvt, i915_mmio_reg_offset(reg), \
f, s, am, rm, d, r, w); \
@@ -1893,7 +1924,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(_MMIO(0x20dc), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2088), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x20e4), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(FF_SLICE_CS_CHICKEN2, D_ALL,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2470), D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
@@ -2997,7 +3029,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
+ MMIO_DFH(BDW_SCRATCH1, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_D(SKL_DFSM, D_SKL_PLUS);
MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
@@ -3010,8 +3042,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
- NULL, NULL);
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
@@ -3030,7 +3062,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x46520), D_SKL_PLUS);
MMIO_D(_MMIO(0xc403c), D_SKL_PLUS);
- MMIO_D(_MMIO(0xb004), D_SKL_PLUS);
+ MMIO_DFH(GEN8_GARBCNTL, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
@@ -3059,7 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(_PLANE_KEYMSK_1(PIPE_C)), D_SKL_PLUS);
MMIO_D(_MMIO(0x44500), D_SKL_PLUS);
- MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+#define CSFE_CHICKEN1_REG(base) _MMIO((base) + 0xD4)
+ MMIO_RING_DFH(CSFE_CHICKEN1_REG, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, csfe_chicken1_mmio_write);
+#undef CSFE_CHICKEN1_REG
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
@@ -3239,7 +3274,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_PUSHBUS_ENABLE, D_BXT);
MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT);
MMIO_D(GEN6_GFXPAUSE, D_BXT);
- MMIO_D(GEN8_L3SQCREG1, D_BXT);
+ MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 33aaa14bfdde..5b66e14c5b7b 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -102,6 +102,8 @@
#define FORCEWAKE_ACK_MEDIA_GEN9_REG 0x0D88
#define FORCEWAKE_ACK_HSW_REG 0x130044
+#define RB_HEAD_WRAP_CNT_MAX ((1 << 11) - 1)
+#define RB_HEAD_WRAP_CNT_OFF 21
#define RB_HEAD_OFF_MASK ((1U << 21) - (1U << 2))
#define RB_TAIL_OFF_MASK ((1U << 21) - (1U << 3))
#define RB_TAIL_SIZE_MASK ((1U << 21) - (1U << 12))
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 13632dba8b2a..0f919f0a43d4 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -812,10 +812,31 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
void *src;
unsigned long context_gpa, context_page_num;
int i;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
+ u32 ring_base;
+ u32 head, tail;
+ u16 wrap_count;
gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
workload->ctx_desc.lrca);
+ head = workload->rb_head;
+ tail = workload->rb_tail;
+ wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
+
+ if (tail < head) {
+ if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
+ wrap_count = 0;
+ else
+ wrap_count += 1;
+ }
+
+ head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
+
+ ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
+ vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
+
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
@@ -1415,6 +1436,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
+ u32 guest_head;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
@@ -1430,6 +1452,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ring_tail.val), &tail, 4);
+ guest_head = head;
+
head &= RB_HEAD_OFF_MASK;
tail &= RB_TAIL_OFF_MASK;
@@ -1462,6 +1486,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
+ workload->guest_rb_head = guest_head;
workload->rb_tail = tail;
workload->rb_start = start;
workload->rb_ctl = ctl;
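update_guest_context() now rebuilds the ring-buffer HEAD value reported back to the guest: the wrap counter sits above bit RB_HEAD_WRAP_CNT_OFF and is incremented, wrapping at RB_HEAD_WRAP_CNT_MAX, whenever the tail offset falls below the head offset. The same arithmetic as a self-contained C function using the constants added in reg.h:

    #include <stdint.h>
    #include <stdio.h>

    #define RB_HEAD_WRAP_CNT_MAX    ((1 << 11) - 1)
    #define RB_HEAD_WRAP_CNT_OFF    21

    /* Rebuild the HEAD register value the guest should observe: keep the
     * previous wrap count, bump it when tail < head (the ring wrapped), and
     * place the tail offset in the low bits. */
    static uint32_t guest_rb_head(uint32_t guest_head, uint32_t head, uint32_t tail)
    {
            uint16_t wrap_count = guest_head >> RB_HEAD_WRAP_CNT_OFF;

            if (tail < head) {
                    if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
                            wrap_count = 0;
                    else
                            wrap_count += 1;
            }

            return ((uint32_t)wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
    }

    int main(void)
    {
            /* tail wrapped behind head: wrap count goes from 0 to 1 */
            printf("0x%08x\n", guest_rb_head(0, 0x1000, 0x100));
            /* no wrap: count stays at 1 */
            printf("0x%08x\n", guest_rb_head(1u << RB_HEAD_WRAP_CNT_OFF, 0x100, 0x1000));
            return 0;
    }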
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 90c6756f5453..c50d14a9ce85 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -100,6 +100,7 @@ struct intel_vgpu_workload {
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
+ unsigned long guest_rb_head;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 249d35c12a75..2aa69d347ec4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7620,6 +7620,9 @@ enum {
#define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1 << 8)
#define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE (1 << 0)
+#define GEN8_L3CNTLREG _MMIO(0x7034)
+ #define GEN8_ERRDETBCTRL (1 << 9)
+
#define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304)
#define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 6decd432f4d3..841b8e515f4d 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -518,6 +518,12 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *wal = &engine->ctx_wa_list;
+ /* WaDisableBankHangMode:icl */
+ wa_write(wal,
+ GEN8_L3CNTLREG,
+ intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
+ GEN8_ERRDETBCTRL);
+
/* Wa_1604370585:icl (pre-prod)
* Formerly known as WaPushConstantDereferenceHoldDisable
*/
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index be13140967b4..b854f471e9e5 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -502,6 +502,8 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
plane->state->src_x = new_state->src_x;
plane->state->src_y = new_state->src_y;
plane->state->crtc_x = new_state->crtc_x;
@@ -524,6 +526,8 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
*to_mdp5_plane_state(plane->state) =
*to_mdp5_plane_state(new_state);
+
+ new_state->fb = old_fb;
}
static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
index ff0fa38aee72..54da9c6bc8d5 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
@@ -1,12 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NVKM_FIRMWARE_H__
#define __NVKM_FIRMWARE_H__
-
-#include <core/device.h>
-
-int nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
- const struct firmware **fw);
-
-void nvkm_firmware_put(const struct firmware *fw);
-
+#include <core/subdev.h>
+
+int nvkm_firmware_get_version(const struct nvkm_subdev *, const char *fwname,
+ int min_version, int max_version,
+ const struct firmware **);
+int nvkm_firmware_get(const struct nvkm_subdev *, const char *fwname,
+ const struct firmware **);
+void nvkm_firmware_put(const struct firmware *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 058ff46b5f16..092acdec2c39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -24,7 +24,7 @@
/**
* nvkm_firmware_get - load firmware from the official nvidia/chip/ directory
- * @device device that will use that firmware
+ * @subdev subdevice that will use that firmware
* @fwname name of firmware file to load
* @fw firmware structure to load to
*
@@ -32,9 +32,11 @@
* Firmware files released by NVIDIA will always follow this format.
*/
int
-nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
- const struct firmware **fw)
+nvkm_firmware_get_version(const struct nvkm_subdev *subdev, const char *fwname,
+ int min_version, int max_version,
+ const struct firmware **fw)
{
+ struct nvkm_device *device = subdev->device;
char f[64];
char cname[16];
int i;
@@ -48,8 +50,29 @@ nvkm_firmware_get(struct nvkm_device *device, const char *fwname,
cname[i] = tolower(cname[i]);
}
- snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
- return request_firmware(fw, f, device->dev);
+ for (i = max_version; i >= min_version; i--) {
+ if (i != 0)
+ snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, i);
+ else
+ snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname);
+
+ if (!firmware_request_nowarn(fw, f, device->dev)) {
+ nvkm_debug(subdev, "firmware \"%s\" loaded\n", f);
+ return i;
+ }
+
+ nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f);
+ }
+
+ nvkm_error(subdev, "failed to load firmware \"%s\"", fwname);
+ return -ENOENT;
+}
+
+int
+nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname,
+ const struct firmware **fw)
+{
+ return nvkm_firmware_get_version(subdev, fwname, 0, 0, fw);
}
/**
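nvkm_firmware_get_version() walks candidate firmware names from max_version down to min_version, trying "<name>-N.bin" and falling back to the bare "<name>.bin" for N == 0, and returns the version that actually loaded. The sketch below reproduces the probing loop in plain C, with fopen() standing in for the firmware loader and the directory layout assumed only for illustration:

    #include <stdio.h>

    /* Try "dir/name-N.bin" from max_version down to min_version, with N == 0
     * meaning the unversioned "dir/name.bin".  Returns the version found, or
     * -1 if none of the candidates exist. */
    static int firmware_get_version(const char *dir, const char *name,
                                    int min_version, int max_version,
                                    FILE **out)
    {
            char path[256];
            int i;

            for (i = max_version; i >= min_version; i--) {
                    if (i != 0)
                            snprintf(path, sizeof(path), "%s/%s-%d.bin", dir, name, i);
                    else
                            snprintf(path, sizeof(path), "%s/%s.bin", dir, name);

                    *out = fopen(path, "rb");
                    if (*out) {
                            printf("firmware \"%s\" loaded\n", path);
                            return i;
                    }
                    printf("firmware \"%s\" unavailable\n", path);
            }

            fprintf(stderr, "failed to load firmware \"%s\"\n", name);
            return -1;
    }

    int main(void)
    {
            FILE *fw;
            int ver = firmware_get_version("nvidia/gp102", "sec2/image", 0, 1, &fw);

            if (ver >= 0)
                    fclose(fw);
            return 0;
    }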
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 81a13cf9a292..c578deb5867a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2115,12 +2115,10 @@ int
gf100_gr_ctor_fw(struct gf100_gr *gr, const char *fwname,
struct gf100_gr_fuc *fuc)
{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
const struct firmware *fw;
int ret;
- ret = nvkm_firmware_get(device, fwname, &fw);
+ ret = nvkm_firmware_get(&gr->base.engine.subdev, fwname, &fw);
if (ret) {
ret = gf100_gr_ctor_fw_legacy(gr, fwname, fuc, ret);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
index 75dc06557877..dc80985cf093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.c
@@ -36,7 +36,7 @@ nvkm_acr_load_firmware(const struct nvkm_subdev *subdev, const char *name,
void *blob;
int ret;
- ret = nvkm_firmware_get(subdev->device, name, &fw);
+ ret = nvkm_firmware_get(subdev, name, &fw);
if (ret)
return ERR_PTR(ret);
if (fw->size < min_size) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 1df09ed6fe6d..4fd4cfe459b8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -229,6 +229,8 @@ struct acr_r352_lsf_wpr_header {
struct ls_ucode_img_r352 {
struct ls_ucode_img base;
+ const struct acr_r352_lsf_func *func;
+
struct acr_r352_lsf_wpr_header wpr_header;
struct acr_r352_lsf_lsb_header lsb_header;
};
@@ -243,6 +245,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
+ const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
struct ls_ucode_img_r352 *img;
int ret;
@@ -252,15 +255,16 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
-
- if (ret) {
+ ret = func->load(sb, func->version_max, &img->base);
+ if (ret < 0) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
kfree(img);
return ERR_PTR(ret);
}
+ img->func = func->version[ret];
+
/* Check that the signature size matches our expectations... */
if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -302,8 +306,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
struct acr_r352_lsf_wpr_header *whdr = &img->wpr_header;
struct acr_r352_lsf_lsb_header *lhdr = &img->lsb_header;
struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_ls_func *func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *func = img->func;
/* Fill WPR header */
whdr->falcon_id = _img->falcon_id;
@@ -419,8 +422,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
/* Figure out how large we need gdesc to be. */
list_for_each_entry(_img, imgs, node) {
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
+ const struct acr_r352_lsf_func *ls_func = img->func;
max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
}
@@ -433,8 +436,7 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
list_for_each_entry(_img, imgs, node) {
struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *ls_func = img->func;
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -1063,20 +1065,36 @@ acr_r352_dtor(struct nvkm_acr *_acr)
kfree(acr);
}
+static const struct acr_r352_lsf_func
+acr_r352_ls_fecs_func_0 = {
+ .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r352_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
+ .version_max = 0,
+ .version = {
+ &acr_r352_ls_fecs_func_0,
+ }
+};
+
+static const struct acr_r352_lsf_func
+acr_r352_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r352_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r352_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r352_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+ .version_max = 0,
+ .version = {
+ &acr_r352_ls_gpccs_func_0,
+ }
};
@@ -1150,12 +1168,20 @@ acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
desc->argv = addr_args;
}
+static const struct acr_r352_lsf_func
+acr_r352_ls_pmu_func_0 = {
+ .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
+};
+
static const struct acr_r352_ls_func
acr_r352_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
- .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
.post_run = acr_ls_pmu_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r352_ls_pmu_func_0,
+ }
};
const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index 3d58ab871563..e516cab849dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -47,24 +47,34 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
}
/**
- * struct acr_r352_ls_func - manages a single LS firmware
+ * struct acr_r352_lsf_func - manages a specific LS firmware version
*
- * @load: load the external firmware into a ls_ucode_img
* @generate_bl_desc: function called on a block of bl_desc_size to generate the
* proper bootloader descriptor for this LS firmware
* @bl_desc_size: size of the bootloader descriptor
- * @post_run: hook called right after the ACR is executed
* @lhdr_flags: LS flags
*/
-struct acr_r352_ls_func {
- int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
+struct acr_r352_lsf_func {
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
- int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
u32 lhdr_flags;
};
+/**
+ * struct acr_r352_ls_func - manages a single LS falcon
+ *
+ * @load: load the external firmware into a ls_ucode_img
+ * @post_run: hook called right after the ACR is executed
+ */
+struct acr_r352_ls_func {
+ int (*load)(const struct nvkm_secboot *, int maxver,
+ struct ls_ucode_img *);
+ int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
+ int version_max;
+ const struct acr_r352_lsf_func *version[];
+};
+
struct acr_r352;
/**
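After this split, acr_r352_ls_func describes a falcon (how to load its firmware and what to run afterwards) while acr_r352_lsf_func describes one firmware version of it: load() returns the version index it found, and the caller uses that index into version[]. A compact C illustration of the dispatch pattern, with made-up descriptor contents:

    #include <stdio.h>

    struct lsf_func {
            unsigned int bl_desc_size;      /* per-version bootloader descriptor size */
    };

    struct ls_func {
            int (*load)(int max_version);   /* returns the version index found, or < 0 */
            int version_max;
            const struct lsf_func *version[2];
    };

    static int load_sec2(int max_version)
    {
            /* Pretend only the newest firmware revision is present on disk. */
            return max_version;
    }

    static const struct lsf_func sec2_func_0 = { .bl_desc_size = 40 };
    static const struct lsf_func sec2_func_1 = { .bl_desc_size = 48 };

    static const struct ls_func sec2_func = {
            .load = load_sec2,
            .version_max = 1,
            .version = { &sec2_func_0, &sec2_func_1 },
    };

    int main(void)
    {
            int ver = sec2_func.load(sec2_func.version_max);

            if (ver < 0)
                    return 1;

            /* Select the per-version hooks matching the firmware that loaded. */
            printf("loaded version %d, bl_desc_size %u\n",
                   ver, sec2_func.version[ver]->bl_desc_size);
            return 0;
    }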
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
index 14b36ef93628..f6b2d20d7fc3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -66,20 +66,36 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
bl_desc->data_size = hdr->data_size;
}
+static const struct acr_r352_lsf_func
+acr_r361_ls_fecs_func_0 = {
+ .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r361_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_fecs_func_0,
+ }
+};
+
+static const struct acr_r352_lsf_func
+acr_r361_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r361_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r361_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r361_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_gpccs_func_0,
+ }
};
struct acr_r361_pmu_bl_desc {
@@ -125,12 +141,20 @@ acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
desc->argv = addr_args;
}
+static const struct acr_r352_lsf_func
+acr_r361_ls_pmu_func_0 = {
+ .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r361_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
- .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
.post_run = acr_ls_pmu_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_pmu_func_0,
+ }
};
static void
@@ -164,12 +188,20 @@ acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
desc->argv = 0x01000000;
}
-const struct acr_r352_ls_func
-acr_r361_ls_sec2_func = {
- .load = acr_ls_ucode_load_sec2,
+const struct acr_r352_lsf_func
+acr_r361_ls_sec2_func_0 = {
.generate_bl_desc = acr_r361_generate_sec2_bl_desc,
.bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+};
+
+static const struct acr_r352_ls_func
+acr_r361_ls_sec2_func = {
+ .load = acr_ls_ucode_load_sec2,
.post_run = acr_ls_sec2_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r361_ls_sec2_func_0,
+ }
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
index f9f978daadb9..38dec93779c8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
@@ -67,6 +67,5 @@ void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
-extern const struct acr_r352_ls_func acr_r361_ls_sec2_func;
-
+extern const struct acr_r352_lsf_func acr_r361_ls_sec2_func_0;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index 978ad0790367..472ced29da7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -22,6 +22,7 @@
#include "acr_r367.h"
#include "acr_r361.h"
+#include "acr_r370.h"
#include <core/gpuobj.h>
@@ -100,6 +101,8 @@ struct acr_r367_lsf_wpr_header {
struct ls_ucode_img_r367 {
struct ls_ucode_img base;
+ const struct acr_r352_lsf_func *func;
+
struct acr_r367_lsf_wpr_header wpr_header;
struct acr_r367_lsf_lsb_header lsb_header;
};
@@ -111,6 +114,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
+ const struct acr_r352_ls_func *func = acr->func->ls_func[falcon_id];
struct ls_ucode_img_r367 *img;
int ret;
@@ -120,14 +124,16 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
- if (ret) {
+ ret = func->load(sb, func->version_max, &img->base);
+ if (ret < 0) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
kfree(img);
return ERR_PTR(ret);
}
+ img->func = func->version[ret];
+
/* Check that the signature size matches our expectations... */
if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
nvkm_error(subdev, "invalid signature size for %s falcon!\n",
@@ -158,8 +164,7 @@ acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
struct ls_ucode_img_desc *desc = &_img->ucode_desc;
- const struct acr_r352_ls_func *func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *func = img->func;
/* Fill WPR header */
whdr->falcon_id = _img->falcon_id;
@@ -269,8 +274,8 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
u8 *gdesc;
list_for_each_entry(_img, imgs, node) {
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
+ const struct acr_r352_lsf_func *ls_func = img->func;
max_desc_size = max(max_desc_size, ls_func->bl_desc_size);
}
@@ -283,8 +288,7 @@ acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
list_for_each_entry(_img, imgs, node) {
struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
- const struct acr_r352_ls_func *ls_func =
- acr->func->ls_func[_img->falcon_id];
+ const struct acr_r352_lsf_func *ls_func = img->func;
nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
sizeof(img->wpr_header));
@@ -378,6 +382,17 @@ acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
}
}
+static const struct acr_r352_ls_func
+acr_r367_ls_sec2_func = {
+ .load = acr_ls_ucode_load_sec2,
+ .post_run = acr_ls_sec2_post_run,
+ .version_max = 1,
+ .version = {
+ &acr_r361_ls_sec2_func_0,
+ &acr_r370_ls_sec2_func_0,
+ }
+};
+
const struct acr_r352_func
acr_r367_func = {
.fixup_hs_desc = acr_r367_fixup_hs_desc,
@@ -391,7 +406,7 @@ acr_r367_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
[NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
- [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
+ [NVKM_SECBOOT_FALCON_SEC2] = &acr_r367_ls_sec2_func,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
index 2f890dfae7fc..e821d0fd6217 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.c
@@ -49,20 +49,36 @@ acr_r370_generate_flcn_bl_desc(const struct nvkm_acr *acr,
desc->data_size = pdesc->app_resident_data_size;
}
+static const struct acr_r352_lsf_func
+acr_r370_ls_fecs_func_0 = {
+ .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r370_ls_fecs_func = {
.load = acr_ls_ucode_load_fecs,
+ .version_max = 0,
+ .version = {
+ &acr_r370_ls_fecs_func_0,
+ }
+};
+
+static const struct acr_r352_lsf_func
+acr_r370_ls_gpccs_func_0 = {
.generate_bl_desc = acr_r370_generate_flcn_bl_desc,
.bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
const struct acr_r352_ls_func
acr_r370_ls_gpccs_func = {
.load = acr_ls_ucode_load_gpccs,
- .generate_bl_desc = acr_r370_generate_flcn_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
- /* GPCCS will be loaded using PRI */
- .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+ .version_max = 0,
+ .version = {
+ &acr_r370_ls_gpccs_func_0,
+ }
};
static void
@@ -95,12 +111,20 @@ acr_r370_generate_sec2_bl_desc(const struct nvkm_acr *acr,
desc->argv = 0x01000000;
}
+const struct acr_r352_lsf_func
+acr_r370_ls_sec2_func_0 = {
+ .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r370_ls_sec2_func = {
.load = acr_ls_ucode_load_sec2,
- .generate_bl_desc = acr_r370_generate_sec2_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_sec2_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r370_ls_sec2_func_0,
+ }
};
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
index 3426f86a15e4..2efed6f995ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r370.h
@@ -46,4 +46,5 @@ struct acr_r370_flcn_bl_desc {
void acr_r370_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
extern const struct acr_r352_ls_func acr_r370_ls_fecs_func;
extern const struct acr_r352_ls_func acr_r370_ls_gpccs_func;
+extern const struct acr_r352_lsf_func acr_r370_ls_sec2_func_0;
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
index 7bdef93cb7ae..8f0647766038 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -54,12 +54,20 @@ acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
desc->argv = addr_args;
}
+static const struct acr_r352_lsf_func
+acr_r375_ls_pmu_func_0 = {
+ .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
+};
+
const struct acr_r352_ls_func
acr_r375_ls_pmu_func = {
.load = acr_ls_ucode_load_pmu,
- .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
- .bl_desc_size = sizeof(struct acr_r370_flcn_bl_desc),
.post_run = acr_ls_pmu_post_run,
+ .version_max = 0,
+ .version = {
+ &acr_r375_ls_pmu_func_0,
+ }
};
const struct acr_r352_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 9b7c402594e8..d43f906da3a7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -147,11 +147,15 @@ struct fw_bl_desc {
u32 data_size;
};
-int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
+int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
+int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, int,
+ struct ls_ucode_img *);
int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index 1b0c793c0192..821d3b2bdb1f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -90,30 +90,30 @@ ls_ucode_img_build(const struct firmware *bl, const struct firmware *code,
* blob. Also generate the corresponding ucode descriptor.
*/
static int
-ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, struct ls_ucode_img *img,
- const char *falcon_name)
+ls_ucode_img_load_gr(const struct nvkm_subdev *subdev, int maxver,
+ struct ls_ucode_img *img, const char *falcon_name)
{
const struct firmware *bl, *code, *data, *sig;
char f[64];
int ret;
snprintf(f, sizeof(f), "gr/%s_bl", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &bl);
+ ret = nvkm_firmware_get(subdev, f, &bl);
if (ret)
goto error;
snprintf(f, sizeof(f), "gr/%s_inst", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &code);
+ ret = nvkm_firmware_get(subdev, f, &code);
if (ret)
goto free_bl;
snprintf(f, sizeof(f), "gr/%s_data", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &data);
+ ret = nvkm_firmware_get(subdev, f, &data);
if (ret)
goto free_inst;
snprintf(f, sizeof(f), "gr/%s_sig", falcon_name);
- ret = nvkm_firmware_get(subdev->device, f, &sig);
+ ret = nvkm_firmware_get(subdev, f, &sig);
if (ret)
goto free_data;
@@ -146,13 +146,15 @@ error:
}
int
-acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(&sb->subdev, img, "fecs");
+ return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "fecs");
}
int
-acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs");
+ return ls_ucode_img_load_gr(&sb->subdev, maxver, img, "gpccs");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index 1e1f1c635cab..77c13b096a67 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -39,32 +39,32 @@
*/
static int
acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
- struct ls_ucode_img *img)
+ int maxver, struct ls_ucode_img *img)
{
const struct firmware *image, *desc, *sig;
char f[64];
- int ret;
+ int ver, ret;
snprintf(f, sizeof(f), "%s/image", name);
- ret = nvkm_firmware_get(subdev->device, f, &image);
- if (ret)
- return ret;
+ ver = nvkm_firmware_get_version(subdev, f, 0, maxver, &image);
+ if (ver < 0)
+ return ver;
img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
nvkm_firmware_put(image);
if (!img->ucode_data)
return -ENOMEM;
snprintf(f, sizeof(f), "%s/desc", name);
- ret = nvkm_firmware_get(subdev->device, f, &desc);
- if (ret)
+ ret = nvkm_firmware_get_version(subdev, f, ver, ver, &desc);
+ if (ret < 0)
return ret;
memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
img->ucode_size = ALIGN(img->ucode_desc.app_start_offset + img->ucode_desc.app_size, 256);
nvkm_firmware_put(desc);
snprintf(f, sizeof(f), "%s/sig", name);
- ret = nvkm_firmware_get(subdev->device, f, &sig);
- if (ret)
+ ret = nvkm_firmware_get_version(subdev, f, ver, ver, &sig);
+ if (ret < 0)
return ret;
img->sig_size = sig->size;
img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
@@ -72,7 +72,7 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
if (!img->sig)
return -ENOMEM;
- return 0;
+ return ver;
}
static int
@@ -99,12 +99,13 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
}
int
-acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
struct nvkm_pmu *pmu = sb->subdev.device->pmu;
int ret;
- ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img);
+ ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", maxver, img);
if (ret)
return ret;
@@ -136,14 +137,15 @@ acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
}
int
-acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
+acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, int maxver,
+ struct ls_ucode_img *img)
{
struct nvkm_sec2 *sec = sb->subdev.device->sec2;
- int ret;
+ int ver, ret;
- ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img);
- if (ret)
- return ret;
+ ver = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", maxver, img);
+ if (ver < 0)
+ return ver;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
@@ -151,7 +153,7 @@ acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
if (ret)
return ret;
- return 0;
+ return ver;
}
int
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 447e96f9d259..12ed5265a90b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -916,29 +916,17 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct vop *vop = to_vop(plane->state->crtc);
- struct drm_plane_state *plane_state;
-
- plane_state = plane->funcs->atomic_duplicate_state(plane);
- plane_state->crtc_x = new_state->crtc_x;
- plane_state->crtc_y = new_state->crtc_y;
- plane_state->crtc_h = new_state->crtc_h;
- plane_state->crtc_w = new_state->crtc_w;
- plane_state->src_x = new_state->src_x;
- plane_state->src_y = new_state->src_y;
- plane_state->src_h = new_state->src_h;
- plane_state->src_w = new_state->src_w;
-
- if (plane_state->fb != new_state->fb)
- drm_atomic_set_fb_for_plane(plane_state, new_state->fb);
-
- swap(plane_state, plane->state);
-
- if (plane->state->fb && plane->state->fb != new_state->fb) {
- drm_framebuffer_get(plane->state->fb);
- WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
- drm_flip_work_queue(&vop->fb_unref_work, plane->state->fb);
- set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
- }
+ struct drm_framebuffer *old_fb = plane->state->fb;
+
+ plane->state->crtc_x = new_state->crtc_x;
+ plane->state->crtc_y = new_state->crtc_y;
+ plane->state->crtc_h = new_state->crtc_h;
+ plane->state->crtc_w = new_state->crtc_w;
+ plane->state->src_x = new_state->src_x;
+ plane->state->src_y = new_state->src_y;
+ plane->state->src_h = new_state->src_h;
+ plane->state->src_w = new_state->src_w;
+ swap(plane->state->fb, new_state->fb);
if (vop->is_enabled) {
rockchip_drm_psr_inhibit_get_state(new_state->state);
@@ -947,9 +935,22 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
rockchip_drm_psr_inhibit_put_state(new_state->state);
- }
- plane->funcs->atomic_destroy_state(plane, plane_state);
+ /*
+ * A scanout can still be occurring, so we can't drop the
+ * reference to the old framebuffer. To solve this we get a
+ * reference to old_fb and queue a worker to release it later.
+ * FIXME: if we perform 500 async_update calls before the
+ * vblank, then we can have 500 different framebuffers waiting
+ * to be released.
+ */
+ if (old_fb && plane->state->fb != old_fb) {
+ drm_framebuffer_get(old_fb);
+ WARN_ON(drm_crtc_vblank_get(plane->state->crtc) != 0);
+ drm_flip_work_queue(&vop->fb_unref_work, old_fb);
+ set_bit(VOP_PENDING_FB_UNREF, &vop->pending);
+ }
+ }
}
static const struct drm_plane_helper_funcs plane_helper_funcs = {
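Beyond swapping the fb pointers, the VOP path must keep the old framebuffer alive until the next vblank because scanout may still be reading from it, so it takes an extra reference on old_fb and queues it to the existing fb_unref flip worker rather than dropping it immediately. A reference-counting sketch of that hand-off, with the queue and refcount as simplified stand-ins for the DRM helpers:

    #include <stdio.h>

    struct fb {
            int refcount;
            const char *name;
    };

    static void fb_get(struct fb *fb) { fb->refcount++; }

    static void fb_put(struct fb *fb)
    {
            if (--fb->refcount == 0)
                    printf("%s freed\n", fb->name);
    }

    /* Stand-in for drm_flip_work_queue(): the release runs later, at vblank. */
    static struct fb *pending_unref;

    static void queue_unref_at_vblank(struct fb *fb) { pending_unref = fb; }

    static void async_update(struct fb **scanout_fb, struct fb *new_fb)
    {
            struct fb *old_fb = *scanout_fb;

            *scanout_fb = new_fb;

            if (old_fb && old_fb != new_fb) {
                    /* Scanout may still read old_fb: hold it until vblank. */
                    fb_get(old_fb);
                    queue_unref_at_vblank(old_fb);
            }

            /* The reference the plane state held on old_fb is dropped as usual. */
            if (old_fb)
                    fb_put(old_fb);
    }

    int main(void)
    {
            struct fb a = { 1, "fb-a" }, b = { 1, "fb-b" };
            struct fb *scanout = &a;

            async_update(&scanout, &b);     /* fb-a survives: vblank worker holds it */
            fb_put(pending_unref);          /* vblank: now fb-a is freed             */
            return 0;
    }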
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4d918d3e4858..afc80b245ea3 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1025,7 +1025,7 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
{
struct vc4_plane_state *vc4_state, *new_vc4_state;
- drm_atomic_set_fb_for_plane(plane->state, state->fb);
+ swap(plane->state->fb, state->fb);
plane->state->crtc_x = state->crtc_x;
plane->state->crtc_y = state->crtc_y;
plane->state->crtc_w = state->crtc_w;
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 35d58736a3ed..05e120e01cb4 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -633,7 +633,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
if (err)
goto free_hwmon;
- if (dev && chip && chip->ops->read &&
+ if (dev && dev->of_node && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
const struct hwmon_channel_info **info = chip->info;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index ef7ee90ee785..8470097907bc 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1217,7 +1217,8 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
const struct pmbus_driver_info *info,
const char *name,
int index, int page,
- const struct pmbus_sensor_attr *attr)
+ const struct pmbus_sensor_attr *attr,
+ bool paged)
{
struct pmbus_sensor *base;
bool upper = !!(attr->gbit & 0xff00); /* need to check STATUS_WORD */
@@ -1225,7 +1226,7 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
if (attr->label) {
ret = pmbus_add_label(data, name, index, attr->label,
- attr->paged ? page + 1 : 0);
+ paged ? page + 1 : 0);
if (ret)
return ret;
}
@@ -1258,6 +1259,30 @@ static int pmbus_add_sensor_attrs_one(struct i2c_client *client,
return 0;
}
+static bool pmbus_sensor_is_paged(const struct pmbus_driver_info *info,
+ const struct pmbus_sensor_attr *attr)
+{
+ int p;
+
+ if (attr->paged)
+ return true;
+
+ /*
+ * Some attributes may be present on more than one page despite
+ * not being marked with the paged attribute. If that is the case,
+ * then treat the sensor as being paged and add the page suffix to the
+ * attribute name.
+ * We don't just add the paged attribute to all such attributes, in
+ * order to maintain the un-suffixed labels in the case where the
+ * attribute is only on page 0.
+ */
+ for (p = 1; p < info->pages; p++) {
+ if (info->func[p] & attr->func)
+ return true;
+ }
+ return false;
+}
+
static int pmbus_add_sensor_attrs(struct i2c_client *client,
struct pmbus_data *data,
const char *name,
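pmbus_sensor_is_paged() treats a sensor as paged not only when its attribute says so, but also when the same functionality bit appears on any page beyond page 0, so such sensors get page-suffixed labels while attributes that truly exist only on page 0 keep their bare names. A small C model of that decision; the functionality bit below is illustrative, not the driver's constant:

    #include <stdbool.h>
    #include <stdio.h>

    #define HAVE_TEMP       (1u << 0)       /* illustrative functionality bit */

    struct driver_info {
            int pages;
            unsigned int func[8];           /* per-page functionality bitmap */
    };

    struct sensor_attr {
            unsigned int func;
            bool paged;
    };

    static bool sensor_is_paged(const struct driver_info *info,
                                const struct sensor_attr *attr)
    {
            int p;

            if (attr->paged)
                    return true;

            /* Present on any page beyond 0? Then label it per page anyway. */
            for (p = 1; p < info->pages; p++) {
                    if (info->func[p] & attr->func)
                            return true;
            }
            return false;
    }

    int main(void)
    {
            struct driver_info info = {
                    .pages = 2,
                    .func = { HAVE_TEMP, HAVE_TEMP },
            };
            struct sensor_attr temp = { .func = HAVE_TEMP, .paged = false };

            printf("paged: %d\n", sensor_is_paged(&info, &temp));  /* prints 1 */
            return 0;
    }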
@@ -1271,14 +1296,15 @@ static int pmbus_add_sensor_attrs(struct i2c_client *client,
index = 1;
for (i = 0; i < nattrs; i++) {
int page, pages;
+ bool paged = pmbus_sensor_is_paged(info, attrs);
- pages = attrs->paged ? info->pages : 1;
+ pages = paged ? info->pages : 1;
for (page = 0; page < pages; page++) {
if (!(info->func[page] & attrs->func))
continue;
ret = pmbus_add_sensor_attrs_one(client, data, info,
name, index, page,
- attrs);
+ attrs, paged);
if (ret)
return ret;
index++;
@@ -1942,11 +1968,14 @@ static ssize_t pmbus_set_samples(struct device *dev,
long val;
struct i2c_client *client = to_i2c_client(dev->parent);
struct pmbus_samples_reg *reg = to_samples_reg(devattr);
+ struct pmbus_data *data = i2c_get_clientdata(client);
if (kstrtol(buf, 0, &val) < 0)
return -EINVAL;
+ mutex_lock(&data->update_lock);
ret = _pmbus_write_word_data(client, reg->page, reg->attr->reg, val);
+ mutex_unlock(&data->update_lock);
return ret ? : count;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 0fea7c54f788..37b3b9307d07 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -709,11 +709,16 @@ static const struct i2c_algorithm xiic_algorithm = {
.functionality = xiic_func,
};
+static const struct i2c_adapter_quirks xiic_quirks = {
+ .max_read_len = 255,
+};
+
static const struct i2c_adapter xiic_adapter = {
.owner = THIS_MODULE,
.name = DRIVER_NAME,
.class = I2C_CLASS_DEPRECATED,
.algo = &xiic_algorithm,
+ .quirks = &xiic_quirks,
};
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 78dc07c6ac4b..29f7b15c81d9 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -409,27 +409,44 @@ static int rename_compat_devs(struct ib_device *device)
int ib_device_rename(struct ib_device *ibdev, const char *name)
{
+ unsigned long index;
+ void *client_data;
int ret;
down_write(&devices_rwsem);
if (!strcmp(name, dev_name(&ibdev->dev))) {
- ret = 0;
- goto out;
+ up_write(&devices_rwsem);
+ return 0;
}
if (__ib_device_get_by_name(name)) {
- ret = -EEXIST;
- goto out;
+ up_write(&devices_rwsem);
+ return -EEXIST;
}
ret = device_rename(&ibdev->dev, name);
- if (ret)
- goto out;
+ if (ret) {
+ up_write(&devices_rwsem);
+ return ret;
+ }
+
strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
ret = rename_compat_devs(ibdev);
-out:
- up_write(&devices_rwsem);
- return ret;
+
+ downgrade_write(&devices_rwsem);
+ down_read(&ibdev->client_data_rwsem);
+ xan_for_each_marked(&ibdev->client_data, index, client_data,
+ CLIENT_DATA_REGISTERED) {
+ struct ib_client *client = xa_load(&clients, index);
+
+ if (!client || !client->rename)
+ continue;
+
+ client->rename(ibdev, client_data);
+ }
+ up_read(&ibdev->client_data_rwsem);
+ up_read(&devices_rwsem);
+ return 0;
}
static int alloc_name(struct ib_device *ibdev, const char *name)
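ib_device_rename() now notifies every registered client that provides a rename() hook, handing each one its per-device client_data; the srp_rename_dev() addition at the end of this diff is one such consumer. A stripped-down C sketch of the callback registry, with the xarray bookkeeping and locking omitted:

    #include <stdio.h>

    struct ib_device {
            char name[64];
    };

    struct client {
            const char *name;
            void (*rename)(struct ib_device *dev, void *client_data);
            void *client_data;
    };

    static void srp_rename(struct ib_device *dev, void *client_data)
    {
            /* A client such as srp would re-derive its exported names here. */
            printf("srp: device renamed to %s (client data: %s)\n",
                   dev->name, (const char *)client_data);
    }

    static void device_rename(struct ib_device *dev, const char *name,
                              struct client *clients, int nclients)
    {
            int i;

            snprintf(dev->name, sizeof(dev->name), "%s", name);

            /* Tell every client that registered a rename hook. */
            for (i = 0; i < nclients; i++) {
                    if (clients[i].rename)
                            clients[i].rename(dev, clients[i].client_data);
            }
    }

    int main(void)
    {
            struct ib_device dev = { "mlx5_0" };
            struct client clients[] = {
                    { "srp", srp_rename, "srp-host-list" },
                    { "legacy", NULL, NULL },       /* no rename hook: skipped */
            };

            device_rename(&dev, "roce0", clients, 2);
            return 0;
    }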
@@ -474,14 +491,15 @@ static void ib_device_release(struct device *device)
free_netdevs(dev);
WARN_ON(refcount_read(&dev->refcount));
- ib_cache_release_one(dev);
- ib_security_release_port_pkey_list(dev);
- xa_destroy(&dev->compat_devs);
- xa_destroy(&dev->client_data);
- if (dev->port_data)
+ if (dev->port_data) {
+ ib_cache_release_one(dev);
+ ib_security_release_port_pkey_list(dev);
kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
pdata[0]),
rcu_head);
+ }
+ xa_destroy(&dev->compat_devs);
+ xa_destroy(&dev->client_data);
kfree_rcu(dev, rcu_head);
}
@@ -1935,6 +1953,9 @@ static void free_netdevs(struct ib_device *ib_dev)
unsigned long flags;
unsigned int port;
+ if (!ib_dev->port_data)
+ return;
+
rdma_for_each_port (ib_dev, port) {
struct ib_port_data *pdata = &ib_dev->port_data[port];
struct net_device *ndev;
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index 5445323629b5..e63fbda25e1d 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -110,6 +110,8 @@ int uverbs_output_written(const struct uverbs_attr_bundle *bundle, size_t idx);
void setup_ufile_idr_uobject(struct ib_uverbs_file *ufile);
void release_ufile_idr_uobject(struct ib_uverbs_file *ufile);
+struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs);
+
/*
* This is the runtime description of the uverbs API, used by the syscall
* machinery to validate and dispatch calls.
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 5a3a1780ceea..63fe14c7c68f 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -174,6 +174,17 @@ static int uverbs_request_finish(struct uverbs_req_iter *iter)
return 0;
}
+/*
+ * When calling a destroy function during an error unwind we need to pass in
+ * udata that has been sanitized of all user arguments, i.e. from the driver's
+ * perspective it looks like no udata was passed.
+ */
+struct ib_udata *uverbs_get_cleared_udata(struct uverbs_attr_bundle *attrs)
+{
+ attrs->driver_udata = (struct ib_udata){};
+ return &attrs->driver_udata;
+}
+
static struct ib_uverbs_completion_event_file *
_ib_uverbs_lookup_comp_file(s32 fd, struct uverbs_attr_bundle *attrs)
{
@@ -441,7 +452,7 @@ static int ib_uverbs_alloc_pd(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(uobj, attrs);
err_copy:
- ib_dealloc_pd_user(pd, &attrs->driver_udata);
+ ib_dealloc_pd_user(pd, uverbs_get_cleared_udata(attrs));
pd = NULL;
err_alloc:
kfree(pd);
@@ -644,7 +655,7 @@ err_copy:
}
err_dealloc_xrcd:
- ib_dealloc_xrcd(xrcd, &attrs->driver_udata);
+ ib_dealloc_xrcd(xrcd, uverbs_get_cleared_udata(attrs));
err:
uobj_alloc_abort(&obj->uobject, attrs);
@@ -767,7 +778,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(uobj, attrs);
err_copy:
- ib_dereg_mr_user(mr, &attrs->driver_udata);
+ ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
err_put:
uobj_put_obj_read(pd);
@@ -1042,7 +1053,7 @@ static struct ib_ucq_object *create_cq(struct uverbs_attr_bundle *attrs,
return obj;
err_cb:
- ib_destroy_cq(cq);
+ ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
err_file:
if (ev_file)
@@ -1478,7 +1489,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_cb:
- ib_destroy_qp(qp);
+ ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
err_put:
if (!IS_ERR(xrcd_uobj))
@@ -1611,7 +1622,7 @@ static int ib_uverbs_open_qp(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_destroy:
- ib_destroy_qp(qp);
+ ib_destroy_qp_user(qp, uverbs_get_cleared_udata(attrs));
err_xrcd:
uobj_put_read(xrcd_uobj);
err_put:
@@ -2453,7 +2464,8 @@ static int ib_uverbs_create_ah(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(uobj, attrs);
err_copy:
- rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
+ rdma_destroy_ah_user(ah, RDMA_DESTROY_AH_SLEEPABLE,
+ uverbs_get_cleared_udata(attrs));
err_put:
uobj_put_obj_read(pd);
@@ -2964,7 +2976,7 @@ static int ib_uverbs_ex_create_wq(struct uverbs_attr_bundle *attrs)
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_copy:
- ib_destroy_wq(wq, &attrs->driver_udata);
+ ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
err_put_cq:
uobj_put_obj_read(cq);
err_put_pd:
@@ -3464,7 +3476,7 @@ static int __uverbs_create_xsrq(struct uverbs_attr_bundle *attrs,
return uobj_alloc_commit(&obj->uevent.uobject, attrs);
err_copy:
- ib_destroy_srq_user(srq, &attrs->driver_udata);
+ ib_destroy_srq_user(srq, uverbs_get_cleared_udata(attrs));
err_free:
kfree(srq);
diff --git a/drivers/infiniband/core/uverbs_std_types_cq.c b/drivers/infiniband/core/uverbs_std_types_cq.c
index db5c46a1bb2d..07ea4e3c4566 100644
--- a/drivers/infiniband/core/uverbs_std_types_cq.c
+++ b/drivers/infiniband/core/uverbs_std_types_cq.c
@@ -135,7 +135,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_CQ_CREATE)(
return 0;
err_cq:
- ib_destroy_cq(cq);
+ ib_destroy_cq_user(cq, uverbs_get_cleared_udata(attrs));
err_event_file:
if (ev_file)
diff --git a/drivers/infiniband/core/uverbs_std_types_mr.c b/drivers/infiniband/core/uverbs_std_types_mr.c
index 610d3b9f7654..997f7a3a558a 100644
--- a/drivers/infiniband/core/uverbs_std_types_mr.c
+++ b/drivers/infiniband/core/uverbs_std_types_mr.c
@@ -148,7 +148,7 @@ static int UVERBS_HANDLER(UVERBS_METHOD_DM_MR_REG)(
return 0;
err_dereg:
- ib_dereg_mr_user(mr, &attrs->driver_udata);
+ ib_dereg_mr_user(mr, uverbs_get_cleared_udata(attrs));
return ret;
}
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 6d6886c9009f..0fea5d63fdbe 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -1728,7 +1728,6 @@ int efa_mmap(struct ib_ucontext *ibucontext,
ibdev_dbg(&dev->ibdev, "Mapping executable pages is not permitted\n");
return -EPERM;
}
- vma->vm_flags &= ~VM_MAYEXEC;
return __efa_mmap(dev, ucontext, vma, key, length);
}
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 310105d4e3de..4221a99ee7f4 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -9850,6 +9850,7 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
/* disable the port */
clear_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
+ cancel_work_sync(&ppd->freeze_work);
}
static inline int init_cpu_counters(struct hfi1_devdata *dd)
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 0cd71ce7cc71..3592a9ec155e 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -324,6 +324,9 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
u32 *tidlist = NULL;
struct tid_user_buf *tidbuf;
+ if (!PAGE_ALIGNED(tinfo->vaddr))
+ return -EINVAL;
+
tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
if (!tidbuf)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 1eb4105b2d22..a2b26a635baf 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1356,8 +1356,6 @@ static void hfi1_fill_device_attr(struct hfi1_devdata *dd)
rdi->dparms.props.max_cq = hfi1_max_cqs;
rdi->dparms.props.max_ah = hfi1_max_ahs;
rdi->dparms.props.max_cqe = hfi1_max_cqes;
- rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_pd = hfi1_max_pds;
rdi->dparms.props.max_qp_rd_atom = HFI1_MAX_RDMA_ATOMIC;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 4c5d0f160c10..e068a02122f5 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -899,6 +899,7 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
dev_err(dev, "Destroy cq for mr_free failed(%d)!\n", ret);
hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
+ kfree(&free_mr->mr_free_pd->ibpd);
}
static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index e3ec79b8f7f5..6c8645033102 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -190,12 +190,12 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
u16 uid, phys_addr_t *addr, u32 *obj_id)
{
struct mlx5_core_dev *dev = dm->dev;
- u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
u32 in[MLX5_ST_SZ_DW(create_sw_icm_in)] = {};
unsigned long *block_map;
u64 icm_start_addr;
u32 log_icm_size;
+ u32 num_blocks;
u32 max_blocks;
u64 block_idx;
void *sw_icm;
@@ -224,6 +224,8 @@ int mlx5_cmd_alloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
return -EINVAL;
}
+ num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
max_blocks = BIT(log_icm_size - MLX5_LOG_SW_ICM_BLOCK_SIZE(dev));
spin_lock(&dm->lock);
block_idx = bitmap_find_next_zero_area(block_map,
@@ -266,13 +268,16 @@ int mlx5_cmd_dealloc_sw_icm(struct mlx5_dm *dm, int type, u64 length,
u16 uid, phys_addr_t addr, u32 obj_id)
{
struct mlx5_core_dev *dev = dm->dev;
- u32 num_blocks = DIV_ROUND_UP(length, MLX5_SW_ICM_BLOCK_SIZE(dev));
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
unsigned long *block_map;
+ u32 num_blocks;
u64 start_idx;
int err;
+ num_blocks = (length + MLX5_SW_ICM_BLOCK_SIZE(dev) - 1) >>
+ MLX5_LOG_SW_ICM_BLOCK_SIZE(dev);
+
switch (type) {
case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
start_idx =
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index abac70ad5c7c..340290b883fe 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2344,7 +2344,7 @@ static int handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
/* Allocation size must be a multiple of the basic block size
* and a power of 2.
*/
- act_size = roundup(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
+ act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dm_db->dev));
act_size = roundup_pow_of_two(act_size);
dm->size = act_size;
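
Both mlx5 hunks above lean on MLX5_SW_ICM_BLOCK_SIZE() being a power of two: the block count becomes a shift and round_up() a mask, presumably so that no 64-bit division is needed on 32-bit builds. The snippet below is a minimal userspace sketch of that equivalence; block_size and log_block_size are made-up example values, not driver fields.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Assumed example value; the real block size comes from device caps. */
        const unsigned int log_block_size = 12;            /* log2(block size) */
        const uint64_t block_size = 1ULL << log_block_size;
        uint64_t length;

        for (length = 1; length < 5 * block_size; length += 997) {
                /* Generic forms (need a 64-bit divide). */
                uint64_t blocks_div = (length + block_size - 1) / block_size;
                uint64_t rounded_div = blocks_div * block_size;

                /* Power-of-two forms used in the patch: shift and mask. */
                uint64_t blocks_shift = (length + block_size - 1) >> log_block_size;
                uint64_t rounded_mask = (length + block_size - 1) & ~(block_size - 1);

                assert(blocks_div == blocks_shift);
                assert(rounded_div == rounded_mask);
        }
        printf("shift/mask forms match the generic division forms\n");
        return 0;
}
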
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 5ff32d32c61c..2c4e569ce438 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -1459,8 +1459,6 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_cq = ib_qib_max_cqs;
rdi->dparms.props.max_cqe = ib_qib_max_cqes;
rdi->dparms.props.max_ah = ib_qib_max_ahs;
- rdi->dparms.props.max_mr = rdi->lkey_table.max;
- rdi->dparms.props.max_fmr = rdi->lkey_table.max;
rdi->dparms.props.max_map_per_fmr = 32767;
rdi->dparms.props.max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
rdi->dparms.props.max_qp_init_rd_atom = 255;
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 54f3f9c27552..f48240f66b8f 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -96,6 +96,8 @@ int rvt_driver_mr_init(struct rvt_dev_info *rdi)
for (i = 0; i < rdi->lkey_table.max; i++)
RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);
+ rdi->dparms.props.max_mr = rdi->lkey_table.max;
+ rdi->dparms.props.max_fmr = rdi->lkey_table.max;
return 0;
}
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 31a2e65e4906..c5a50614a6c6 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -594,7 +594,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
offset = qpt->incr | ((offset & 1) ^ 1);
}
/* there can be no set bits in low-order QoS bits */
- WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
+ WARN_ON(rdi->dparms.qos_shift > 1 &&
+ offset & ((BIT(rdi->dparms.qos_shift - 1) - 1) << 1));
qpn = mk_qpn(qpt, map, offset);
}
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index be9ddcad8f28..4305da2c9037 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -148,6 +148,7 @@ MODULE_PARM_DESC(ch_count,
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
+static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
@@ -162,7 +163,8 @@ static struct workqueue_struct *srp_remove_wq;
static struct ib_client srp_client = {
.name = "srp",
.add = srp_add_one,
- .remove = srp_remove_one
+ .remove = srp_remove_one,
+ .rename = srp_rename_dev
};
static struct ib_sa_client srp_sa_client;
@@ -4112,6 +4114,20 @@ free_host:
return NULL;
}
+static void srp_rename_dev(struct ib_device *device, void *client_data)
+{
+ struct srp_device *srp_dev = client_data;
+ struct srp_host *host, *tmp_host;
+
+ list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
+ char name[IB_DEVICE_NAME_MAX + 8];
+
+ snprintf(name, sizeof(name), "srp-%s-%d",
+ dev_name(&device->dev), host->port);
+ device_rename(&host->dev, name);
+ }
+}
+
static void srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index aba50ec98b4d..9545e87b6085 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -694,13 +694,13 @@ static void h_mspro_block_setup_cmd(struct memstick_dev *card, u64 offset,
/*** Data transfer ***/
-static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
+static int mspro_block_issue_req(struct memstick_dev *card)
{
struct mspro_block_data *msb = memstick_get_drvdata(card);
u64 t_off;
unsigned int count;
- while (chunk) {
+ while (true) {
msb->current_page = 0;
msb->current_seg = 0;
msb->seg_count = blk_rq_map_sg(msb->block_req->q,
@@ -709,6 +709,7 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
if (!msb->seg_count) {
unsigned int bytes = blk_rq_cur_bytes(msb->block_req);
+ bool chunk;
chunk = blk_update_request(msb->block_req,
BLK_STS_RESOURCE,
@@ -718,7 +719,7 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
__blk_mq_end_request(msb->block_req,
BLK_STS_RESOURCE);
msb->block_req = NULL;
- break;
+ return -EAGAIN;
}
t_off = blk_rq_pos(msb->block_req);
@@ -735,8 +736,6 @@ static int mspro_block_issue_req(struct memstick_dev *card, bool chunk)
memstick_new_req(card->host);
return 0;
}
-
- return 1;
}
static int mspro_block_complete_req(struct memstick_dev *card, int error)
@@ -779,7 +778,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
chunk = blk_update_request(msb->block_req,
errno_to_blk_status(error), t_len);
if (chunk) {
- error = mspro_block_issue_req(card, chunk);
+ error = mspro_block_issue_req(card);
if (!error)
goto out;
} else {
@@ -849,7 +848,7 @@ static blk_status_t mspro_queue_rq(struct blk_mq_hw_ctx *hctx,
msb->block_req = bd->rq;
blk_mq_start_request(bd->rq);
- if (mspro_block_issue_req(card, true))
+ if (mspro_block_issue_req(card))
msb->block_req = NULL;
spin_unlock_irq(&msb->q_lock);
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 7618b65aab34..3bc51f19c734 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -772,6 +772,8 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if ((m->addr == 0x0) || (m->size == 0))
return -EINVAL;
+ if (m->size > ULONG_MAX - PAGE_SIZE - (m->addr & ~PAGE_MASK))
+ return -EINVAL;
map_addr = (m->addr & PAGE_MASK);
map_size = round_up(m->size + (m->addr & ~PAGE_MASK), PAGE_SIZE);
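
For context on the genwqe checks added above: without the guard, round_up(size + offset, PAGE_SIZE) can wrap around for user-supplied sizes near ULONG_MAX and yield a tiny mapping length. A minimal userspace sketch of the failure mode and the guard, assuming a 4 KiB page size in place of the kernel's PAGE_SIZE:

#include <limits.h>
#include <stdio.h>

#define FAKE_PAGE_SIZE 4096UL   /* stand-in for PAGE_SIZE */

static unsigned long round_up_ul(unsigned long x, unsigned long a)
{
        return (x + a - 1) & ~(a - 1);  /* a must be a power of two */
}

int main(void)
{
        unsigned long offs = 123;
        unsigned long size = ULONG_MAX - 100;   /* hostile user-supplied size */

        if (size > ULONG_MAX - FAKE_PAGE_SIZE - offs) {
                printf("rejected: size + offs would overflow when rounded up\n");
                return 0;
        }
        /* Without the check above, this wraps to a small value. */
        printf("mapped length: %lu\n", round_up_ul(size + offs, FAKE_PAGE_SIZE));
        return 0;
}
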
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 0ddc28961524..2e1c4d2905e8 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -578,6 +578,10 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
/* determine space needed for page_list. */
data = (unsigned long)uaddr;
offs = offset_in_page(data);
+ if (size > ULONG_MAX - PAGE_SIZE - offs) {
+ m->size = 0; /* mark unused and not added */
+ return -EINVAL;
+ }
m->nr_pages = DIV_ROUND_UP(offs + size, PAGE_SIZE);
m->page_list = kcalloc(m->nr_pages,
diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c
index 4804cdcf4c48..f4c92f110a72 100644
--- a/drivers/misc/habanalabs/context.c
+++ b/drivers/misc/habanalabs/context.c
@@ -26,6 +26,12 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
dma_fence_put(ctx->cs_pending[i]);
if (ctx->asid != HL_KERNEL_ASID_ID) {
+ /*
+ * The engines are stopped as there is no executing CS, but the
+ * Coresight might still be working by accessing addresses
+ * related to the stopped engines. Hence stop it explicitly.
+ */
+ hdev->asic_funcs->halt_coresight(hdev);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
}
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a4447699ff4e..ba418aaa404c 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -459,41 +459,31 @@ static ssize_t mmu_write(struct file *file, const char __user *buf,
struct hl_debugfs_entry *entry = s->private;
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- char kbuf[MMU_KBUF_SIZE], asid_kbuf[MMU_ASID_BUF_SIZE],
- addr_kbuf[MMU_ADDR_BUF_SIZE];
+ char kbuf[MMU_KBUF_SIZE];
char *c;
ssize_t rc;
if (!hdev->mmu_enable)
return count;
- memset(kbuf, 0, sizeof(kbuf));
- memset(asid_kbuf, 0, sizeof(asid_kbuf));
- memset(addr_kbuf, 0, sizeof(addr_kbuf));
-
+ if (count > sizeof(kbuf) - 1)
+ goto err;
if (copy_from_user(kbuf, buf, count))
goto err;
-
- kbuf[MMU_KBUF_SIZE - 1] = 0;
+ kbuf[count] = 0;
c = strchr(kbuf, ' ');
if (!c)
goto err;
+ *c = '\0';
- memcpy(asid_kbuf, kbuf, c - kbuf);
-
- rc = kstrtouint(asid_kbuf, 10, &dev_entry->mmu_asid);
+ rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
if (rc)
goto err;
- c = strstr(kbuf, " 0x");
- if (!c)
+ if (strncmp(c+1, "0x", 2))
goto err;
-
- c += 3;
- memcpy(addr_kbuf, c, (kbuf + count) - c);
-
- rc = kstrtoull(addr_kbuf, 16, &dev_entry->mmu_addr);
+ rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
if (rc)
goto err;
@@ -510,6 +500,7 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
{
struct hl_ctx *ctx = hdev->user_ctx;
u64 hop_addr, hop_pte_addr, hop_pte;
+ u64 offset_mask = HOP4_MASK | OFFSET_MASK;
int rc = 0;
if (!ctx) {
@@ -552,12 +543,14 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr,
goto not_mapped;
hop_pte_addr = get_hop4_pte_addr(ctx, hop_addr, virt_addr);
hop_pte = hdev->asic_funcs->read_pte(hdev, hop_pte_addr);
+
+ offset_mask = OFFSET_MASK;
}
if (!(hop_pte & PAGE_PRESENT_MASK))
goto not_mapped;
- *phys_addr = (hop_pte & PTE_PHYS_ADDR_MASK) | (virt_addr & OFFSET_MASK);
+ *phys_addr = (hop_pte & ~offset_mask) | (virt_addr & offset_mask);
goto out;
@@ -600,10 +593,8 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
}
sprintf(tmp_buf, "0x%08x\n", val);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
-
- return rc;
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
}
static ssize_t hl_data_write32(struct file *f, const char __user *buf,
@@ -645,7 +636,6 @@ static ssize_t hl_get_power_state(struct file *f, char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[200];
- ssize_t rc;
int i;
if (*ppos)
@@ -660,10 +650,8 @@ static ssize_t hl_get_power_state(struct file *f, char __user *buf,
sprintf(tmp_buf,
"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
-
- return rc;
+ return simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
}
static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
@@ -716,8 +704,8 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
}
sprintf(tmp_buf, "0x%02x\n", val);
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
+ rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
+ strlen(tmp_buf));
return rc;
}
@@ -806,18 +794,9 @@ static ssize_t hl_led2_write(struct file *f, const char __user *buf,
static ssize_t hl_device_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
- char tmp_buf[200];
- ssize_t rc;
-
- if (*ppos)
- return 0;
-
- sprintf(tmp_buf,
- "Valid values: disable, enable, suspend, resume, cpu_timeout\n");
- rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
- strlen(tmp_buf) + 1);
-
- return rc;
+ static const char *help =
+ "Valid values: disable, enable, suspend, resume, cpu_timeout\n";
+ return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}
static ssize_t hl_device_write(struct file *f, const char __user *buf,
@@ -825,7 +804,7 @@ static ssize_t hl_device_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- char data[30];
+ char data[30] = {0};
/* don't allow partial writes */
if (*ppos != 0)
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 91a9e47a3482..0b19d3eefb98 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -231,6 +231,7 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->fd_open_cnt_lock);
mutex_init(&hdev->send_cpu_message_lock);
+ mutex_init(&hdev->mmu_cache_lock);
INIT_LIST_HEAD(&hdev->hw_queues_mirror_list);
spin_lock_init(&hdev->hw_queues_mirror_lock);
atomic_set(&hdev->in_reset, 0);
@@ -260,6 +261,7 @@ early_fini:
*/
static void device_early_fini(struct hl_device *hdev)
{
+ mutex_destroy(&hdev->mmu_cache_lock);
mutex_destroy(&hdev->send_cpu_message_lock);
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index a582e29c1ee4..02d116b01a1a 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -4819,7 +4819,8 @@ static const struct hl_asic_funcs goya_funcs = {
.set_dram_bar_base = goya_set_ddr_bar_base,
.init_iatu = goya_init_iatu,
.rreg = hl_rreg,
- .wreg = hl_wreg
+ .wreg = hl_wreg,
+ .halt_coresight = goya_halt_coresight
};
/*
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 14e216cb3668..c83cab0d641e 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -202,6 +202,7 @@ void goya_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
int goya_armcp_info_get(struct hl_device *hdev);
int goya_debug_coresight(struct hl_device *hdev, void *data);
+void goya_halt_coresight(struct hl_device *hdev);
void goya_mmu_prepare(struct hl_device *hdev, u32 asid);
int goya_mmu_clear_pgt_range(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index 1ac951f52d1e..d7ec7ad84cc6 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -425,8 +425,18 @@ static int goya_config_etr(struct hl_device *hdev,
WREG32(base_reg + 0x28, 0);
WREG32(base_reg + 0x304, 0);
- if (params->output_size >= sizeof(u32))
- *(u32 *) params->output = RREG32(base_reg + 0x18);
+ if (params->output_size >= sizeof(u64)) {
+ u32 rwp, rwphi;
+
+ /*
+ * The trace buffer address is 40 bits wide. The end of
+ * the buffer is set in the RWP register (lower 32
+ * bits), and in the RWPHI register (upper 8 bits).
+ */
+ rwp = RREG32(base_reg + 0x18);
+ rwphi = RREG32(base_reg + 0x3c) & 0xff;
+ *(u64 *) params->output = ((u64) rwphi << 32) | rwp;
+ }
}
return 0;
@@ -626,3 +636,20 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
return rc;
}
+
+void goya_halt_coresight(struct hl_device *hdev)
+{
+ struct hl_debug_params params = {};
+ int i, rc;
+
+ for (i = GOYA_ETF_FIRST ; i <= GOYA_ETF_LAST ; i++) {
+ params.reg_idx = i;
+ rc = goya_config_etf(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETF failed, %d/%d\n", rc, i);
+ }
+
+ rc = goya_config_etr(hdev, &params);
+ if (rc)
+ dev_err(hdev->dev, "halt ETR failed, %d\n", rc);
+}
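
The comment in goya_config_etr() above explains that the 40-bit trace write pointer is split across RWP (lower 32 bits) and RWPHI (upper 8 bits). A small sketch of the recombination, with register values hard-coded purely for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Example register values; in the driver these come from RREG32(). */
        uint32_t rwp = 0x89abcdef;      /* RWP: lower 32 bits of the pointer */
        uint32_t rwphi = 0xa5;          /* RWPHI: only bits [7:0] are valid */

        uint64_t wp = ((uint64_t)(rwphi & 0xff) << 32) | rwp;

        printf("trace buffer write pointer: 0x%010" PRIx64 "\n", wp);
        return 0;
}
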
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 71243b319920..adef7d9d7488 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -501,6 +501,7 @@ enum hl_pll_frequency {
* @init_iatu: Initialize the iATU unit inside the PCI controller.
* @rreg: Read a register. Needed for simulator support.
* @wreg: Write a register. Needed for simulator support.
+ * @halt_coresight: stop the ETF and ETR traces.
*/
struct hl_asic_funcs {
int (*early_init)(struct hl_device *hdev);
@@ -578,6 +579,7 @@ struct hl_asic_funcs {
int (*init_iatu)(struct hl_device *hdev);
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
+ void (*halt_coresight)(struct hl_device *hdev);
};
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index d67d24c13efd..693877e37fd8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -675,11 +675,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
total_npages += npages;
- if (first) {
- first = false;
- dma_addr &= PAGE_MASK_2MB;
- }
-
if ((npages % PGS_IN_2MB_PAGE) ||
(dma_addr & (PAGE_SIZE_2MB - 1)))
is_huge_page_opt = false;
@@ -704,7 +699,6 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
phys_pg_pack->total_size = total_npages * page_size;
j = 0;
- first = true;
for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) {
npages = get_sg_info(sg, &dma_addr);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 533d9315b6fb..10aee3141444 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -404,15 +404,12 @@ int hl_mmu_init(struct hl_device *hdev)
/* MMU H/W init was already done in device hw_init() */
- mutex_init(&hdev->mmu_cache_lock);
-
hdev->mmu_pgt_pool =
gen_pool_create(__ffs(prop->mmu_hop_table_size), -1);
if (!hdev->mmu_pgt_pool) {
dev_err(hdev->dev, "Failed to create page gen pool\n");
- rc = -ENOMEM;
- goto err_pool_create;
+ return -ENOMEM;
}
rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr +
@@ -436,8 +433,6 @@ int hl_mmu_init(struct hl_device *hdev)
err_pool_add:
gen_pool_destroy(hdev->mmu_pgt_pool);
-err_pool_create:
- mutex_destroy(&hdev->mmu_cache_lock);
return rc;
}
@@ -459,7 +454,6 @@ void hl_mmu_fini(struct hl_device *hdev)
kvfree(hdev->mmu_shadow_hop0);
gen_pool_destroy(hdev->mmu_pgt_pool);
- mutex_destroy(&hdev->mmu_cache_lock);
/* MMU H/W fini will be done in device hw_fini() */
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 7eebbdfbcacd..17f839dee976 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -32,12 +32,20 @@ static int recur_count = REC_NUM_DEFAULT;
static DEFINE_SPINLOCK(lock_me_up);
-static int recursive_loop(int remaining)
+/*
+ * Make sure compiler does not optimize this function or stack frame away:
+ * - function marked noinline
+ * - stack variables are marked volatile
+ * - stack variables are written (memset()) and read (pr_info())
+ * - function has external effects (pr_info())
+ */
+static int noinline recursive_loop(int remaining)
{
- char buf[REC_STACK_SIZE];
+ volatile char buf[REC_STACK_SIZE];
- /* Make sure compiler does not optimize this away. */
- memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
+ memset((void *)buf, remaining & 0xFF, sizeof(buf));
+ pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
+ recur_count);
if (!remaining)
return 0;
else
@@ -81,9 +89,12 @@ void lkdtm_LOOP(void)
;
}
-void lkdtm_OVERFLOW(void)
+void lkdtm_EXHAUST_STACK(void)
{
- (void) recursive_loop(recur_count);
+ pr_info("Calling function with %d frame size to depth %d ...\n",
+ REC_STACK_SIZE, recur_count);
+ recursive_loop(recur_count);
+ pr_info("FAIL: survived without exhausting stack?!\n");
}
static noinline void __lkdtm_CORRUPT_STACK(void *stack)
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 1972dad966f5..8a1428d4f138 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -106,12 +106,12 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(WARNING),
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
- CRASHTYPE(OVERFLOW),
+ CRASHTYPE(EXHAUST_STACK),
+ CRASHTYPE(CORRUPT_STACK),
+ CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(CORRUPT_USER_DS),
- CRASHTYPE(CORRUPT_STACK),
- CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
CRASHTYPE(STACK_GUARD_PAGE_TRAILING),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index b69ee004a3f7..23dc565b4307 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -13,7 +13,7 @@ void lkdtm_BUG(void);
void lkdtm_WARNING(void);
void lkdtm_EXCEPTION(void);
void lkdtm_LOOP(void);
-void lkdtm_OVERFLOW(void);
+void lkdtm_EXHAUST_STACK(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_CORRUPT_STACK_STRONG(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index d5a0e7f1813b..e172719dd86d 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -324,14 +324,16 @@ free_user:
void lkdtm_USERCOPY_KERNEL_DS(void)
{
- char __user *user_ptr = (char __user *)ERR_PTR(-EINVAL);
+ char __user *user_ptr =
+ (char __user *)(0xFUL << (sizeof(unsigned long) * 8 - 4));
mm_segment_t old_fs = get_fs();
char buf[10] = {0};
- pr_info("attempting copy_to_user on unmapped kernel address\n");
+ pr_info("attempting copy_to_user() to noncanonical address: %px\n",
+ user_ptr);
set_fs(KERNEL_DS);
- if (copy_to_user(user_ptr, buf, sizeof(buf)))
- pr_info("copy_to_user un unmapped kernel address failed\n");
+ if (copy_to_user(user_ptr, buf, sizeof(buf)) == 0)
+ pr_err("copy_to_user() to noncanonical address succeeded!?\n");
set_fs(old_fs);
}
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index b5b9c6142f08..92900a095796 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -377,6 +377,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
blk_queue_max_segment_size(mq->queue,
round_down(host->max_seg_size, block_size));
+ dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
+
INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c5a8af4ca76b..5582561586b4 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -859,6 +859,9 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
if (WARN_ON(!host) || WARN_ON(!host->cmd))
return IRQ_NONE;
+ /* ack all raised interrupts */
+ writel(status, host->regs + SD_EMMC_STATUS);
+
cmd = host->cmd;
data = cmd->data;
cmd->error = 0;
@@ -905,9 +908,6 @@ out:
if (ret == IRQ_HANDLED)
meson_mmc_request_done(host->mmc, cmd->mrq);
- /* ack all raised interrupts */
- writel(status, host->regs + SD_EMMC_STATUS);
-
return ret;
}
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index 1e85fb7f1025..781a3e106d9a 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -856,7 +856,7 @@ static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
}
if (!first_fail) {
- WARN_ON("no edge detected, continue with hw tuned delay.\n");
+ WARN(1, "no edge detected, continue with hw tuned delay.\n");
} else if (first_pass) {
/* set tap location at fixed tap relative to the first edge */
edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d128708924e4..59acf8e3331e 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2133,6 +2133,17 @@ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
+static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ sdhci_enable_sdio_irq_nolock(host, true);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
struct mmc_ios *ios)
{
@@ -2581,6 +2592,7 @@ static const struct mmc_host_ops sdhci_ops = {
.get_ro = sdhci_get_ro,
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
+ .ack_sdio_irq = sdhci_ack_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
@@ -3083,8 +3095,7 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
if ((intmask & SDHCI_INT_CARD_INT) &&
(host->ier & SDHCI_INT_CARD_INT)) {
sdhci_enable_sdio_irq_nolock(host, false);
- host->thread_isr |= SDHCI_INT_CARD_INT;
- result = IRQ_WAKE_THREAD;
+ sdio_signal_irq(host->mmc);
}
intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
@@ -3156,15 +3167,6 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
mmc_detect_change(mmc, msecs_to_jiffies(200));
}
- if (isr & SDHCI_INT_CARD_INT) {
- sdio_run_irqs(host->mmc);
-
- spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
- sdhci_enable_sdio_irq_nolock(host, true);
- spin_unlock_irqrestore(&host->lock, flags);
- }
-
return IRQ_HANDLED;
}
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index a91c0b45c48d..3222ea4d584d 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -231,7 +231,7 @@ static int sdhci_am654_init(struct sdhci_host *host)
ctl_cfg_2 = SLOTTYPE_EMBEDDED;
regmap_update_bits(sdhci_am654->base, CTL_CFG_2,
- ctl_cfg_2, SLOTTYPE_MASK);
+ SLOTTYPE_MASK, ctl_cfg_2);
return sdhci_add_host(host);
}
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 130b91cb0f8a..84cb7d2aacdf 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -842,8 +842,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
if (mrq->cmd->error || (mrq->data && mrq->data->error))
tmio_mmc_abort_dma(host);
+ /* SCC error means retune, but executed command was still successful */
if (host->check_scc_error && host->check_scc_error(host))
- mrq->cmd->error = -EILSEQ;
+ mmc_retune_needed(host->mmc);
/* If SET_BLOCK_COUNT, continue with main command */
if (host->mrq && !mrq->cmd->error) {
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 42da3f1bff5b..063c7a671b41 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1388,7 +1388,7 @@ static int mv88e6xxx_vtu_get(struct mv88e6xxx_chip *chip, u16 vid,
int err;
if (!vid)
- return -EINVAL;
+ return -EOPNOTSUPP;
entry->vid = vid - 1;
entry->valid = false;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 0663b78a2f6c..1c3959efebc4 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -652,16 +652,6 @@ static int sja1105_speed[] = {
[SJA1105_SPEED_1000MBPS] = 1000,
};
-static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
-{
- int i;
-
- for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
- if (sja1105_speed[i] == speed_mbps)
- return i;
- return -EINVAL;
-}
-
/* Set link speed and enable/disable traffic I/O in the MAC configuration
* for a specific port.
*
@@ -684,8 +674,21 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
- speed = sja1105_get_speed_cfg(speed_mbps);
- if (speed_mbps && speed < 0) {
+ switch (speed_mbps) {
+ case 0:
+ /* No speed update requested */
+ speed = SJA1105_SPEED_AUTO;
+ break;
+ case 10:
+ speed = SJA1105_SPEED_10MBPS;
+ break;
+ case 100:
+ speed = SJA1105_SPEED_100MBPS;
+ break;
+ case 1000:
+ speed = SJA1105_SPEED_1000MBPS;
+ break;
+ default:
dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
return -EINVAL;
}
@@ -695,10 +698,7 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
* and we no longer need to store it in the static config (already told
* hardware we want auto during upload phase).
*/
- if (speed_mbps)
- mac[port].speed = speed;
- else
- mac[port].speed = SJA1105_SPEED_AUTO;
+ mac[port].speed = speed;
/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
* tables. On E/T, MAC reconfig tables are not readable, only writable.
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index c38020dcbd3a..52646855495e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -332,13 +332,13 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
{
u32 val;
int err = 0;
- bool is_locked;
- is_locked = hw_atl_sem_ram_get(self);
- if (!is_locked) {
- err = -ETIME;
+ err = readx_poll_timeout_atomic(hw_atl_sem_ram_get, self,
+ val, val == 1U,
+ 10U, 100000U);
+ if (err < 0)
goto err_exit;
- }
+
if (IS_CHIP_FEATURE(REVISION_B1)) {
u32 offset = 0;
@@ -350,8 +350,8 @@ static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
/* 1000 times by 10us = 10ms */
err = readx_poll_timeout_atomic(hw_atl_scrpad12_get,
self, val,
- (val & 0xF0000000) ==
- 0x80000000,
+ (val & 0xF0000000) !=
+ 0x80000000,
10U, 10000U);
}
} else {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index f1cea9bea27f..da726489e3c8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -381,7 +381,7 @@ static int aq_fw2x_set_sleep_proxy(struct aq_hw_s *self, u8 *mac)
err = readx_poll_timeout_atomic(aq_fw2x_state2_get,
self, val,
val & HW_ATL_FW2X_CTRL_SLEEP_PROXY,
- 1U, 10000U);
+ 1U, 100000U);
err_exit:
return err;
@@ -401,6 +401,8 @@ static int aq_fw2x_set_wol_params(struct aq_hw_s *self, u8 *mac)
msg = (struct fw2x_msg_wol *)rpc;
+ memset(msg, 0, sizeof(*msg));
+
msg->msg_id = HAL_ATLANTIC_UTILS_FW2X_MSG_WOL;
msg->magic_packet_enabled = true;
memcpy(msg->hw_addr, mac, ETH_ALEN);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index d1df0a44f93c..717fccc2efba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -335,6 +335,7 @@ static int __lb_setup(struct net_device *ndev,
static int __lb_up(struct net_device *ndev,
enum hnae_loop loop_mode)
{
+#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
int speed, duplex;
@@ -361,6 +362,9 @@ static int __lb_up(struct net_device *ndev,
h->dev->ops->adjust_link(h, speed, duplex);
+ /* wait for the link adjustment to finish and the PHY to be ready */
+ msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
+
return 0;
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 7a67e23a2c2b..d8e5241097a9 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1304,8 +1304,8 @@ static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
int i;
for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
- &mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
+ strscpy(data + i * ETH_GSTRING_LEN,
+ mvpp2_ethtool_regs[i].string, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 96b53ff68c96..6cfffb64cd51 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1772,6 +1772,7 @@ static void mtk_poll_controller(struct net_device *dev)
static int mtk_start_dma(struct mtk_eth *eth)
{
+ u32 rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
int err;
err = mtk_dma_init(eth);
@@ -1788,7 +1789,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
MTK_QDMA_GLO_CFG);
mtk_w32(eth,
- MTK_RX_DMA_EN | MTK_RX_2B_OFFSET |
+ MTK_RX_DMA_EN | rx_2b_offset |
MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
MTK_PDMA_GLO_CFG);
@@ -2292,13 +2293,13 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
- if (dev->features & NETIF_F_LRO) {
+ if (dev->hw_features & NETIF_F_LRO) {
cmd->data = MTK_MAX_RX_RING_NUM;
ret = 0;
}
break;
case ETHTOOL_GRXCLSRLCNT:
- if (dev->features & NETIF_F_LRO) {
+ if (dev->hw_features & NETIF_F_LRO) {
struct mtk_mac *mac = netdev_priv(dev);
cmd->rule_cnt = mac->hwlro_ip_cnt;
@@ -2306,11 +2307,11 @@ static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
}
break;
case ETHTOOL_GRXCLSRULE:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_entry(dev, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_get_fdir_all(dev, cmd,
rule_locs);
break;
@@ -2327,11 +2328,11 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_add_ipaddr(dev, cmd);
break;
case ETHTOOL_SRXCLSRLDEL:
- if (dev->features & NETIF_F_LRO)
+ if (dev->hw_features & NETIF_F_LRO)
ret = mtk_hwlro_del_ipaddr(dev, cmd);
break;
default:
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index a4a7ec0d2531..6d1c9ebae7cc 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -643,7 +643,7 @@ void cpsw_get_ringparam(struct net_device *ndev,
struct cpsw_common *cpsw = priv->cpsw;
/* not supported */
- ering->tx_max_pending = 0;
+ ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index cf38c392b9b6..1c96bed5a7c4 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -107,7 +107,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
}
#define IPVLAN_FEATURES \
- (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+ (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 9044b95d2afe..4c0616ba314d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -1073,6 +1073,7 @@ EXPORT_SYMBOL_GPL(phylink_ethtool_ksettings_get);
int phylink_ethtool_ksettings_set(struct phylink *pl,
const struct ethtool_link_ksettings *kset)
{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support);
struct ethtool_link_ksettings our_kset;
struct phylink_link_state config;
int ret;
@@ -1083,11 +1084,12 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
kset->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
+ linkmode_copy(support, pl->supported);
config = pl->link_config;
/* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
- pl->supported);
+ support);
/* FIXME: should we reject autoneg if phy/mac does not support it? */
if (kset->base.autoneg == AUTONEG_DISABLE) {
@@ -1097,7 +1099,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
* duplex.
*/
s = phy_lookup_setting(kset->base.speed, kset->base.duplex,
- pl->supported, false);
+ support, false);
if (!s)
return -EINVAL;
@@ -1126,7 +1128,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising);
}
- if (phylink_validate(pl, pl->supported, &config))
+ if (phylink_validate(pl, support, &config))
return -EINVAL;
/* If autonegotiation is enabled, we must have an advertisement */
@@ -1576,6 +1578,7 @@ static int phylink_sfp_module_insert(void *upstream,
{
struct phylink *pl = upstream;
__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(support1);
struct phylink_link_state config;
phy_interface_t iface;
int ret = 0;
@@ -1603,6 +1606,8 @@ static int phylink_sfp_module_insert(void *upstream,
return ret;
}
+ linkmode_copy(support1, support);
+
iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA) {
netdev_err(pl->netdev,
@@ -1612,7 +1617,7 @@ static int phylink_sfp_module_insert(void *upstream,
}
config.interface = iface;
- ret = phylink_validate(pl, support, &config);
+ ret = phylink_validate(pl, support1, &config);
if (ret) {
netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
phylink_an_mode_str(MLO_AN_INBAND),
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index d4635c2178d1..71812be0ac64 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -281,6 +281,7 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
{
struct i2c_msg msgs[2];
u8 bus_addr = a2 ? 0x51 : 0x50;
+ size_t this_len;
int ret;
msgs[0].addr = bus_addr;
@@ -292,11 +293,26 @@ static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
msgs[1].len = len;
msgs[1].buf = buf;
- ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
- if (ret < 0)
- return ret;
+ while (len) {
+ this_len = len;
+ if (this_len > 16)
+ this_len = 16;
- return ret == ARRAY_SIZE(msgs) ? len : 0;
+ msgs[1].len = this_len;
+
+ ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ if (ret != ARRAY_SIZE(msgs))
+ break;
+
+ msgs[1].buf += this_len;
+ dev_addr += this_len;
+ len -= this_len;
+ }
+
+ return msgs[1].buf - (u8 *)buf;
}
static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
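
The reworked sfp_i2c_read() above fetches the module EEPROM in transfers of at most 16 bytes and returns how many bytes were actually read. A userspace sketch of the same chunking pattern, with read_chunk() standing in for the real i2c_transfer() call:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the real transfer: copies from a fake EEPROM image. */
static const unsigned char eeprom[256] = { [0] = 0x03, [1] = 0x04 };

static int read_chunk(size_t addr, unsigned char *buf, size_t len)
{
        if (addr + len > sizeof(eeprom))
                return -1;
        memcpy(buf, &eeprom[addr], len);
        return 0;
}

static size_t chunked_read(size_t addr, unsigned char *buf, size_t len)
{
        unsigned char *p = buf;

        while (len) {
                size_t this_len = len > 16 ? 16 : len;  /* cap each transfer */

                if (read_chunk(addr, p, this_len) < 0)
                        break;
                p += this_len;
                addr += this_len;
                len -= this_len;
        }
        return p - buf;         /* bytes actually read */
}

int main(void)
{
        unsigned char buf[100];

        printf("read %zu bytes\n", chunked_read(0, buf, sizeof(buf)));
        return 0;
}
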
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1b7c2afd84cb..120fb593d1da 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3400,7 +3400,8 @@ static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
struct nvme_ns *ns;
__le32 *ns_list;
- unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
+ unsigned i, j, nsid, prev = 0;
+ unsigned num_lists = DIV_ROUND_UP_ULL((u64)nn, 1024);
int ret = 0;
ns_list = kzalloc(NVME_IDENTIFY_DATA_SIZE, GFP_KERNEL);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f562154551ce..524d6bd6d095 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2513,6 +2513,12 @@ static void nvme_reset_work(struct work_struct *work)
*/
dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
dev->ctrl.max_segments = NVME_MAX_SEGS;
+
+ /*
+ * Don't limit the IOMMU merged segment size.
+ */
+ dma_set_max_seg_size(dev->dev, 0xffffffff);
+
mutex_unlock(&dev->shutdown_lock);
/*
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f383146e7d0f..97f668a39ae1 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -213,6 +213,11 @@ static struct nvme_rdma_qe *nvme_rdma_alloc_ring(struct ib_device *ibdev,
if (!ring)
return NULL;
+ /*
+ * Bind the CQEs (post recv buffers) DMA mapping to the RDMA queue
+ * lifetime. It's safe, since any change in the underlying RDMA device
+ * will issue error recovery and queue re-creation.
+ */
for (i = 0; i < ib_queue_size; i++) {
if (nvme_rdma_alloc_qe(ibdev, &ring[i], capsule_size, dir))
goto out_free_ring;
@@ -274,14 +279,9 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor)
static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{
- struct nvme_rdma_ctrl *ctrl = set->driver_data;
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
- struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
- struct nvme_rdma_device *dev = queue->device;
- nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
+ kfree(req->sqe.data);
}
static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
@@ -292,15 +292,11 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
- int ret;
nvme_req(rq)->ctrl = &ctrl->ctrl;
- ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
- if (ret)
- return ret;
+ req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
+ if (!req->sqe.data)
+ return -ENOMEM;
req->queue = queue;
@@ -641,34 +637,16 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
{
struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
struct ib_device *ibdev = ctrl->device->dev;
- unsigned int nr_io_queues;
+ unsigned int nr_io_queues, nr_default_queues;
+ unsigned int nr_read_queues, nr_poll_queues;
int i, ret;
- nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
-
- /*
- * we map queues according to the device irq vectors for
- * optimal locality so we don't need more queues than
- * completion vectors.
- */
- nr_io_queues = min_t(unsigned int, nr_io_queues,
- ibdev->num_comp_vectors);
-
- if (opts->nr_write_queues) {
- ctrl->io_queues[HCTX_TYPE_DEFAULT] =
- min(opts->nr_write_queues, nr_io_queues);
- nr_io_queues += ctrl->io_queues[HCTX_TYPE_DEFAULT];
- } else {
- ctrl->io_queues[HCTX_TYPE_DEFAULT] = nr_io_queues;
- }
-
- ctrl->io_queues[HCTX_TYPE_READ] = nr_io_queues;
-
- if (opts->nr_poll_queues) {
- ctrl->io_queues[HCTX_TYPE_POLL] =
- min(opts->nr_poll_queues, num_online_cpus());
- nr_io_queues += ctrl->io_queues[HCTX_TYPE_POLL];
- }
+ nr_read_queues = min_t(unsigned int, ibdev->num_comp_vectors,
+ min(opts->nr_io_queues, num_online_cpus()));
+ nr_default_queues = min_t(unsigned int, ibdev->num_comp_vectors,
+ min(opts->nr_write_queues, num_online_cpus()));
+ nr_poll_queues = min(opts->nr_poll_queues, num_online_cpus());
+ nr_io_queues = nr_read_queues + nr_default_queues + nr_poll_queues;
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
if (ret)
@@ -681,6 +659,34 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
dev_info(ctrl->ctrl.device,
"creating %d I/O queues.\n", nr_io_queues);
+ if (opts->nr_write_queues && nr_read_queues < nr_io_queues) {
+ /*
+ * separate read/write queues
+ * hand out dedicated default queues only after we have
+ * sufficient read queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(nr_default_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /*
+ * shared read/write queues
+ * either no write queues were requested, or we don't have
+ * sufficient queue count to have dedicated default queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(nr_read_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ }
+
+ if (opts->nr_poll_queues && nr_io_queues) {
+ /* map dedicated poll queues only if we have queues left */
+ ctrl->io_queues[HCTX_TYPE_POLL] =
+ min(nr_poll_queues, nr_io_queues);
+ }
+
for (i = 1; i < ctrl->ctrl.queue_count; i++) {
ret = nvme_rdma_alloc_queue(ctrl, i,
ctrl->ctrl.sqsize + 1);
@@ -769,6 +775,11 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ /*
+ * Bind the async event SQE DMA mapping to the admin queue lifetime.
+ * It's safe, since any change in the underlying RDMA device will issue
+ * error recovery and queue re-creation.
+ */
error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
if (error)
@@ -1709,12 +1720,20 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
dev = queue->device->dev;
+
+ req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
+ sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ err = ib_dma_mapping_error(dev, req->sqe.dma);
+ if (unlikely(err))
+ return BLK_STS_RESOURCE;
+
ib_dma_sync_single_for_cpu(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
ret = nvme_setup_cmd(ns, rq, c);
if (ret)
- return ret;
+ goto unmap_qe;
blk_mq_start_request(rq);
@@ -1739,10 +1758,16 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
}
return BLK_STS_OK;
+
err:
if (err == -ENOMEM || err == -EAGAIN)
- return BLK_STS_RESOURCE;
- return BLK_STS_IOERR;
+ ret = BLK_STS_RESOURCE;
+ else
+ ret = BLK_STS_IOERR;
+unmap_qe:
+ ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
+ return ret;
}
static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
@@ -1755,25 +1780,36 @@ static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
static void nvme_rdma_complete_rq(struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct ib_device *ibdev = queue->device->dev;
- nvme_rdma_unmap_data(req->queue, rq);
+ nvme_rdma_unmap_data(queue, rq);
+ ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
+ DMA_TO_DEVICE);
nvme_complete_rq(rq);
}
static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_rdma_ctrl *ctrl = set->driver_data;
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- set->map[HCTX_TYPE_READ].nr_queues = ctrl->io_queues[HCTX_TYPE_READ];
- if (ctrl->ctrl.opts->nr_write_queues) {
+ if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
/* separate read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_READ];
set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
} else {
- /* mixed read/write queues */
+ /* shared read/write queues */
+ set->map[HCTX_TYPE_DEFAULT].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
set->map[HCTX_TYPE_READ].queue_offset = 0;
}
blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
@@ -1781,16 +1817,22 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
ctrl->device->dev, 0);
- if (ctrl->ctrl.opts->nr_poll_queues) {
+ if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
+ /* map dedicated poll queues only if we have queues left */
set->map[HCTX_TYPE_POLL].nr_queues =
ctrl->io_queues[HCTX_TYPE_POLL];
set->map[HCTX_TYPE_POLL].queue_offset =
- ctrl->io_queues[HCTX_TYPE_DEFAULT];
- if (ctrl->ctrl.opts->nr_write_queues)
- set->map[HCTX_TYPE_POLL].queue_offset +=
- ctrl->io_queues[HCTX_TYPE_READ];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] +
+ ctrl->io_queues[HCTX_TYPE_READ];
blk_mq_map_queues(&set->map[HCTX_TYPE_POLL]);
}
+
+ dev_info(ctrl->ctrl.device,
+ "mapped %d/%d/%d default/read/poll queues.\n",
+ ctrl->io_queues[HCTX_TYPE_DEFAULT],
+ ctrl->io_queues[HCTX_TYPE_READ],
+ ctrl->io_queues[HCTX_TYPE_POLL]);
+
return 0;
}
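
The nvme-rdma queue accounting above splits the granted nr_io_queues into default, read and poll sets, handing out read queues first and poll queues only if anything is left. A compact userspace sketch of that policy; the want_* parameters are illustrative stand-ins for the controller options, not driver fields:

#include <stdio.h>

struct io_queues { unsigned int def, read, poll; };

static unsigned int min_u(unsigned int a, unsigned int b)
{
        return a < b ? a : b;
}

static struct io_queues split_queues(unsigned int granted,
                                     unsigned int want_default,
                                     unsigned int want_read,
                                     unsigned int want_poll)
{
        struct io_queues q = { 0, 0, 0 };

        if (want_default && want_read < granted) {
                /* Separate sets: carve out read queues first, then default. */
                q.read = want_read;
                granted -= q.read;
                q.def = min_u(want_default, granted);
                granted -= q.def;
        } else {
                /* Not enough queues for a dedicated default set: share them. */
                q.def = min_u(want_read, granted);
                granted -= q.def;
        }

        if (want_poll && granted)
                q.poll = min_u(want_poll, granted);

        return q;
}

int main(void)
{
        struct io_queues q = split_queues(8, 4, 4, 2);

        printf("default/read/poll = %u/%u/%u\n", q.def, q.read, q.poll);
        return 0;
}
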
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 2b107a1d152b..08a2501b9357 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -111,6 +111,7 @@ struct nvme_tcp_ctrl {
struct work_struct err_work;
struct delayed_work connect_work;
struct nvme_tcp_request async_req;
+ u32 io_queues[HCTX_MAX_TYPES];
};
static LIST_HEAD(nvme_tcp_ctrl_list);
@@ -1564,6 +1565,35 @@ static unsigned int nvme_tcp_nr_io_queues(struct nvme_ctrl *ctrl)
return nr_io_queues;
}
+static void nvme_tcp_set_io_queues(struct nvme_ctrl *nctrl,
+ unsigned int nr_io_queues)
+{
+ struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl);
+ struct nvmf_ctrl_options *opts = nctrl->opts;
+
+ if (opts->nr_write_queues && opts->nr_io_queues < nr_io_queues) {
+ /*
+ * separate read/write queues
+ * hand out dedicated default queues only after we have
+ * sufficient read queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_READ] = opts->nr_io_queues;
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ];
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_write_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ } else {
+ /*
+ * shared read/write queues
+ * either no write queues were requested, or we don't have
+ * sufficient queue count to have dedicated default queues.
+ */
+ ctrl->io_queues[HCTX_TYPE_DEFAULT] =
+ min(opts->nr_io_queues, nr_io_queues);
+ nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ }
+}
+
static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
{
unsigned int nr_io_queues;
@@ -1581,6 +1611,8 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
dev_info(ctrl->device,
"creating %d I/O queues.\n", nr_io_queues);
+ nvme_tcp_set_io_queues(ctrl, nr_io_queues);
+
return __nvme_tcp_alloc_io_queues(ctrl);
}
@@ -2089,23 +2121,34 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
{
struct nvme_tcp_ctrl *ctrl = set->driver_data;
+ struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
- set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
- set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
- if (ctrl->ctrl.opts->nr_write_queues) {
+ if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) {
/* separate read/write queues */
set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->ctrl.opts->nr_write_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_READ];
set->map[HCTX_TYPE_READ].queue_offset =
- ctrl->ctrl.opts->nr_write_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
} else {
- /* mixed read/write queues */
+ /* shared read/write queues */
set->map[HCTX_TYPE_DEFAULT].nr_queues =
- ctrl->ctrl.opts->nr_io_queues;
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
+ set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+ set->map[HCTX_TYPE_READ].nr_queues =
+ ctrl->io_queues[HCTX_TYPE_DEFAULT];
set->map[HCTX_TYPE_READ].queue_offset = 0;
}
blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
+
+ dev_info(ctrl->ctrl.device,
+ "mapped %d/%d default/read queues.\n",
+ ctrl->io_queues[HCTX_TYPE_DEFAULT],
+ ctrl->io_queues[HCTX_TYPE_READ]);
+
return 0;
}
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 3efc52f9c309..7a1cf6437a6a 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -293,6 +293,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
return 0;
case nvme_cmd_write_zeroes:
req->execute = nvmet_bdev_execute_write_zeroes;
+ req->data_len = 0;
return 0;
default:
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 121f7603a595..217f15aafa4a 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -562,14 +562,12 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
/* We currently only support kernel addresses */
BUG_ON(sid != KERNEL_SPACE);
- mtsp(sid,1);
-
/*
** WORD 1 - low order word
** "hints" parm includes the VALID bit!
** "dep" clobbers the physical address offset bits as well.
*/
- pa = virt_to_phys(vba);
+ pa = lpa(vba);
asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
((u32 *)pdir_ptr)[1] = (u32) pa;
@@ -594,7 +592,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
** Grab virtual index [0:11]
** Deposit virt_idx bits into I/O PDIR word
*/
- asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+ asm volatile ("lci %%r0(%1), %0" : "=r" (ci) : "r" (vba));
asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 8a9ea9bd050c..296668caf7e5 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -569,11 +569,10 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
u64 pa; /* physical address */
register unsigned ci; /* coherent index */
- pa = virt_to_phys(vba);
+ pa = lpa(vba);
pa &= IOVP_MASK;
- mtsp(sid,1);
- asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
+ asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5dc53d420ca8..7b4ee33c1935 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -895,6 +895,7 @@ parport_register_dev_model(struct parport *port, const char *name,
par_dev->devmodel = true;
ret = device_register(&par_dev->dev);
if (ret) {
+ kfree(par_dev->state);
put_device(&par_dev->dev);
goto err_put_port;
}
@@ -912,6 +913,7 @@ parport_register_dev_model(struct parport *port, const char *name,
spin_unlock(&port->physport->pardevice_lock);
pr_debug("%s: cannot grant exclusive access for device %s\n",
port->name, name);
+ kfree(par_dev->state);
device_unregister(&par_dev->dev);
goto err_put_port;
}
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 009f2c0ec504..b1823d75dd35 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1274,16 +1274,20 @@ static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers)
return 0;
}
-static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
+static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int count = single ? 1 : card->dev->num_tx_queues;
+ int rc;
rtnl_lock();
- netif_set_real_num_tx_queues(card->dev, count);
+ rc = netif_set_real_num_tx_queues(card->dev, count);
rtnl_unlock();
+ if (rc)
+ return rc;
+
if (card->qdio.no_out_queues == count)
- return;
+ return 0;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
@@ -1293,12 +1297,14 @@ static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
card->qdio.no_out_queues = count;
+ return 0;
}
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
+ int rc = 0;
QETH_DBF_TEXT(SETUP, 2, "chp_desc");
@@ -1311,12 +1317,12 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
- qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+ rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
kfree(chp_dsc);
QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
- return 0;
+ return rc;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -5597,8 +5603,12 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
if (IS_IQD(card)) {
- netif_set_real_num_tx_queues(dev, QETH_IQD_MIN_TXQ);
dev->features |= NETIF_F_SG;
+ if (netif_set_real_num_tx_queues(dev,
+ QETH_IQD_MIN_TXQ)) {
+ free_netdev(dev);
+ return NULL;
+ }
}
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 218801232ca2..ff8a6cd790b1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1680,7 +1680,7 @@ static void qeth_bridgeport_an_set_cb(void *priv,
l2entry = (struct qdio_brinfo_entry_l2 *)entry;
code = IPA_ADDR_CHANGE_CODE_MACADDR;
- if (l2entry->addr_lnid.lnid)
+ if (l2entry->addr_lnid.lnid < VLAN_N_VID)
code |= IPA_ADDR_CHANGE_CODE_VLANID;
qeth_bridge_emit_host_event(card, anev_reg_unreg, code,
(struct net_if_token *)&l2entry->nit,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0271833da6a2..13bf3e2e9cea 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1888,13 +1888,20 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static int qeth_l3_get_cast_type(struct sk_buff *skb)
{
+ int ipv = qeth_get_ip_version(skb);
struct neighbour *n = NULL;
struct dst_entry *dst;
rcu_read_lock();
dst = skb_dst(skb);
- if (dst)
- n = dst_neigh_lookup_skb(dst, skb);
+ if (dst) {
+ struct rt6_info *rt = (struct rt6_info *) dst;
+
+ dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0);
+ if (dst)
+ n = dst_neigh_lookup_skb(dst, skb);
+ }
+
if (n) {
int cast_type = n->type;
@@ -1909,8 +1916,10 @@ static int qeth_l3_get_cast_type(struct sk_buff *skb)
rcu_read_unlock();
/* no neighbour (eg AF_PACKET), fall back to target's IP address ... */
- switch (qeth_get_ip_version(skb)) {
+ switch (ipv) {
case 4:
+ if (ipv4_is_lbcast(ip_hdr(skb)->daddr))
+ return RTN_BROADCAST;
return ipv4_is_multicast(ip_hdr(skb)->daddr) ?
RTN_MULTICAST : RTN_UNICAST;
case 6:
@@ -1940,6 +1949,7 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3;
struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
struct qeth_card *card = queue->card;
+ struct dst_entry *dst;
hdr->hdr.l3.length = data_len;
@@ -1985,15 +1995,27 @@ static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue,
}
rcu_read_lock();
+ dst = skb_dst(skb);
+
if (ipv == 4) {
- struct rtable *rt = skb_rtable(skb);
+ struct rtable *rt;
+
+ if (dst)
+ dst = dst_check(dst, 0);
+ rt = (struct rtable *) dst;
*((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ?
rt_nexthop(rt, ip_hdr(skb)->daddr) :
ip_hdr(skb)->daddr;
} else {
/* IPv6 */
- const struct rt6_info *rt = skb_rt6_info(skb);
+ struct rt6_info *rt;
+
+ if (dst) {
+ rt = (struct rt6_info *) dst;
+ dst = dst_check(dst, rt6_get_cookie(rt));
+ }
+ rt = (struct rt6_info *) dst;
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
l3_hdr->next_hop.ipv6_addr = rt->rt6i_gateway;
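
The qeth_l3_main.c hunks above validate a cached dst_entry with dst_check() before dereferencing it, using the per-route cookie on IPv6. A minimal sketch of that validation pattern in a hypothetical helper, assuming the caller holds rcu_read_lock(); dst_check(), rt6_get_cookie() and dst_neigh_lookup_skb() are the real routing APIs.

#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/ip6_fib.h>

/* Sketch: only trust skb_dst() after dst_check() has confirmed it is
 * still valid; IPv6 routes need the per-route cookie, IPv4 passes 0. */
static struct neighbour *example_lookup_neigh(struct sk_buff *skb, int ipv)
{
	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *n = NULL;

	if (dst) {
		u32 cookie = 0;

		if (ipv == 6)
			cookie = rt6_get_cookie((struct rt6_info *)dst);
		dst = dst_check(dst, cookie);
		if (dst)
			n = dst_neigh_lookup_skb(dst, skb);
	}
	return n;
}
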
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index d6be4e8f4a8f..8fd5ffc55792 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -4046,8 +4046,10 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
return -ETIMEDOUT;
msecs_blocked =
jiffies_to_msecs(jiffies - start_jiffies);
- if (msecs_blocked >= timeout_msecs)
- return -ETIMEDOUT;
+ if (msecs_blocked >= timeout_msecs) {
+ rc = -ETIMEDOUT;
+ goto out;
+ }
timeout_msecs -= msecs_blocked;
}
}
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 8c1c551f2b42..3fe3029617a8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1917,7 +1917,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
/* Get the descriptor */
- if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ if (hba->dev_cmd.query.descriptor &&
+ lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
GENERAL_UPIU_REQUEST_SIZE;
u16 resp_len;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df51a35cf537..2d9df786a9d3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -604,12 +604,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
return iov_iter_count(iter);
}
-static bool vhost_exceeds_weight(int pkts, int total_len)
-{
- return total_len >= VHOST_NET_WEIGHT ||
- pkts >= VHOST_NET_PKT_WEIGHT;
-}
-
static int get_tx_bufs(struct vhost_net *net,
struct vhost_net_virtqueue *nvq,
struct msghdr *msg,
@@ -779,7 +773,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int sent_pkts = 0;
bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
- for (;;) {
+ do {
bool busyloop_intr = false;
if (nvq->done_idx == VHOST_NET_BATCH)
@@ -845,11 +839,7 @@ done:
vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->done_idx].len = 0;
++nvq->done_idx;
- if (vhost_exceeds_weight(++sent_pkts, total_len)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
vhost_tx_batch(net, nvq, sock, &msg);
}
@@ -874,7 +864,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
bool zcopy_used;
int sent_pkts = 0;
- for (;;) {
+ do {
bool busyloop_intr;
/* Release DMAs done buffers first */
@@ -951,11 +941,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}
/* Expects to be always run from workqueue - which acts as
@@ -1153,8 +1139,11 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
- &busyloop_intr))) {
+ do {
+ sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr);
+ if (!sock_len)
+ break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1239,14 +1228,11 @@ static void handle_rx(struct vhost_net *net)
vhost_log_write(vq, vq_log, log, vhost_len,
vq->iov, in);
total_len += vhost_len;
- if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- goto out;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
if (unlikely(busyloop_intr))
vhost_poll_queue(&vq->poll);
- else
+ else if (!sock_len)
vhost_net_enable_vq(net, vq);
out:
vhost_net_signal_used(nvq);
@@ -1338,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
- UIO_MAXIOV + VHOST_NET_BATCH);
+ UIO_MAXIOV + VHOST_NET_BATCH,
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index c090d177bd75..a9caf1bc3c3e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -57,6 +57,12 @@
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * request.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
struct completion comp;
@@ -912,7 +918,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- int ret, prot_bytes;
+ int ret, prot_bytes, c = 0;
u16 lun;
u8 task_attr;
bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
@@ -932,7 +938,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
- for (;;) {
+ do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
if (ret)
goto err;
@@ -1112,7 +1118,7 @@ err:
break;
else if (ret == -EIO)
vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
@@ -1171,7 +1177,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
} v_req;
struct vhost_scsi_ctx vc;
size_t typ_size;
- int ret;
+ int ret, c = 0;
mutex_lock(&vq->mutex);
/*
@@ -1185,7 +1191,7 @@ vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
- for (;;) {
+ do {
ret = vhost_scsi_get_desc(vs, vq, &vc);
if (ret)
goto err;
@@ -1264,7 +1270,7 @@ err:
break;
else if (ret == -EIO)
vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
@@ -1621,7 +1627,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+ VHOST_SCSI_WEIGHT, 0);
vhost_scsi_init_inflight(vs, NULL);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1e3ed41ae1f3..3f3eac4bcc58 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -413,8 +413,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+ int pkts, int total_len)
+{
+ struct vhost_dev *dev = vq->dev;
+
+ if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+ pkts >= dev->weight) {
+ vhost_poll_queue(&vq->poll);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
void vhost_dev_init(struct vhost_dev *dev,
- struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
+ struct vhost_virtqueue **vqs, int nvqs,
+ int iov_limit, int weight, int byte_weight)
{
struct vhost_virtqueue *vq;
int i;
@@ -428,6 +444,8 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->mm = NULL;
dev->worker = NULL;
dev->iov_limit = iov_limit;
+ dev->weight = weight;
+ dev->byte_weight = byte_weight;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 9490e7ddb340..27a78a9b8cc7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -171,10 +171,13 @@ struct vhost_dev {
struct list_head pending_list;
wait_queue_head_t wait;
int iov_limit;
+ int weight;
+ int byte_weight;
};
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
- int nvqs, int iov_limit);
+ int nvqs, int iov_limit, int weight, int byte_weight);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
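
The vhost.c/vhost.h changes above move the byte/packet weight bookkeeping into the core: vhost_dev_init() now takes weight and byte_weight, and vhost_exceeds_weight() re-queues the poll work itself. A minimal sketch of how a handler loop uses it, with made-up limits and a hypothetical per-descriptor helper; only the vhost_* calls are the API from this patch.

#include "vhost.h"

/* Hypothetical limits for an imaginary vhost backend. */
#define EX_PKT_WEIGHT	256
#define EX_BYTE_WEIGHT	0x80000

static int example_process_one(struct vhost_virtqueue *vq);	/* hypothetical */

static void example_handle_vq(struct vhost_virtqueue *vq)
{
	int pkts = 0, total_len = 0;

	do {
		int len = example_process_one(vq);

		if (len <= 0)
			break;
		total_len += len;
		/* vhost_exceeds_weight() re-queues the poll work itself and
		 * returns true once either limit is hit, so the handler just
		 * drops out of the loop and yields. */
	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
}

/* The limits are handed to the core at open time, e.g.:
 *	vhost_dev_init(&dev, vqs, nvqs, UIO_MAXIOV,
 *		       EX_PKT_WEIGHT, EX_BYTE_WEIGHT);
 */
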
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bb5fc0e9fbc2..814bed72d793 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -21,6 +21,14 @@
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES,
@@ -78,6 +86,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
{
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int pkts = 0, total_len = 0;
bool added = false;
bool restart_tx = false;
@@ -89,7 +98,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
- for (;;) {
+ do {
struct virtio_vsock_pkt *pkt;
struct iov_iter iov_iter;
unsigned out, in;
@@ -174,8 +183,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
*/
virtio_transport_deliver_tap_pkt(pkt);
+ total_len += pkt->len;
virtio_transport_free_pkt(pkt);
- }
+ } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
vhost_signal(&vsock->dev, vq);
@@ -350,7 +360,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
dev);
struct virtio_vsock_pkt *pkt;
- int head;
+ int head, pkts = 0, total_len = 0;
unsigned int out, in;
bool added = false;
@@ -360,7 +370,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
goto out;
vhost_disable_notify(&vsock->dev, vq);
- for (;;) {
+ do {
u32 len;
if (!vhost_vsock_more_replies(vsock)) {
@@ -401,9 +411,11 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
else
virtio_transport_free_pkt(pkt);
- vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+ len += sizeof(pkt->hdr);
+ vhost_add_used(vq, head, len);
+ total_len += len;
added = true;
- }
+ } while(likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
no_more_replies:
if (added)
@@ -531,7 +543,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
- vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+ UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
+ VHOST_VSOCK_WEIGHT);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 9aea44ed54c7..023fc3bc01c6 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -63,12 +63,12 @@ config VIRTIO_INPUT
If unsure, say M.
- config VIRTIO_MMIO
+config VIRTIO_MMIO
tristate "Platform bus driver for memory mapped virtio devices"
depends on HAS_IOMEM && HAS_DMA
- select VIRTIO
- ---help---
- This drivers provides support for memory mapped virtio
+ select VIRTIO
+ ---help---
+ This drivers provides support for memory mapped virtio
platform device driver.
If unsure, say N.
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index 92e8f0755b9a..edf0bc98012c 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -138,7 +138,7 @@ static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
W1_F29_REG_CONTROL_AND_STATUS, buf);
}
-#ifdef fCONFIG_W1_SLAVE_DS2408_READBACK
+#ifdef CONFIG_W1_SLAVE_DS2408_READBACK
static bool optional_read_back_valid(struct w1_slave *sl, u8 expected)
{
u8 w1_buf[3];
diff --git a/fs/adfs/adfs.h b/fs/adfs/adfs.h
index c76db75f02aa..804c6a77c5db 100644
--- a/fs/adfs/adfs.h
+++ b/fs/adfs/adfs.h
@@ -113,19 +113,6 @@ struct object_info {
__u16 filetype;
};
-/* RISC OS 12-bit filetype converts to ,xyz hex filename suffix */
-static inline int append_filetype_suffix(char *buf, __u16 filetype)
-{
- if (filetype == 0xffff) /* no explicit 12-bit file type was set */
- return 0;
-
- *buf++ = ',';
- *buf++ = hex_asc_lo(filetype >> 8);
- *buf++ = hex_asc_lo(filetype >> 4);
- *buf++ = hex_asc_lo(filetype >> 0);
- return 4;
-}
-
struct adfs_dir_ops {
int (*read)(struct super_block *sb, unsigned int id, unsigned int sz, struct adfs_dir *dir);
int (*setpos)(struct adfs_dir *dir, unsigned int fpos);
@@ -172,6 +159,7 @@ extern const struct dentry_operations adfs_dentry_operations;
extern const struct adfs_dir_ops adfs_f_dir_ops;
extern const struct adfs_dir_ops adfs_fplus_dir_ops;
+void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj);
extern int adfs_dir_update(struct super_block *sb, struct object_info *obj,
int wait);
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index e18eff854e1a..fe39310c1a0a 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -16,6 +16,50 @@
*/
static DEFINE_RWLOCK(adfs_dir_lock);
+void adfs_object_fixup(struct adfs_dir *dir, struct object_info *obj)
+{
+ unsigned int dots, i;
+
+ /*
+ * RISC OS allows the use of '/' in directory entry names, so we need
+ * to fix these up. '/' is typically used for FAT compatibility to
+ * represent '.', so do the same conversion here. In any case, '.'
+ * will never be in a RISC OS name since it is used as the pathname
+ * separator. Handle the case where we may generate a '.' or '..'
+ * name, replacing the first character with '^' (the RISC OS "parent
+ * directory" character.)
+ */
+ for (i = dots = 0; i < obj->name_len; i++)
+ if (obj->name[i] == '/') {
+ obj->name[i] = '.';
+ dots++;
+ }
+
+ if (obj->name_len <= 2 && dots == obj->name_len)
+ obj->name[0] = '^';
+
+ obj->filetype = -1;
+
+ /*
+ * object is a file and is filetyped and timestamped?
+ * RISC OS 12-bit filetype is stored in load_address[19:8]
+ */
+ if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
+ (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
+ obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
+
+ /* optionally append the ,xyz hex filetype suffix */
+ if (ADFS_SB(dir->sb)->s_ftsuffix) {
+ __u16 filetype = obj->filetype;
+
+ obj->name[obj->name_len++] = ',';
+ obj->name[obj->name_len++] = hex_asc_lo(filetype >> 8);
+ obj->name[obj->name_len++] = hex_asc_lo(filetype >> 4);
+ obj->name[obj->name_len++] = hex_asc_lo(filetype >> 0);
+ }
+ }
+}
+
static int
adfs_readdir(struct file *file, struct dir_context *ctx)
{
@@ -100,37 +144,36 @@ out:
return ret;
}
-static int
-adfs_match(const struct qstr *name, struct object_info *obj)
+static unsigned char adfs_tolower(unsigned char c)
{
- int i;
-
- if (name->len != obj->name_len)
- return 0;
+ if (c >= 'A' && c <= 'Z')
+ c += 'a' - 'A';
+ return c;
+}
- for (i = 0; i < name->len; i++) {
- char c1, c2;
+static int __adfs_compare(const unsigned char *qstr, u32 qlen,
+ const char *str, u32 len)
+{
+ u32 i;
- c1 = name->name[i];
- c2 = obj->name[i];
+ if (qlen != len)
+ return 1;
- if (c1 >= 'A' && c1 <= 'Z')
- c1 += 'a' - 'A';
- if (c2 >= 'A' && c2 <= 'Z')
- c2 += 'a' - 'A';
+ for (i = 0; i < qlen; i++)
+ if (adfs_tolower(qstr[i]) != adfs_tolower(str[i]))
+ return 1;
- if (c1 != c2)
- return 0;
- }
- return 1;
+ return 0;
}
-static int
-adfs_dir_lookup_byname(struct inode *inode, const struct qstr *name, struct object_info *obj)
+static int adfs_dir_lookup_byname(struct inode *inode, const struct qstr *qstr,
+ struct object_info *obj)
{
struct super_block *sb = inode->i_sb;
const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
+ const unsigned char *name;
struct adfs_dir dir;
+ u32 name_len;
int ret;
ret = ops->read(sb, inode->i_ino, inode->i_size, &dir);
@@ -153,8 +196,10 @@ adfs_dir_lookup_byname(struct inode *inode, const struct qstr *name, struct obje
goto unlock_out;
ret = -ENOENT;
+ name = qstr->name;
+ name_len = qstr->len;
while (ops->getnext(&dir, obj) == 0) {
- if (adfs_match(name, obj)) {
+ if (!__adfs_compare(name, name_len, obj->name, obj->name_len)) {
ret = 0;
break;
}
@@ -179,30 +224,18 @@ const struct file_operations adfs_dir_operations = {
static int
adfs_hash(const struct dentry *parent, struct qstr *qstr)
{
- const unsigned int name_len = ADFS_SB(parent->d_sb)->s_namelen;
const unsigned char *name;
unsigned long hash;
- int i;
+ u32 len;
- if (qstr->len < name_len)
- return 0;
+ if (qstr->len > ADFS_SB(parent->d_sb)->s_namelen)
+ return -ENAMETOOLONG;
- /*
- * Truncate the name in place, avoids
- * having to define a compare function.
- */
- qstr->len = i = name_len;
+ len = qstr->len;
name = qstr->name;
hash = init_name_hash(parent);
- while (i--) {
- char c;
-
- c = *name++;
- if (c >= 'A' && c <= 'Z')
- c += 'a' - 'A';
-
- hash = partial_name_hash(c, hash);
- }
+ while (len--)
+ hash = partial_name_hash(adfs_tolower(*name++), hash);
qstr->hash = end_name_hash(hash);
return 0;
@@ -212,30 +245,10 @@ adfs_hash(const struct dentry *parent, struct qstr *qstr)
* Compare two names, taking note of the name length
* requirements of the underlying filesystem.
*/
-static int
-adfs_compare(const struct dentry *dentry,
- unsigned int len, const char *str, const struct qstr *name)
+static int adfs_compare(const struct dentry *dentry, unsigned int len,
+ const char *str, const struct qstr *qstr)
{
- int i;
-
- if (len != name->len)
- return 1;
-
- for (i = 0; i < name->len; i++) {
- char a, b;
-
- a = str[i];
- b = name->name[i];
-
- if (a >= 'A' && a <= 'Z')
- a += 'a' - 'A';
- if (b >= 'A' && b <= 'Z')
- b += 'a' - 'A';
-
- if (a != b)
- return 1;
- }
- return 0;
+ return __adfs_compare(qstr->name, qstr->len, str, len);
}
const struct dentry_operations adfs_dentry_operations = {
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c
index 382c9d7ad375..693f69ed3de3 100644
--- a/fs/adfs/dir_f.c
+++ b/fs/adfs/dir_f.c
@@ -47,21 +47,6 @@ static inline void adfs_writeval(unsigned char *p, int len, unsigned int val)
}
}
-static inline int adfs_readname(char *buf, char *ptr, int maxlen)
-{
- char *old_buf = buf;
-
- while ((unsigned char)*ptr >= ' ' && maxlen--) {
- if (*ptr == '/')
- *buf++ = '.';
- else
- *buf++ = *ptr;
- ptr++;
- }
-
- return buf - old_buf;
-}
-
#define ror13(v) ((v >> 13) | (v << 19))
#define dir_u8(idx) \
@@ -216,29 +201,23 @@ static inline void
adfs_dir2obj(struct adfs_dir *dir, struct object_info *obj,
struct adfs_direntry *de)
{
- obj->name_len = adfs_readname(obj->name, de->dirobname, ADFS_F_NAME_LEN);
+ unsigned int name_len;
+
+ for (name_len = 0; name_len < ADFS_F_NAME_LEN; name_len++) {
+ if (de->dirobname[name_len] < ' ')
+ break;
+
+ obj->name[name_len] = de->dirobname[name_len];
+ }
+
+ obj->name_len = name_len;
obj->file_id = adfs_readval(de->dirinddiscadd, 3);
obj->loadaddr = adfs_readval(de->dirload, 4);
obj->execaddr = adfs_readval(de->direxec, 4);
obj->size = adfs_readval(de->dirlen, 4);
obj->attr = de->newdiratts;
- obj->filetype = -1;
- /*
- * object is a file and is filetyped and timestamped?
- * RISC OS 12-bit filetype is stored in load_address[19:8]
- */
- if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
- (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
- obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
-
- /* optionally append the ,xyz hex filetype suffix */
- if (ADFS_SB(dir->sb)->s_ftsuffix)
- obj->name_len +=
- append_filetype_suffix(
- &obj->name[obj->name_len],
- obj->filetype);
- }
+ adfs_object_fixup(dir, obj);
}
/*
diff --git a/fs/adfs/dir_fplus.c b/fs/adfs/dir_fplus.c
index c92cfb638c18..97b9f28f459b 100644
--- a/fs/adfs/dir_fplus.c
+++ b/fs/adfs/dir_fplus.c
@@ -169,7 +169,7 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
(struct adfs_bigdirheader *) dir->bh_fplus[0]->b_data;
struct adfs_bigdirentry bde;
unsigned int offset;
- int i, ret = -ENOENT;
+ int ret = -ENOENT;
if (dir->pos >= le32_to_cpu(h->bigdirentries))
goto out;
@@ -193,27 +193,7 @@ adfs_fplus_getnext(struct adfs_dir *dir, struct object_info *obj)
offset += le32_to_cpu(bde.bigdirobnameptr);
dir_memcpy(dir, offset, obj->name, obj->name_len);
- for (i = 0; i < obj->name_len; i++)
- if (obj->name[i] == '/')
- obj->name[i] = '.';
-
- obj->filetype = -1;
-
- /*
- * object is a file and is filetyped and timestamped?
- * RISC OS 12-bit filetype is stored in load_address[19:8]
- */
- if ((0 == (obj->attr & ADFS_NDA_DIRECTORY)) &&
- (0xfff00000 == (0xfff00000 & obj->loadaddr))) {
- obj->filetype = (__u16) ((0x000fff00 & obj->loadaddr) >> 8);
-
- /* optionally append the ,xyz hex filetype suffix */
- if (ADFS_SB(dir->sb)->s_ftsuffix)
- obj->name_len +=
- append_filetype_suffix(
- &obj->name[obj->name_len],
- obj->filetype);
- }
+ adfs_object_fixup(dir, obj);
dir->pos += 1;
ret = 0;
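
The adfs changes above fold the '/' to '.' conversion and the optional ",xyz" filetype suffix into a single adfs_object_fixup() helper. A worked example of the suffix on a hypothetical buffer; hex_asc_lo() is the real kernel helper, so a 12-bit filetype of 0xfff yields ",fff" and a name like "Report" is exposed as "Report,fff".

#include <linux/kernel.h>

/* Sketch: append the RISC OS ",xyz" hex filetype suffix, as
 * adfs_object_fixup() above does in place on obj->name.
 * 'buf' and 'len' are hypothetical. */
static int example_append_suffix(char *buf, int len, u16 filetype)
{
	buf[len++] = ',';
	buf[len++] = hex_asc_lo(filetype >> 8);
	buf[len++] = hex_asc_lo(filetype >> 4);
	buf[len++] = hex_asc_lo(filetype >> 0);
	return len;
}
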
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 3959f08279e6..b8f9c83835d5 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1377,10 +1377,17 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
if (err && !nbytes)
break;
- if (write)
+ if (write) {
+ if (!capable(CAP_FSETID)) {
+ struct fuse_write_in *inarg;
+
+ inarg = &req->misc.write.in;
+ inarg->write_flags |= FUSE_WRITE_KILL_PRIV;
+ }
nres = fuse_send_write(req, io, pos, nbytes, owner);
- else
+ } else {
nres = fuse_send_read(req, io, pos, nbytes, owner);
+ }
if (!io->async)
fuse_release_user_pages(req, io->should_dirty);
@@ -3014,6 +3021,16 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
return ret;
}
+static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
+{
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
+
+ if (!err)
+ fuse_sync_writes(inode);
+
+ return err;
+}
+
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
loff_t length)
{
@@ -3042,12 +3059,10 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
inode_lock(inode);
if (mode & FALLOC_FL_PUNCH_HOLE) {
loff_t endbyte = offset + length - 1;
- err = filemap_write_and_wait_range(inode->i_mapping,
- offset, endbyte);
+
+ err = fuse_writeback_range(inode, offset, endbyte);
if (err)
goto out;
-
- fuse_sync_writes(inode);
}
}
@@ -3055,7 +3070,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
offset + length > i_size_read(inode)) {
err = inode_newsize_ok(inode, offset + length);
if (err)
- return err;
+ goto out;
}
if (!(mode & FALLOC_FL_KEEP_SIZE))
@@ -3103,6 +3118,7 @@ static ssize_t fuse_copy_file_range(struct file *file_in, loff_t pos_in,
{
struct fuse_file *ff_in = file_in->private_data;
struct fuse_file *ff_out = file_out->private_data;
+ struct inode *inode_in = file_inode(file_in);
struct inode *inode_out = file_inode(file_out);
struct fuse_inode *fi_out = get_fuse_inode(inode_out);
struct fuse_conn *fc = ff_in->fc;
@@ -3126,15 +3142,20 @@ static ssize_t fuse_copy_file_range(struct file *file_in, loff_t pos_in,
if (fc->no_copy_file_range)
return -EOPNOTSUPP;
+ if (fc->writeback_cache) {
+ inode_lock(inode_in);
+ err = fuse_writeback_range(inode_in, pos_in, pos_in + len);
+ inode_unlock(inode_in);
+ if (err)
+ return err;
+ }
+
inode_lock(inode_out);
if (fc->writeback_cache) {
- err = filemap_write_and_wait_range(inode_out->i_mapping,
- pos_out, pos_out + len);
+ err = fuse_writeback_range(inode_out, pos_out, pos_out + len);
if (err)
goto out;
-
- fuse_sync_writes(inode_out);
}
if (is_unstable)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f0857c056df1..f1ebcb42cbf5 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -137,7 +137,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- BUG_ON(test_bit(GLF_REVOKES, &gl->gl_flags));
+ BUG_ON(atomic_read(&gl->gl_revokes));
rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
smp_mb();
wake_up_glock(gl);
@@ -1798,7 +1798,7 @@ void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
state2str(gl->gl_target),
state2str(gl->gl_demote_state), dtime,
atomic_read(&gl->gl_ail_count),
- test_bit(GLF_REVOKES, &gl->gl_flags) ? 1 : 0,
+ atomic_read(&gl->gl_revokes),
(int)gl->gl_lockref.count, gl->gl_hold_time);
list_for_each_entry(gh, &gl->gl_holders, gh_list)
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 8c4b334d1be4..c9af93ac6c73 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -342,7 +342,6 @@ enum {
GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15,
GLF_INODE_CREATING = 16, /* Inode creation occurring */
- GLF_REVOKES = 17, /* Glock has revokes in queue */
};
struct gfs2_glock {
@@ -372,6 +371,7 @@ struct gfs2_glock {
struct list_head gl_lru;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
+ atomic_t gl_revokes;
struct delayed_work gl_work;
union {
/* For inode and iopen glocks only */
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 1360008c4ee9..c4c9700c366e 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -603,10 +603,8 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
gfs2_remove_from_ail(bd); /* drops ref on bh */
bd->bd_bh = NULL;
sdp->sd_log_num_revoke++;
- if (!test_bit(GLF_REVOKES, &gl->gl_flags)) {
- set_bit(GLF_REVOKES, &gl->gl_flags);
+ if (atomic_inc_return(&gl->gl_revokes) == 1)
gfs2_glock_hold(gl);
- }
set_bit(GLF_LFLUSH, &gl->gl_flags);
list_add(&bd->bd_list, &sdp->sd_log_revokes);
}
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index badb27ffc874..1921cda034fd 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -857,34 +857,19 @@ static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
struct list_head *head = &sdp->sd_log_revokes;
- struct gfs2_bufdata *bd, *tmp;
-
- /*
- * Glocks can be referenced repeatedly on the revoke list, but the list
- * only holds one reference. All glocks on the list will have the
- * GLF_REVOKES flag set initially.
- */
-
- list_for_each_entry_safe(bd, tmp, head, bd_list) {
- struct gfs2_glock *gl = bd->bd_gl;
+ struct gfs2_bufdata *bd;
+ struct gfs2_glock *gl;
- if (test_bit(GLF_REVOKES, &gl->gl_flags)) {
- /* Keep each glock on the list exactly once. */
- clear_bit(GLF_REVOKES, &gl->gl_flags);
- continue;
+ while (!list_empty(head)) {
+ bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
+ list_del_init(&bd->bd_list);
+ gl = bd->bd_gl;
+ if (atomic_dec_return(&gl->gl_revokes) == 0) {
+ clear_bit(GLF_LFLUSH, &gl->gl_flags);
+ gfs2_glock_queue_put(gl);
}
- list_del(&bd->bd_list);
- kmem_cache_free(gfs2_bufdata_cachep, bd);
- }
- list_for_each_entry_safe(bd, tmp, head, bd_list) {
- struct gfs2_glock *gl = bd->bd_gl;
-
- list_del(&bd->bd_list);
kmem_cache_free(gfs2_bufdata_cachep, bd);
- clear_bit(GLF_LFLUSH, &gl->gl_flags);
- gfs2_glock_queue_put(gl);
}
- /* the list is empty now */
}
static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 57761731be4f..a1a295b739fb 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -56,6 +56,7 @@ static void gfs2_init_glock_once(void *foo)
INIT_LIST_HEAD(&gl->gl_lru);
INIT_LIST_HEAD(&gl->gl_ail_list);
atomic_set(&gl->gl_ail_count, 0);
+ atomic_set(&gl->gl_revokes, 0);
}
static void gfs2_init_gl_aspace_once(void *foo)
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 2bac687d0ed9..b70cea5c8c59 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1474,7 +1474,7 @@ static void gfs2_final_release_pages(struct gfs2_inode *ip)
truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
truncate_inode_pages(&inode->i_data, 0);
- if (!test_bit(GLF_REVOKES, &gl->gl_flags)) {
+ if (atomic_read(&gl->gl_revokes) == 0) {
clear_bit(GLF_LFLUSH, &gl->gl_flags);
clear_bit(GLF_DIRTY, &gl->gl_flags);
}
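
The gfs2 changes above replace the GLF_REVOKES flag with a per-glock atomic counter, taking the glock reference on the 0 to 1 transition and dropping it on the 1 to 0 transition. A minimal sketch of that reference-holding pattern on a hypothetical object; atomic_inc_return()/atomic_dec_return() are the real atomics, the get/put helpers are assumed.

#include <linux/atomic.h>

struct example_obj {
	atomic_t pending;	/* like gl_revokes */
};

static void example_get(struct example_obj *obj);	/* hypothetical hold */
static void example_put(struct example_obj *obj);	/* hypothetical put */

static void example_add_work(struct example_obj *obj)
{
	/* First queued item pins the object. */
	if (atomic_inc_return(&obj->pending) == 1)
		example_get(obj);
}

static void example_complete_work(struct example_obj *obj)
{
	/* Last completed item releases the pin taken above. */
	if (atomic_dec_return(&obj->pending) == 0)
		example_put(obj);
}
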
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index c29cbef6b53f..e38f4af20950 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6932,7 +6932,6 @@ struct nfs4_lock_waiter {
struct task_struct *task;
struct inode *inode;
struct nfs_lowner *owner;
- bool notified;
};
static int
@@ -6954,13 +6953,13 @@ nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, vo
/* Make sure it's for the right inode */
if (nfs_compare_fh(NFS_FH(waiter->inode), &cbnl->cbnl_fh))
return 0;
-
- waiter->notified = true;
}
/* override "private" so we can use default_wake_function */
wait->private = waiter->task;
- ret = autoremove_wake_function(wait, mode, flags, key);
+ ret = woken_wake_function(wait, mode, flags, key);
+ if (ret)
+ list_del_init(&wait->entry);
wait->private = waiter;
return ret;
}
@@ -6969,7 +6968,6 @@ static int
nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
{
int status = -ERESTARTSYS;
- unsigned long flags;
struct nfs4_lock_state *lsp = request->fl_u.nfs4_fl.owner;
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs_client *clp = server->nfs_client;
@@ -6979,8 +6977,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
.s_dev = server->s_dev };
struct nfs4_lock_waiter waiter = { .task = current,
.inode = state->inode,
- .owner = &owner,
- .notified = false };
+ .owner = &owner};
wait_queue_entry_t wait;
/* Don't bother with waitqueue if we don't expect a callback */
@@ -6990,27 +6987,22 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
init_wait(&wait);
wait.private = &waiter;
wait.func = nfs4_wake_lock_waiter;
- add_wait_queue(q, &wait);
while(!signalled()) {
- waiter.notified = false;
+ add_wait_queue(q, &wait);
status = nfs4_proc_setlk(state, cmd, request);
- if ((status != -EAGAIN) || IS_SETLK(cmd))
+ if ((status != -EAGAIN) || IS_SETLK(cmd)) {
+ finish_wait(q, &wait);
break;
-
- status = -ERESTARTSYS;
- spin_lock_irqsave(&q->lock, flags);
- if (waiter.notified) {
- spin_unlock_irqrestore(&q->lock, flags);
- continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irqrestore(&q->lock, flags);
- freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
+ status = -ERESTARTSYS;
+ freezer_do_not_count();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, NFS4_LOCK_MAXTIMEOUT);
+ freezer_count();
+ finish_wait(q, &wait);
}
- finish_wait(q, &wait);
return status;
}
#else /* !CONFIG_NFS_V4_1 */
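
The NFS change above drops the hand-rolled "notified" flag in favour of woken_wake_function()/wait_woken(), which close the race between checking the condition and going to sleep. A minimal sketch of the generic idiom (not the NFS code itself) on a hypothetical wait queue and retryable operation; DEFINE_WAIT_FUNC(), add_wait_queue(), wait_woken() and finish_wait() are the real APIs.

#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/sched/signal.h>

static int example_try_once(void);	/* hypothetical operation */

static int example_wait_for_retry(wait_queue_head_t *q)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int status;

	add_wait_queue(q, &wait);
	for (;;) {
		status = example_try_once();
		if (status != -EAGAIN)
			break;
		/* Sleeps unless a wake-up already arrived since the entry
		 * was queued, so no hand-rolled "notified" flag is needed. */
		wait_woken(&wait, TASK_INTERRUPTIBLE, HZ);
		if (signal_pending(current)) {
			status = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(q, &wait);
	return status;
}
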
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 540a8b845145..340a6ad45914 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -426,7 +426,8 @@ static unsigned int ovl_get_inode_flags(struct inode *inode)
return ovl_iflags;
}
-static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
+static long ovl_ioctl_set_flags(struct file *file, unsigned int cmd,
+ unsigned long arg)
{
long ret;
struct inode *inode = file_inode(file);
@@ -456,7 +457,7 @@ static long ovl_ioctl_set_flags(struct file *file, unsigned long arg)
if (ret)
goto unlock;
- ret = ovl_real_ioctl(file, FS_IOC_SETFLAGS, arg);
+ ret = ovl_real_ioctl(file, cmd, arg);
ovl_copyflags(ovl_inode_real(inode), inode);
unlock:
@@ -474,11 +475,13 @@ static long ovl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch (cmd) {
case FS_IOC_GETFLAGS:
+ case FS_IOC_FSGETXATTR:
ret = ovl_real_ioctl(file, cmd, arg);
break;
case FS_IOC_SETFLAGS:
- ret = ovl_ioctl_set_flags(file, arg);
+ case FS_IOC_FSSETXATTR:
+ ret = ovl_ioctl_set_flags(file, cmd, arg);
break;
default:
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b48273e846ad..f7eba21effa5 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -777,6 +777,54 @@ struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
return inode;
}
+bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir)
+{
+ struct inode *key = d_inode(dir);
+ struct inode *trap;
+ bool res;
+
+ trap = ilookup5(sb, (unsigned long) key, ovl_inode_test, key);
+ if (!trap)
+ return false;
+
+ res = IS_DEADDIR(trap) && !ovl_inode_upper(trap) &&
+ !ovl_inode_lower(trap);
+
+ iput(trap);
+ return res;
+}
+
+/*
+ * Create an inode cache entry for layer root dir, that will intentionally
+ * fail ovl_verify_inode(), so any lookup that will find some layer root
+ * will fail.
+ */
+struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir)
+{
+ struct inode *key = d_inode(dir);
+ struct inode *trap;
+
+ if (!d_is_dir(dir))
+ return ERR_PTR(-ENOTDIR);
+
+ trap = iget5_locked(sb, (unsigned long) key, ovl_inode_test,
+ ovl_inode_set, key);
+ if (!trap)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(trap->i_state & I_NEW)) {
+ /* Conflicting layer roots? */
+ iput(trap);
+ return ERR_PTR(-ELOOP);
+ }
+
+ trap->i_mode = S_IFDIR;
+ trap->i_flags = S_DEAD;
+ unlock_new_inode(trap);
+
+ return trap;
+}
+
/*
* Does overlay inode need to be hashed by lower inode?
*/
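
ovl_get_trap_inode() above plants a deliberately dead inode in the inode cache, keyed by a layer root, so a later lookup that reaches that directory trips over it. A distilled sketch of the underlying keyed iget5_locked() pattern with hypothetical test/set callbacks; iget5_locked(), ilookup5() and unlock_new_inode() are the real VFS APIs.

#include <linux/fs.h>
#include <linux/err.h>

/* Hypothetical callbacks: match/store a void * key in i_private. */
static int example_test(struct inode *inode, void *key)
{
	return inode->i_private == key;
}

static int example_set(struct inode *inode, void *key)
{
	inode->i_private = key;
	return 0;
}

/* Sketch of the "trap" idea: cache a dead directory inode under 'key'
 * so that ilookup5(sb, (unsigned long)key, example_test, key) finds it
 * later and the caller can refuse the conflicting path with -ELOOP. */
static struct inode *example_get_trap(struct super_block *sb, void *key)
{
	struct inode *trap;

	trap = iget5_locked(sb, (unsigned long)key, example_test,
			    example_set, key);
	if (!trap)
		return ERR_PTR(-ENOMEM);
	if (!(trap->i_state & I_NEW)) {
		iput(trap);		/* someone already planted one */
		return ERR_PTR(-ELOOP);
	}
	trap->i_mode = S_IFDIR;
	trap->i_flags = S_DEAD;
	unlock_new_inode(trap);
	return trap;
}
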
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index efd372312ef1..badf039267a2 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -18,6 +18,7 @@
#include "overlayfs.h"
struct ovl_lookup_data {
+ struct super_block *sb;
struct qstr name;
bool is_dir;
bool opaque;
@@ -244,6 +245,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
if (!d->metacopy || d->last)
goto out;
} else {
+ if (ovl_lookup_trap_inode(d->sb, this)) {
+ /* Caught in a trap of overlapping layers */
+ err = -ELOOP;
+ goto out_err;
+ }
+
if (last_element)
d->is_dir = true;
if (d->last)
@@ -819,6 +826,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
int err;
bool metacopy = false;
struct ovl_lookup_data d = {
+ .sb = dentry->d_sb,
.name = dentry->d_name,
.is_dir = false,
.opaque = false,
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index d26efed9f80a..cec40077b522 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -270,6 +270,7 @@ void ovl_clear_flag(unsigned long flag, struct inode *inode);
bool ovl_test_flag(unsigned long flag, struct inode *inode);
bool ovl_inuse_trylock(struct dentry *dentry);
void ovl_inuse_unlock(struct dentry *dentry);
+bool ovl_is_inuse(struct dentry *dentry);
bool ovl_need_index(struct dentry *dentry);
int ovl_nlink_start(struct dentry *dentry);
void ovl_nlink_end(struct dentry *dentry);
@@ -376,6 +377,8 @@ struct ovl_inode_params {
struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev);
struct inode *ovl_lookup_inode(struct super_block *sb, struct dentry *real,
bool is_upper);
+bool ovl_lookup_trap_inode(struct super_block *sb, struct dentry *dir);
+struct inode *ovl_get_trap_inode(struct super_block *sb, struct dentry *dir);
struct inode *ovl_get_inode(struct super_block *sb,
struct ovl_inode_params *oip);
static inline void ovl_copyattr(struct inode *from, struct inode *to)
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index ec237035333a..6ed1ace8f8b3 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -29,6 +29,8 @@ struct ovl_sb {
struct ovl_layer {
struct vfsmount *mnt;
+ /* Trap in ovl inode cache */
+ struct inode *trap;
struct ovl_sb *fs;
/* Index of this layer in fs root (upper idx == 0) */
int idx;
@@ -65,6 +67,10 @@ struct ovl_fs {
/* Did we take the inuse lock? */
bool upperdir_locked;
bool workdir_locked;
+ /* Traps in ovl inode cache */
+ struct inode *upperdir_trap;
+ struct inode *workdir_trap;
+ struct inode *indexdir_trap;
/* Inode numbers in all layers do not use the high xino_bits */
unsigned int xino_bits;
};
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 5ec4fc2f5d7e..746ea36f3171 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -215,6 +215,9 @@ static void ovl_free_fs(struct ovl_fs *ofs)
{
unsigned i;
+ iput(ofs->indexdir_trap);
+ iput(ofs->workdir_trap);
+ iput(ofs->upperdir_trap);
dput(ofs->indexdir);
dput(ofs->workdir);
if (ofs->workdir_locked)
@@ -223,8 +226,10 @@ static void ovl_free_fs(struct ovl_fs *ofs)
if (ofs->upperdir_locked)
ovl_inuse_unlock(ofs->upper_mnt->mnt_root);
mntput(ofs->upper_mnt);
- for (i = 0; i < ofs->numlower; i++)
+ for (i = 0; i < ofs->numlower; i++) {
+ iput(ofs->lower_layers[i].trap);
mntput(ofs->lower_layers[i].mnt);
+ }
for (i = 0; i < ofs->numlowerfs; i++)
free_anon_bdev(ofs->lower_fs[i].pseudo_dev);
kfree(ofs->lower_layers);
@@ -983,7 +988,26 @@ static const struct xattr_handler *ovl_xattr_handlers[] = {
NULL
};
-static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
+static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
+ struct inode **ptrap, const char *name)
+{
+ struct inode *trap;
+ int err;
+
+ trap = ovl_get_trap_inode(sb, dir);
+ err = PTR_ERR(trap);
+ if (IS_ERR(trap)) {
+ if (err == -ELOOP)
+ pr_err("overlayfs: conflicting %s path\n", name);
+ return err;
+ }
+
+ *ptrap = trap;
+ return 0;
+}
+
+static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs,
+ struct path *upperpath)
{
struct vfsmount *upper_mnt;
int err;
@@ -1003,6 +1027,11 @@ static int ovl_get_upper(struct ovl_fs *ofs, struct path *upperpath)
if (err)
goto out;
+ err = ovl_setup_trap(sb, upperpath->dentry, &ofs->upperdir_trap,
+ "upperdir");
+ if (err)
+ goto out;
+
upper_mnt = clone_private_mount(upperpath);
err = PTR_ERR(upper_mnt);
if (IS_ERR(upper_mnt)) {
@@ -1029,7 +1058,8 @@ out:
return err;
}
-static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
+static int ovl_make_workdir(struct super_block *sb, struct ovl_fs *ofs,
+ struct path *workpath)
{
struct vfsmount *mnt = ofs->upper_mnt;
struct dentry *temp;
@@ -1044,6 +1074,10 @@ static int ovl_make_workdir(struct ovl_fs *ofs, struct path *workpath)
if (!ofs->workdir)
goto out;
+ err = ovl_setup_trap(sb, ofs->workdir, &ofs->workdir_trap, "workdir");
+ if (err)
+ goto out;
+
/*
* Upper should support d_type, else whiteouts are visible. Given
* workdir and upper are on same fs, we can do iterate_dir() on
@@ -1104,7 +1138,8 @@ out:
return err;
}
-static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
+static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs,
+ struct path *upperpath)
{
int err;
struct path workpath = { };
@@ -1135,19 +1170,16 @@ static int ovl_get_workdir(struct ovl_fs *ofs, struct path *upperpath)
pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n");
}
- err = ovl_make_workdir(ofs, &workpath);
- if (err)
- goto out;
+ err = ovl_make_workdir(sb, ofs, &workpath);
- err = 0;
out:
path_put(&workpath);
return err;
}
-static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
- struct path *upperpath)
+static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
+ struct ovl_entry *oe, struct path *upperpath)
{
struct vfsmount *mnt = ofs->upper_mnt;
int err;
@@ -1166,6 +1198,11 @@ static int ovl_get_indexdir(struct ovl_fs *ofs, struct ovl_entry *oe,
ofs->indexdir = ovl_workdir_create(ofs, OVL_INDEXDIR_NAME, true);
if (ofs->indexdir) {
+ err = ovl_setup_trap(sb, ofs->indexdir, &ofs->indexdir_trap,
+ "indexdir");
+ if (err)
+ goto out;
+
/*
* Verify upper root is exclusively associated with index dir.
* Older kernels stored upper fh in "trusted.overlay.origin"
@@ -1253,8 +1290,8 @@ static int ovl_get_fsid(struct ovl_fs *ofs, const struct path *path)
return ofs->numlowerfs;
}
-static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
- unsigned int numlower)
+static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs,
+ struct path *stack, unsigned int numlower)
{
int err;
unsigned int i;
@@ -1272,16 +1309,28 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
for (i = 0; i < numlower; i++) {
struct vfsmount *mnt;
+ struct inode *trap;
int fsid;
err = fsid = ovl_get_fsid(ofs, &stack[i]);
if (err < 0)
goto out;
+ err = -EBUSY;
+ if (ovl_is_inuse(stack[i].dentry)) {
+ pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n");
+ goto out;
+ }
+
+ err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir");
+ if (err)
+ goto out;
+
mnt = clone_private_mount(&stack[i]);
err = PTR_ERR(mnt);
if (IS_ERR(mnt)) {
pr_err("overlayfs: failed to clone lowerpath\n");
+ iput(trap);
goto out;
}
@@ -1291,6 +1340,7 @@ static int ovl_get_lower_layers(struct ovl_fs *ofs, struct path *stack,
*/
mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
+ ofs->lower_layers[ofs->numlower].trap = trap;
ofs->lower_layers[ofs->numlower].mnt = mnt;
ofs->lower_layers[ofs->numlower].idx = i + 1;
ofs->lower_layers[ofs->numlower].fsid = fsid;
@@ -1385,7 +1435,7 @@ static struct ovl_entry *ovl_get_lowerstack(struct super_block *sb,
goto out_err;
}
- err = ovl_get_lower_layers(ofs, stack, numlower);
+ err = ovl_get_lower_layers(sb, ofs, stack, numlower);
if (err)
goto out_err;
@@ -1417,6 +1467,85 @@ out_err:
goto out;
}
+/*
+ * Check if this layer root is a descendant of:
+ * - another layer of this overlayfs instance
+ * - upper/work dir of any overlayfs instance
+ * - a disconnected dentry (detached root)
+ */
+static int ovl_check_layer(struct super_block *sb, struct dentry *dentry,
+ const char *name)
+{
+ struct dentry *next, *parent;
+ bool is_root = false;
+ int err = 0;
+
+ if (!dentry || dentry == dentry->d_sb->s_root)
+ return 0;
+
+ next = dget(dentry);
+ /* Walk back ancestors to fs root (inclusive) looking for traps */
+ do {
+ parent = dget_parent(next);
+ is_root = (parent == next);
+ if (ovl_is_inuse(parent)) {
+ err = -EBUSY;
+ pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n",
+ name);
+ } else if (ovl_lookup_trap_inode(sb, parent)) {
+ err = -ELOOP;
+ pr_err("overlayfs: overlapping %s path\n", name);
+ }
+ dput(next);
+ next = parent;
+ } while (!err && !is_root);
+
+ /* Did we really walk to fs root or found a detached root? */
+ if (!err && next != dentry->d_sb->s_root) {
+ err = -ESTALE;
+ pr_err("overlayfs: disconnected %s path\n", name);
+ }
+
+ dput(next);
+
+ return err;
+}
+
+/*
+ * Check if any of the layers or work dirs overlap.
+ */
+static int ovl_check_overlapping_layers(struct super_block *sb,
+ struct ovl_fs *ofs)
+{
+ int i, err;
+
+ if (ofs->upper_mnt) {
+ err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir");
+ if (err)
+ return err;
+
+ /*
+ * Checking workbasedir avoids hitting ovl_is_inuse(parent) of
+ * this instance and covers overlapping work and index dirs,
+ * unless work or index dir have been moved since created inside
+ * workbasedir. In that case, we already have their traps in
+ * inode cache and we will catch that case on lookup.
+ */
+ err = ovl_check_layer(sb, ofs->workbasedir, "workdir");
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ofs->numlower; i++) {
+ err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root,
+ "lowerdir");
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int ovl_fill_super(struct super_block *sb, void *data, int silent)
{
struct path upperpath = { };
@@ -1456,17 +1585,20 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (ofs->config.xino != OVL_XINO_OFF)
ofs->xino_bits = BITS_PER_LONG - 32;
+ /* alloc/destroy_inode needed for setting up traps in inode cache */
+ sb->s_op = &ovl_super_operations;
+
if (ofs->config.upperdir) {
if (!ofs->config.workdir) {
pr_err("overlayfs: missing 'workdir'\n");
goto out_err;
}
- err = ovl_get_upper(ofs, &upperpath);
+ err = ovl_get_upper(sb, ofs, &upperpath);
if (err)
goto out_err;
- err = ovl_get_workdir(ofs, &upperpath);
+ err = ovl_get_workdir(sb, ofs, &upperpath);
if (err)
goto out_err;
@@ -1487,7 +1619,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= SB_RDONLY;
if (!(ovl_force_readonly(ofs)) && ofs->config.index) {
- err = ovl_get_indexdir(ofs, oe, &upperpath);
+ err = ovl_get_indexdir(sb, ofs, oe, &upperpath);
if (err)
goto out_free_oe;
@@ -1500,6 +1632,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
}
+ err = ovl_check_overlapping_layers(sb, ofs);
+ if (err)
+ goto out_free_oe;
+
/* Show index=off in /proc/mounts for forced r/o mount */
if (!ofs->indexdir) {
ofs->config.index = false;
@@ -1521,7 +1657,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
sb->s_magic = OVERLAYFS_SUPER_MAGIC;
- sb->s_op = &ovl_super_operations;
sb->s_xattr = ovl_xattr_handlers;
sb->s_fs_info = ofs;
sb->s_flags |= SB_POSIXACL;
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 4035e640f402..e135064e87ad 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -652,6 +652,18 @@ void ovl_inuse_unlock(struct dentry *dentry)
}
}
+bool ovl_is_inuse(struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+ bool inuse;
+
+ spin_lock(&inode->i_lock);
+ inuse = (inode->i_state & I_OVL_INUSE);
+ spin_unlock(&inode->i_lock);
+
+ return inuse;
+}
+
/*
* Does this overlay dentry need to be indexed on copy up?
*/
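
ovl_check_layer() in the super.c hunks above walks from a layer root up to its filesystem root looking for traps and in-use marks. A minimal sketch of the dget_parent() ancestor walk on its own, with a hypothetical per-ancestor predicate; dget(), dget_parent() and dput() are the real dcache APIs.

#include <linux/dcache.h>

static bool example_is_bad(struct dentry *d);	/* hypothetical check */

/* Sketch: visit every ancestor of 'dentry' up to and including the fs
 * root, always holding a reference on the dentry being examined. */
static int example_walk_ancestors(struct dentry *dentry)
{
	struct dentry *next = dget(dentry), *parent;
	bool is_root;
	int err = 0;

	do {
		parent = dget_parent(next);
		is_root = (parent == next);
		if (example_is_bad(parent))
			err = -EBUSY;
		dput(next);
		next = parent;
	} while (!err && !is_root);

	dput(next);
	return err;
}
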
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 9f83686db0be..3d7024662d29 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -335,8 +335,10 @@ static void allocate_buf_for_compression(void)
static void free_buf_for_compression(void)
{
- if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
+ if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm) {
crypto_free_comp(tfm);
+ tfm = NULL;
+ }
kfree(big_oops_buf);
big_oops_buf = NULL;
big_oops_buf_sz = 0;
@@ -594,7 +596,8 @@ int pstore_register(struct pstore_info *psi)
return -EINVAL;
}
- allocate_buf_for_compression();
+ if (psi->flags & PSTORE_FLAGS_DMESG)
+ allocate_buf_for_compression();
if (pstore_is_mounted())
pstore_get_records(0);
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ade964df5a68..5b7709894415 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -786,26 +786,36 @@ static int ramoops_probe(struct platform_device *pdev)
cxt->pstore.data = cxt;
/*
- * Since bufsize is only used for dmesg crash dumps, it
- * must match the size of the dprz record (after PRZ header
- * and ECC bytes have been accounted for).
+ * Prepare frontend flags based on which areas are initialized.
+ * For ramoops_init_przs() cases, the "max count" variable tells
+ * if there are regions present. For ramoops_init_prz() cases,
+ * the single region size is how to check.
*/
- cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
- cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
- if (!cxt->pstore.buf) {
- pr_err("cannot allocate pstore crash dump buffer\n");
- err = -ENOMEM;
- goto fail_clear;
- }
-
- cxt->pstore.flags = PSTORE_FLAGS_DMESG;
+ cxt->pstore.flags = 0;
+ if (cxt->max_dump_cnt)
+ cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
if (cxt->console_size)
cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
- if (cxt->ftrace_size)
+ if (cxt->max_ftrace_cnt)
cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
if (cxt->pmsg_size)
cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
+ /*
+ * Since bufsize is only used for dmesg crash dumps, it
+ * must match the size of the dprz record (after PRZ header
+ * and ECC bytes have been accounted for).
+ */
+ if (cxt->pstore.flags & PSTORE_FLAGS_DMESG) {
+ cxt->pstore.bufsize = cxt->dprzs[0]->buffer_size;
+ cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
+ if (!cxt->pstore.buf) {
+ pr_err("cannot allocate pstore crash dump buffer\n");
+ err = -ENOMEM;
+ goto fail_clear;
+ }
+ }
+
err = pstore_register(&cxt->pstore);
if (err) {
pr_err("registering with pstore failed\n");
diff --git a/fs/xfs/scrub/ialloc.c b/fs/xfs/scrub/ialloc.c
index 693eb51f5efb..9b47117180cb 100644
--- a/fs/xfs/scrub/ialloc.c
+++ b/fs/xfs/scrub/ialloc.c
@@ -252,7 +252,8 @@ xchk_iallocbt_check_cluster(
ir_holemask = (irec->ir_holemask & cluster_mask);
imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
- imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino);
+ imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
+ mp->m_sb.sb_inodelog;
if (imap.im_boffset != 0 && cluster_base != 0) {
ASSERT(imap.im_boffset == 0 || cluster_base == 0);
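
A worked example of the im_boffset fix above, assuming hypothetical 512-byte inodes (sb_inodelog == 9):

/* XFS_INO_TO_OFFSET() returns the inode's index within the cluster;
 * index 3 with 512-byte inodes lands at byte offset 3 << 9 == 1536,
 * which is the unit im_boffset is expressed in. */
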
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 457ced3ee3e1..2466b0f5b6c4 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -2069,7 +2069,7 @@ xlog_print_tic_res(
/* match with XLOG_REG_TYPE_* in xfs_log.h */
#define REG_TYPE_STR(type, str) [XLOG_REG_TYPE_##type] = str
- static char *res_type_str[XLOG_REG_TYPE_MAX + 1] = {
+ static char *res_type_str[] = {
REG_TYPE_STR(BFORMAT, "bformat"),
REG_TYPE_STR(BCHUNK, "bchunk"),
REG_TYPE_STR(EFI_FORMAT, "efi_format"),
@@ -2089,8 +2089,15 @@ xlog_print_tic_res(
REG_TYPE_STR(UNMOUNT, "unmount"),
REG_TYPE_STR(COMMIT, "commit"),
REG_TYPE_STR(TRANSHDR, "trans header"),
- REG_TYPE_STR(ICREATE, "inode create")
+ REG_TYPE_STR(ICREATE, "inode create"),
+ REG_TYPE_STR(RUI_FORMAT, "rui_format"),
+ REG_TYPE_STR(RUD_FORMAT, "rud_format"),
+ REG_TYPE_STR(CUI_FORMAT, "cui_format"),
+ REG_TYPE_STR(CUD_FORMAT, "cud_format"),
+ REG_TYPE_STR(BUI_FORMAT, "bui_format"),
+ REG_TYPE_STR(BUD_FORMAT, "bud_format"),
};
+ BUILD_BUG_ON(ARRAY_SIZE(res_type_str) != XLOG_REG_TYPE_MAX + 1);
#undef REG_TYPE_STR
xfs_warn(mp, "ticket reservation summary:");
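
The xfs_log.c change pairs a designated-initializer string table with a BUILD_BUG_ON() so that growing the type space without adding a string fails at compile time. A minimal sketch of the same pattern with a hypothetical enum; BUILD_BUG_ON(), ARRAY_SIZE() and pr_info() are the real kernel macros.

#include <linux/kernel.h>
#include <linux/bug.h>

/* Hypothetical enum standing in for XLOG_REG_TYPE_*. */
enum example_type {
	EX_TYPE_A = 1,
	EX_TYPE_B,
	EX_TYPE_C,
	EX_TYPE_MAX = EX_TYPE_C,
};

#define EX_TYPE_STR(type, str)	[EX_TYPE_##type] = str

static void example_print_type(enum example_type t)
{
	static const char *type_str[] = {
		EX_TYPE_STR(A, "a"),
		EX_TYPE_STR(B, "b"),
		EX_TYPE_STR(C, "c"),
	};
	/* Fails to build if EX_TYPE_MAX grows without a new entry. */
	BUILD_BUG_ON(ARRAY_SIZE(type_str) != EX_TYPE_MAX + 1);

	pr_info("type %d is %s\n", t, type_str[t]);
}
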
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index f9c94c2a1364..f7bbd0b0ecd1 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1185,6 +1185,14 @@ struct drm_plane_helper_funcs {
* current one with the new plane configurations in the new
* plane_state.
*
+ * Drivers should also swap the framebuffers between current plane
+ * state (&drm_plane.state) and new_state.
+ * This is required since cleanup for async commits is performed on
+ * the new state, rather than old state like for traditional commits.
+ * Since we want to give up the reference on the current (old) fb
+ * instead of our brand new one, swap them in the driver during the
+ * async commit.
+ *
* FIXME:
* - It only works for single plane updates
* - Async Pageflips are not supported yet
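
The documentation added above asks drivers to swap framebuffers between the current and new plane state in their async path. A hypothetical sketch of such a hook, matching the atomic_async_update signature this header documents; swap() is the real kernel macro, everything about the driver itself is assumed.

#include <linux/kernel.h>	/* swap() */
#include <drm/drm_plane.h>
#include <drm/drm_framebuffer.h>

/* Hypothetical driver hook: hand the new fb to the current state so
 * the async cleanup, which runs on new_state, releases the old fb
 * instead of the brand new one. */
static void example_plane_atomic_async_update(struct drm_plane *plane,
					      struct drm_plane_state *new_state)
{
	struct drm_plane_state *cur = plane->state;

	swap(cur->fb, new_state->fb);
	cur->crtc_x = new_state->crtc_x;
	cur->crtc_y = new_state->crtc_y;
	/* ... then program the hardware from 'cur' ... */
}
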
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 11e215d7937e..d71b079bb021 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -106,6 +106,8 @@ enum {
CFTYPE_WORLD_WRITABLE = (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
CFTYPE_DEBUG = (1 << 5), /* create when cgroup_debug */
+ CFTYPE_SYMLINKED = (1 << 6), /* pointed to by symlink too */
+
/* internal flags, do not use outside cgroup core proper */
__CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */
__CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */
@@ -543,6 +545,7 @@ struct cftype {
* end of cftype array.
*/
char name[MAX_CFTYPE_NAME];
+ char link_name[MAX_CFTYPE_NAME];
unsigned long private;
/*
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 3813fe45effd..fcb1386bb0d4 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -201,10 +201,14 @@ enum cpuhp_smt_control {
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
extern void cpu_smt_check_topology(void);
+extern int cpuhp_smt_enable(void);
+extern int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval);
#else
# define cpu_smt_control (CPU_SMT_NOT_IMPLEMENTED)
static inline void cpu_smt_disable(bool force) { }
static inline void cpu_smt_check_topology(void) { }
+static inline int cpuhp_smt_enable(void) { return 0; }
+static inline int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) { return 0; }
#endif
/*
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 603a02e5a8cb..e46e18c47d41 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -20,18 +20,6 @@
#define SJA1105_LINKLOCAL_FILTER_B 0x011B19000000ull
#define SJA1105_LINKLOCAL_FILTER_B_MASK 0xFFFFFF000000ull
-enum sja1105_frame_type {
- SJA1105_FRAME_TYPE_NORMAL = 0,
- SJA1105_FRAME_TYPE_LINK_LOCAL,
-};
-
-struct sja1105_skb_cb {
- enum sja1105_frame_type type;
-};
-
-#define SJA1105_SKB_CB(skb) \
- ((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
-
struct sja1105_port {
struct dsa_port *dp;
int mgmt_slot;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0e8834ac32b7..dd0b5f4e1e45 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -99,6 +99,17 @@ extern int mmap_rnd_compat_bits __read_mostly;
#include <asm/pgtable.h>
#include <asm/processor.h>
+/*
+ * Architectures that support memory tagging (assigning tags to memory regions,
+ * embedding these tags into addresses that point to these memory regions, and
+ * checking that the memory and the pointer tags match on memory accesses)
+ * redefine this macro to strip tags from pointers.
+ * It's defined as noop for arcitectures that don't support memory tagging.
+ */
+#ifndef untagged_addr
+#define untagged_addr(addr) (addr)
+#endif
+
#ifndef __pa_symbol
#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif
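
The untagged_addr() stub above is a no-op; an architecture that keeps a tag in the top bits of user pointers would override it. A simplified, hypothetical override for an architecture storing an 8-bit tag in bits 63:56 (a real implementation must also leave kernel addresses intact, e.g. by sign-extending rather than masking).

#include <linux/types.h>

/* Hypothetical arch override: drop an 8-bit tag held in the top byte
 * of a user address.  Simplified for illustration only. */
#define untagged_addr(addr) \
	((__typeof__(addr))((u64)(addr) & ~(0xffULL << 56)))
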
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 922bb6848813..b25d20822e75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -56,14 +56,12 @@ void __rcu_read_unlock(void);
static inline void __rcu_read_lock(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
- preempt_disable();
+ preempt_disable();
}
static inline void __rcu_read_unlock(void)
{
- if (IS_ENABLED(CONFIG_PREEMPT_COUNT))
- preempt_enable();
+ preempt_enable();
}
static inline int rcu_preempt_depth(void)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 4a2ffd678887..8594001e8be8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -227,11 +227,42 @@ static inline void pm_set_resume_via_firmware(void)
pm_suspend_global_flags |= PM_SUSPEND_FLAG_FW_RESUME;
}
+/**
+ * pm_suspend_via_firmware - Check if platform firmware will suspend the system.
+ *
+ * To be called during system-wide power management transitions to sleep states
+ * or during the subsequent system-wide transitions back to the working state.
+ *
+ * Return 'true' if the platform firmware is going to be invoked at the end of
+ * the system-wide power management transition (to a sleep state) in progress in
+ * order to complete it, or if the platform firmware has been invoked in order
+ * to complete the last (or preceding) transition of the system to a sleep
+ * state.
+ *
+ * This matters if the caller needs or wants to carry out some special actions
+ * depending on whether or not control will be passed to the platform firmware
+ * subsequently (for example, the device may need to be reset before letting the
+ * platform firmware manipulate it, which is not necessary when the platform
+ * firmware is not going to be invoked) or when such special actions may have
+ * been carried out during the preceding transition of the system to a sleep
+ * state (as they may need to be taken into account).
+ */
static inline bool pm_suspend_via_firmware(void)
{
return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_SUSPEND);
}
+/**
+ * pm_resume_via_firmware - Check if platform firmware has woken up the system.
+ *
+ * To be called during system-wide power management transitions from sleep
+ * states.
+ *
+ * Return 'true' if the platform firmware has passed control to the kernel at
+ * the beginning of the system-wide power management transition in progress, so
+ * the event that woke up the system from sleep has been handled by the platform
+ * firmware.
+ */
static inline bool pm_resume_via_firmware(void)
{
return !!(pm_suspend_global_flags & PM_SUSPEND_FLAG_FW_RESUME);
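
The new kernel-doc above spells out when a driver would consult these helpers. A minimal hypothetical use in a suspend callback; pm_suspend_via_firmware() and dev_get_drvdata() are the real APIs, the device type and reset helper are assumed.

#include <linux/device.h>
#include <linux/suspend.h>

struct example_dev;					/* hypothetical */
static void example_hw_reset(struct example_dev *ed);	/* hypothetical */

static int example_suspend_noirq(struct device *dev)
{
	struct example_dev *ed = dev_get_drvdata(dev);

	/* Reset only when platform firmware takes over after us; if the
	 * kernel keeps control, the device state can be preserved. */
	if (pm_suspend_via_firmware())
		example_hw_reset(ed);

	return 0;
}
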
diff --git a/include/math-emu/op-2.h b/include/math-emu/op-2.h
index 13a374f51a22..244522b02076 100644
--- a/include/math-emu/op-2.h
+++ b/include/math-emu/op-2.h
@@ -567,16 +567,13 @@
*/
#define _FP_FRAC_ASSEMBLE_2(r, X, rsize) \
- do { \
- if (rsize <= _FP_W_TYPE_SIZE) \
- r = X##_f0; \
- else \
- { \
- r = X##_f1; \
- r <<= _FP_W_TYPE_SIZE; \
- r += X##_f0; \
- } \
- } while (0)
+ (void) (((rsize) <= _FP_W_TYPE_SIZE) \
+ ? ({ (r) = X##_f0; }) \
+ : ({ \
+ (r) = X##_f1; \
+ (r) <<= _FP_W_TYPE_SIZE; \
+ (r) += X##_f0; \
+ }))
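
The new form relies on GCC's statement-expression extension to turn the macro from a statement into an expression of type void. The same transformation on a toy macro (illustrative only; the SET_MIN_* names are made up):

	/* do-while form: a statement, cannot appear inside an expression */
	#define SET_MIN_STMT(r, a, b)	do { (r) = ((a) < (b)) ? (a) : (b); } while (0)

	/* statement-expression form (GCC extension), as used above: each
	 * branch is itself an expression, so the whole macro is one too
	 */
	#define SET_MIN_EXPR(r, a, b) \
		((void)(((a) < (b)) ? ({ (r) = (a); }) : ({ (r) = (b); })))
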
#define _FP_FRAC_DISASSEMBLE_2(X, r, rsize) \
do { \
diff --git a/include/math-emu/op-common.h b/include/math-emu/op-common.h
index 6bdf8c61d221..f37d12877754 100644
--- a/include/math-emu/op-common.h
+++ b/include/math-emu/op-common.h
@@ -795,11 +795,12 @@ do { \
ur_ = (unsigned rtype) -r; \
else \
ur_ = (unsigned rtype) r; \
- if (rsize <= _FP_W_TYPE_SIZE) \
- __FP_CLZ(X##_e, ur_); \
- else \
- __FP_CLZ_2(X##_e, (_FP_W_TYPE)(ur_ >> _FP_W_TYPE_SIZE), \
- (_FP_W_TYPE)ur_); \
+ (void) (((rsize) <= _FP_W_TYPE_SIZE) \
+ ? ({ __FP_CLZ(X##_e, ur_); }) \
+ : ({ \
+ __FP_CLZ_2(X##_e, (_FP_W_TYPE)(ur_ >> _FP_W_TYPE_SIZE), \
+ (_FP_W_TYPE)ur_); \
+ })); \
if (rsize < _FP_W_TYPE_SIZE) \
X##_e -= (_FP_W_TYPE_SIZE - rsize); \
X##_e = rsize - X##_e - 1; \
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 3fbc9894a39a..855b352b660f 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -259,8 +259,7 @@ static inline u32 rt6_get_cookie(const struct rt6_info *rt)
rcu_read_lock();
from = rcu_dereference(rt->from);
- if (from && (rt->rt6i_flags & RTF_PCPU ||
- unlikely(!list_empty(&rt->rt6i_uncached))))
+ if (from)
fib6_get_cookie_safe(from, &cookie);
rcu_read_unlock();
diff --git a/include/net/tls.h b/include/net/tls.h
index 39ea62f0c1f6..4a55ce6a303f 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -209,6 +209,10 @@ struct tls_offload_context_tx {
(ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
+enum tls_context_flags {
+ TLS_RX_SYNC_RUNNING = 0,
+};
+
struct cipher_context {
char *iv;
char *rec_seq;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0742095355f2..54873085f2da 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2698,6 +2698,7 @@ struct ib_client {
const char *name;
void (*add) (struct ib_device *);
void (*remove)(struct ib_device *, void *client_data);
+ void (*rename)(struct ib_device *dev, void *client_data);
/* Returns the net_dev belonging to this ib_client and matching the
* given parameters.
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 19fb55e3c73e..2971d29a42e4 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -130,6 +130,9 @@
* 7.30
* - add FUSE_EXPLICIT_INVAL_DATA
* - add FUSE_IOCTL_COMPAT_X32
+ *
+ * 7.31
+ * - add FUSE_WRITE_KILL_PRIV flag
*/
#ifndef _LINUX_FUSE_H
@@ -165,7 +168,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 30
+#define FUSE_KERNEL_MINOR_VERSION 31
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -327,9 +330,11 @@ struct fuse_file_lock {
*
* FUSE_WRITE_CACHE: delayed write from page cache, file handle is guessed
* FUSE_WRITE_LOCKOWNER: lock_owner field is valid
+ * FUSE_WRITE_KILL_PRIV: kill suid and sgid bits
*/
#define FUSE_WRITE_CACHE (1 << 0)
#define FUSE_WRITE_LOCKOWNER (1 << 1)
+#define FUSE_WRITE_KILL_PRIV (1 << 2)
/**
* Read flags
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 8ac292cf4d00..204ab9b4ae67 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -413,6 +413,10 @@ struct hl_debug_params_spmu {
#define HL_DEBUG_OP_SPMU 5
/* Opcode for timestamp */
#define HL_DEBUG_OP_TIMESTAMP 6
+/* Opcode for setting the device into or out of debug mode. The enable
+ * variable should be 1 for enabling debug mode and 0 for disabling it
+ */
+#define HL_DEBUG_OP_SET_MODE 7
struct hl_debug_args {
/*
@@ -574,8 +578,22 @@ struct hl_debug_args {
*
* This IOCTL allows the user to get debug traces from the chip.
*
- * The user needs to provide the register index and essential data such as
- * buffer address and size.
+ * Before the user can send configuration requests to the various
+ * debug/profile engines, it needs to set the device into debug mode.
+ * This is because the debug/profile infrastructure is a shared component in
+ * the device and we can't allow multiple users to access it at the same time.
+ *
+ * Once a user sets the device into debug mode, the driver won't allow other
+ * users to "work" with the device, i.e. open a FD. If multiple users already
+ * have the device open, the driver won't allow any of them to debug the device.
+ *
+ * For each configuration request, the user needs to provide the register index
+ * and essential data such as buffer address and size.
+ *
+ * Once the user has finished using the debug/profile engines, it should
+ * set the device into non-debug mode, i.e. disable debug mode.
+ *
+ * The driver can decide to "kick out" a user that abuses this interface.
*
*/
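
In userspace, the flow described above might look roughly like this (a hedged sketch; the hl_debug_args field names .op and .enable are assumptions based on the comment, not verified against the full header):

	#include <sys/ioctl.h>
	#include <string.h>

	static int hl_set_debug_mode(int fd, int enable)
	{
		struct hl_debug_args args;

		memset(&args, 0, sizeof(args));
		args.op = HL_DEBUG_OP_SET_MODE;		/* assumed field name */
		args.enable = enable;			/* 1 = enter, 0 = leave debug mode */
		return ioctl(fd, HL_IOCTL_DEBUG, &args);
	}
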
#define HL_IOCTL_DEBUG \
diff --git a/init/Kconfig b/init/Kconfig
index 36894c9fb420..0e2344389501 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -580,15 +580,14 @@ config IKCONFIG_PROC
This option enables access to the kernel configuration file
through /proc/config.gz.
-config IKHEADERS_PROC
- tristate "Enable kernel header artifacts through /proc/kheaders.tar.xz"
- depends on PROC_FS
- help
- This option enables access to the kernel header and other artifacts that
- are generated during the build process. These can be used to build eBPF
- tracing programs, or similar programs. If you build the headers as a
- module, a module called kheaders.ko is built which can be loaded on-demand
- to get access to the headers.
+config IKHEADERS
+ tristate "Enable kernel headers through /sys/kernel/kheaders.tar.xz"
+ depends on SYSFS
+ help
+ This option enables access to the in-kernel headers that are generated during
+ the build process. These can be used to build eBPF tracing programs,
+ or similar programs. If you build the headers as a module, a module called
+ kheaders.ko is built which can be loaded on-demand to get access to headers.
config LOG_BUF_SHIFT
int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
diff --git a/kernel/Makefile b/kernel/Makefile
index 33824f0385b3..a8d923b5481b 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -71,7 +71,7 @@ obj-$(CONFIG_UTS_NS) += utsname.o
obj-$(CONFIG_USER_NS) += user_namespace.o
obj-$(CONFIG_PID_NS) += pid_namespace.o
obj-$(CONFIG_IKCONFIG) += configs.o
-obj-$(CONFIG_IKHEADERS_PROC) += kheaders.o
+obj-$(CONFIG_IKHEADERS) += kheaders.o
obj-$(CONFIG_SMP) += stop_machine.o
obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
obj-$(CONFIG_AUDIT) += audit.o auditfilter.o
@@ -127,7 +127,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
$(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz
-cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_ikh_data.sh $@
+cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@
$(obj)/kheaders_data.tar.xz: FORCE
$(call cmd,genikh)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 426a0026225c..155048b0eca2 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -1460,8 +1460,8 @@ struct cgroup *task_cgroup_from_root(struct task_struct *task,
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
-static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
- char *buf)
+static char *cgroup_fill_name(struct cgroup *cgrp, const struct cftype *cft,
+ char *buf, bool write_link_name)
{
struct cgroup_subsys *ss = cft->ss;
@@ -1471,13 +1471,26 @@ static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
snprintf(buf, CGROUP_FILE_NAME_MAX, "%s%s.%s",
dbg, cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
- cft->name);
+ write_link_name ? cft->link_name : cft->name);
} else {
- strscpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
+ strscpy(buf, write_link_name ? cft->link_name : cft->name,
+ CGROUP_FILE_NAME_MAX);
}
return buf;
}
+static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
+ char *buf)
+{
+ return cgroup_fill_name(cgrp, cft, buf, false);
+}
+
+static char *cgroup_link_name(struct cgroup *cgrp, const struct cftype *cft,
+ char *buf)
+{
+ return cgroup_fill_name(cgrp, cft, buf, true);
+}
+
/**
* cgroup_file_mode - deduce file mode of a control file
* @cft: the control file in question
@@ -1636,6 +1649,9 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
}
kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
+ if (cft->flags & CFTYPE_SYMLINKED)
+ kernfs_remove_by_name(cgrp->kn,
+ cgroup_link_name(cgrp, cft, name));
}
/**
@@ -3821,6 +3837,7 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
{
char name[CGROUP_FILE_NAME_MAX];
struct kernfs_node *kn;
+ struct kernfs_node *kn_link;
struct lock_class_key *key = NULL;
int ret;
@@ -3851,6 +3868,14 @@ static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
spin_unlock_irq(&cgroup_file_kn_lock);
}
+ if (cft->flags & CFTYPE_SYMLINKED) {
+ kn_link = kernfs_create_link(cgrp->kn,
+ cgroup_link_name(cgrp, cft, name),
+ kn);
+ if (IS_ERR(kn_link))
+ return PTR_ERR(kn_link);
+ }
+
return 0;
}
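
Put together, a controller file that opts into the extra symlinked name would be declared roughly as follows (a hedged sketch; CFTYPE_SYMLINKED and .link_name come from the header side of this series, which is not shown here, and foo_show is a placeholder):

	static struct cftype foo_files[] = {
		{
			.name      = "pressure",
			.link_name = "io.pressure",	/* second, symlinked file name */
			.flags     = CFTYPE_SYMLINKED,
			.seq_show  = foo_show,		/* placeholder handler */
		},
		{ }	/* terminator */
	};
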
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f2ef10460698..077fde6fb953 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -2061,7 +2061,7 @@ static void cpuhp_online_cpu_device(unsigned int cpu)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
}
-static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
{
int cpu, ret = 0;
@@ -2093,7 +2093,7 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
return ret;
}
-static int cpuhp_smt_enable(void)
+int cpuhp_smt_enable(void)
{
int cpu, ret = 0;
diff --git a/kernel/gen_ikh_data.sh b/kernel/gen_kheaders.sh
index 591a94f7b387..9a34e1d9bd7f 100755
--- a/kernel/gen_ikh_data.sh
+++ b/kernel/gen_kheaders.sh
@@ -2,7 +2,7 @@
# SPDX-License-Identifier: GPL-2.0
# This script generates an archive consisting of kernel headers
-# for CONFIG_IKHEADERS_PROC.
+# for CONFIG_IKHEADERS.
set -e
spath="$(dirname "$(readlink -f "$0")")"
kroot="$spath/.."
@@ -31,9 +31,8 @@ arch/$SRCARCH/include/
# This block is useful for debugging the incremental builds.
# Uncomment it for debugging.
-# iter=1
-# if [ ! -f /tmp/iter ]; then echo 1 > /tmp/iter;
-# else; iter=$(($(cat /tmp/iter) + 1)); fi
+# if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter;
+# else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi
# find $src_file_list -type f | xargs ls -lR > /tmp/src-ls-$iter
# find $obj_file_list -type f | xargs ls -lR > /tmp/obj-ls-$iter
@@ -43,10 +42,18 @@ arch/$SRCARCH/include/
pushd $kroot > /dev/null
src_files_md5="$(find $src_file_list -type f |
grep -v "include/generated/compile.h" |
+ grep -v "include/generated/autoconf.h" |
+ grep -v "include/config/auto.conf" |
+ grep -v "include/config/auto.conf.cmd" |
+ grep -v "include/config/tristate.conf" |
xargs ls -lR | md5sum | cut -d ' ' -f1)"
popd > /dev/null
obj_files_md5="$(find $obj_file_list -type f |
grep -v "include/generated/compile.h" |
+ grep -v "include/generated/autoconf.h" |
+ grep -v "include/config/auto.conf" |
+ grep -v "include/config/auto.conf.cmd" |
+ grep -v "include/config/tristate.conf" |
xargs ls -lR | md5sum | cut -d ' ' -f1)"
if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi
@@ -82,7 +89,7 @@ find $cpio_dir -type f -print0 |
tar -Jcf $tarfile -C $cpio_dir/ . > /dev/null
-echo "$src_files_md5" > kernel/kheaders.md5
+echo "$src_files_md5" > kernel/kheaders.md5
echo "$obj_files_md5" >> kernel/kheaders.md5
echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5
diff --git a/kernel/kheaders.c b/kernel/kheaders.c
index 70ae6052920d..8f69772af77b 100644
--- a/kernel/kheaders.c
+++ b/kernel/kheaders.c
@@ -8,9 +8,8 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/proc_fs.h>
+#include <linux/kobject.h>
#include <linux/init.h>
-#include <linux/uaccess.h>
/*
* Define kernel_headers_data and kernel_headers_data_end, within which the
@@ -31,39 +30,32 @@ extern char kernel_headers_data;
extern char kernel_headers_data_end;
static ssize_t
-ikheaders_read_current(struct file *file, char __user *buf,
- size_t len, loff_t *offset)
+ikheaders_read(struct file *file, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t len)
{
- return simple_read_from_buffer(buf, len, offset,
- &kernel_headers_data,
- &kernel_headers_data_end -
- &kernel_headers_data);
+ memcpy(buf, &kernel_headers_data + off, len);
+ return len;
}
-static const struct file_operations ikheaders_file_ops = {
- .read = ikheaders_read_current,
- .llseek = default_llseek,
+static struct bin_attribute kheaders_attr __ro_after_init = {
+ .attr = {
+ .name = "kheaders.tar.xz",
+ .mode = 0444,
+ },
+ .read = &ikheaders_read,
};
static int __init ikheaders_init(void)
{
- struct proc_dir_entry *entry;
-
- /* create the current headers file */
- entry = proc_create("kheaders.tar.xz", S_IRUGO, NULL,
- &ikheaders_file_ops);
- if (!entry)
- return -ENOMEM;
-
- proc_set_size(entry,
- &kernel_headers_data_end -
- &kernel_headers_data);
- return 0;
+ kheaders_attr.size = (&kernel_headers_data_end -
+ &kernel_headers_data);
+ return sysfs_create_bin_file(kernel_kobj, &kheaders_attr);
}
static void __exit ikheaders_cleanup(void)
{
- remove_proc_entry("kheaders.tar.xz", NULL);
+ sysfs_remove_bin_file(kernel_kobj, &kheaders_attr);
}
module_init(ikheaders_init);
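
With the move to sysfs, consuming the archive from userspace is just reading a file; a minimal sketch (the path comes from the Kconfig help text above):

	#include <stdio.h>

	int main(void)
	{
		/* Stream the in-kernel headers archive to stdout, e.g. for 'tar xJ'. */
		FILE *f = fopen("/sys/kernel/kheaders.tar.xz", "rb");
		char buf[4096];
		size_t n;

		if (!f)
			return 1;	/* CONFIG_IKHEADERS off or kheaders.ko not loaded */
		while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
			fwrite(buf, 1, n, stdout);
		fclose(f);
		return 0;
	}
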
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8fc054e5c501..cd7434e6000d 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -256,6 +256,11 @@ void swsusp_show_speed(ktime_t start, ktime_t stop,
(kps % 1000) / 10);
}
+__weak int arch_resume_nosmt(void)
+{
+ return 0;
+}
+
/**
* create_image - Create a hibernation image.
* @platform_mode: Whether or not to use the platform driver.
@@ -323,6 +328,10 @@ static int create_image(int platform_mode)
Enable_cpus:
suspend_enable_secondary_cpus();
+ /* Allow architectures to do nosmt-specific post-resume dances */
+ if (!in_suspend)
+ error = arch_resume_nosmt();
+
Platform_finish:
platform_finish(platform_mode);
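
The __weak stub above lets most architectures ignore the hook. A hedged reconstruction of the kind of override it is meant for, built on the cpuhp_smt_enable()/cpuhp_smt_disable() helpers un-static'ed earlier in this series (the real x86 version lives in arch/x86/power/hibernate.c and may differ in detail):

	int arch_resume_nosmt(void)
	{
		int ret = 0;

		/* If SMT was disabled before hibernation, cycle the siblings
		 * through online/offline again so they park in the state the
		 * architecture expects after resume.
		 */
		if (cpu_smt_control == CPU_SMT_DISABLED ||
		    cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
			enum cpuhp_smt_control old = cpu_smt_control;

			ret = cpuhp_smt_enable();
			if (ret)
				return ret;
			ret = cpuhp_smt_disable(old);
		}
		return ret;
	}
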
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 3c57e206f3e8..9505101ed2bc 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -61,6 +61,12 @@ static DECLARE_SWAIT_QUEUE_HEAD(s2idle_wait_head);
enum s2idle_states __read_mostly s2idle_state;
static DEFINE_RAW_SPINLOCK(s2idle_lock);
+/**
+ * pm_suspend_via_s2idle - Check if suspend-to-idle is the default suspend.
+ *
+ * Return 'true' if suspend-to-idle has been selected as the default system
+ * suspend method.
+ */
bool pm_suspend_via_s2idle(void)
{
return mem_sleep_current == PM_SUSPEND_TO_IDLE;
diff --git a/kernel/signal.c b/kernel/signal.c
index 328a01e1a2f0..d622eac9d169 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3621,12 +3621,11 @@ static struct pid *pidfd_to_pid(const struct file *file)
}
/**
- * sys_pidfd_send_signal - send a signal to a process through a task file
- * descriptor
- * @pidfd: the file descriptor of the process
- * @sig: signal to be sent
- * @info: the signal info
- * @flags: future flags to be passed
+ * sys_pidfd_send_signal - Signal a process through a pidfd
+ * @pidfd: file descriptor of the process
+ * @sig: signal to send
+ * @info: signal info
+ * @flags: future flags
*
* The syscall currently only signals via PIDTYPE_PID which covers
* kill(<positive-pid>, <signal>). It does not signal threads or process
diff --git a/lib/lockref.c b/lib/lockref.c
index 3d468b53d4c9..5b34bbd3eba8 100644
--- a/lib/lockref.c
+++ b/lib/lockref.c
@@ -9,6 +9,7 @@
* failure case.
*/
#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
+ int retry = 100; \
struct lockref old; \
BUILD_BUG_ON(sizeof(old) != 8); \
old.lock_count = READ_ONCE(lockref->lock_count); \
@@ -21,6 +22,8 @@
if (likely(old.lock_count == prev.lock_count)) { \
SUCCESS; \
} \
+ if (!--retry) \
+ break; \
cpu_relax(); \
} \
} while (0)
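
The added retry counter bounds the lockless fast path to 100 cmpxchg attempts before the caller falls back to taking the spinlock. The same bounded-retry shape, outside the macro, looks roughly like this (a generic sketch, not the lockref code itself):

	/* Try a lockless cmpxchg update a bounded number of times, then let
	 * the caller fall back to the locked slow path.
	 */
	static bool try_lockless_inc(atomic64_t *v)
	{
		int retry = 100;
		s64 old = atomic64_read(v);

		while (retry--) {
			if (atomic64_try_cmpxchg(v, &old, old + 1))
				return true;	/* lockless fast path won */
			cpu_relax();		/* 'old' was refreshed on failure */
		}
		return false;			/* caller takes the spinlock path */
	}
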
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 787c146eb485..83ea6c4e623c 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -224,30 +224,30 @@ static ssize_t config_show(struct device *dev,
mutex_lock(&test_fw_mutex);
- len += snprintf(buf, PAGE_SIZE,
+ len += scnprintf(buf, PAGE_SIZE - len,
"Custom trigger configuration for: %s\n",
dev_name(dev));
if (test_fw_config->name)
- len += snprintf(buf+len, PAGE_SIZE,
+ len += scnprintf(buf+len, PAGE_SIZE - len,
"name:\t%s\n",
test_fw_config->name);
else
- len += snprintf(buf+len, PAGE_SIZE,
+ len += scnprintf(buf+len, PAGE_SIZE - len,
"name:\tEMTPY\n");
- len += snprintf(buf+len, PAGE_SIZE,
+ len += scnprintf(buf+len, PAGE_SIZE - len,
"num_requests:\t%u\n", test_fw_config->num_requests);
- len += snprintf(buf+len, PAGE_SIZE,
+ len += scnprintf(buf+len, PAGE_SIZE - len,
"send_uevent:\t\t%s\n",
test_fw_config->send_uevent ?
"FW_ACTION_HOTPLUG" :
"FW_ACTION_NOHOTPLUG");
- len += snprintf(buf+len, PAGE_SIZE,
+ len += scnprintf(buf+len, PAGE_SIZE - len,
"sync_direct:\t\t%s\n",
test_fw_config->sync_direct ? "true" : "false");
- len += snprintf(buf+len, PAGE_SIZE,
+ len += scnprintf(buf+len, PAGE_SIZE - len,
"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);
mutex_unlock(&test_fw_mutex);
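
The switch matters because snprintf() returns the length that would have been written had the buffer been large enough, so the accumulated len can exceed PAGE_SIZE and the next write at buf+len lands past the buffer; scnprintf() returns only what was actually stored. A toy illustration (not from this patch):

	static void snprintf_vs_scnprintf(void)
	{
		char buf[8];
		int n;

		n = snprintf(buf, sizeof(buf), "0123456789");	/* n == 10 > sizeof(buf) */
		n = scnprintf(buf, sizeof(buf), "0123456789");	/* n == 7, chars stored  */
	}
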
diff --git a/net/core/dev.c b/net/core/dev.c
index 140858d4a048..eb7fb6daa1ef 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5021,12 +5021,12 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
if (list_empty(head))
return;
if (pt_prev->list_func != NULL)
- pt_prev->list_func(head, pt_prev, orig_dev);
+ INDIRECT_CALL_INET(pt_prev->list_func, ipv6_list_rcv,
+ ip_list_rcv, head, pt_prev, orig_dev);
else
list_for_each_entry_safe(skb, next, head, list) {
skb_list_del_init(skb);
- INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
- skb->dev, pt_prev, orig_dev);
+ pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
}
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6dadeff8d39a..d08b1e19ce9c 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1355,13 +1355,16 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
if (!regbuf)
return -ENOMEM;
+ if (regs.len < reglen)
+ reglen = regs.len;
+
ops->get_regs(dev, &regs, regbuf);
ret = -EFAULT;
if (copy_to_user(useraddr, &regs, sizeof(regs)))
goto out;
useraddr += offsetof(struct ethtool_regs, data);
- if (regbuf && copy_to_user(useraddr, regbuf, regs.len))
+ if (copy_to_user(useraddr, regbuf, reglen))
goto out;
ret = 0;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 1d1a2483097d..dd220ce7ca7a 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -754,9 +754,9 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
if (err)
goto errout;
- if (rule_exists(ops, frh, tb, rule)) {
- if (nlh->nlmsg_flags & NLM_F_EXCL)
- err = -EEXIST;
+ if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
+ rule_exists(ops, frh, tb, rule)) {
+ err = -EEXIST;
goto errout_free;
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 99ddc69736b2..f975c5e2a369 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3059,7 +3059,13 @@ static int pktgen_wait_thread_run(struct pktgen_thread *t)
{
while (thread_is_running(t)) {
+ /* note: 't' will still be around even after the unlock/lock
+ * cycle because pktgen_thread threads are only cleared at
+ * net exit
+ */
+ mutex_unlock(&pktgen_thread_lock);
msleep_interruptible(100);
+ mutex_lock(&pktgen_thread_lock);
if (signal_pending(current))
goto signal;
@@ -3074,6 +3080,10 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
struct pktgen_thread *t;
int sig = 1;
+ /* prevent from racing with rmmod */
+ if (!try_module_get(THIS_MODULE))
+ return sig;
+
mutex_lock(&pktgen_thread_lock);
list_for_each_entry(t, &pn->pktgen_threads, th_list) {
@@ -3087,6 +3097,7 @@ static int pktgen_wait_all_threads_run(struct pktgen_net *pn)
t->control |= (T_STOP);
mutex_unlock(&pktgen_thread_lock);
+ module_put(THIS_MODULE);
return sig;
}
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 969402c7dbf1..d43737e6c3fb 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -28,14 +28,10 @@ static inline bool sja1105_is_link_local(const struct sk_buff *skb)
*/
static bool sja1105_filter(const struct sk_buff *skb, struct net_device *dev)
{
- if (sja1105_is_link_local(skb)) {
- SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_LINK_LOCAL;
+ if (sja1105_is_link_local(skb))
return true;
- }
- if (!dsa_port_is_vlan_filtering(dev->dsa_ptr)) {
- SJA1105_SKB_CB(skb)->type = SJA1105_FRAME_TYPE_NORMAL;
+ if (!dsa_port_is_vlan_filtering(dev->dsa_ptr))
return true;
- }
return false;
}
@@ -84,7 +80,7 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
skb->offload_fwd_mark = 1;
- if (SJA1105_SKB_CB(skb)->type == SJA1105_FRAME_TYPE_LINK_LOCAL) {
+ if (sja1105_is_link_local(skb)) {
/* Management traffic path. Switch embeds the switch ID and
* port ID into bytes of the destination MAC, courtesy of
* the incl_srcpt options.
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cee640281e02..6cb7cff22db9 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1981,7 +1981,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u32 itag = 0;
struct rtable *rth;
struct flowi4 fl4;
- bool do_cache;
+ bool do_cache = true;
/* IP on this device is disabled. */
@@ -2058,6 +2058,9 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
if (res->type == RTN_BROADCAST) {
if (IN_DEV_BFORWARD(in_dev))
goto make_route;
+ /* do not cache if bc_forwarding is enabled */
+ if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
+ do_cache = false;
goto brd_input;
}
@@ -2095,18 +2098,15 @@ brd_input:
RT_CACHE_STAT_INC(in_brd);
local_input:
- do_cache = false;
- if (res->fi) {
- if (!itag) {
- struct fib_nh_common *nhc = FIB_RES_NHC(*res);
+ do_cache &= res->fi && !itag;
+ if (do_cache) {
+ struct fib_nh_common *nhc = FIB_RES_NHC(*res);
- rth = rcu_dereference(nhc->nhc_rth_input);
- if (rt_cache_valid(rth)) {
- skb_dst_set_noref(skb, &rth->dst);
- err = 0;
- goto out;
- }
- do_cache = true;
+ rth = rcu_dereference(nhc->nhc_rth_input);
+ if (rt_cache_valid(rth)) {
+ skb_dst_set_noref(skb, &rth->dst);
+ err = 0;
+ goto out;
}
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 189144346cd4..7c6228fbf5dd 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -533,8 +533,7 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
(inet->inet_dport != rmt_port && inet->inet_dport) ||
(inet->inet_rcv_saddr && inet->inet_rcv_saddr != loc_addr) ||
ipv6_only_sock(sk) ||
- (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
- sk->sk_bound_dev_if != sdif))
+ !udp_sk_bound_dev_eq(net, sk->sk_bound_dev_if, dif, sdif))
return false;
if (!ip_mc_sf_allow(sk, loc_addr, rmt_addr, dif, sdif))
return false;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 703c8387f102..70693bc7ad9d 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -779,6 +779,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct flowi6 fl6;
struct ipcm6_cookie ipc6;
int addr_len = msg->msg_namelen;
+ int hdrincl;
u16 proto;
int err;
@@ -792,6 +793,13 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
+ /* hdrincl should be READ_ONCE(inet->hdrincl)
+ * but READ_ONCE() doesn't work with bit fields.
+ * Doing this indirectly yields the same result.
+ */
+ hdrincl = inet->hdrincl;
+ hdrincl = READ_ONCE(hdrincl);
+
/*
* Get and verify the address.
*/
@@ -883,11 +891,14 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
- rfv.msg = msg;
- rfv.hlen = 0;
- err = rawv6_probe_proto_opt(&rfv, &fl6);
- if (err)
- goto out;
+
+ if (!hdrincl) {
+ rfv.msg = msg;
+ rfv.hlen = 0;
+ err = rawv6_probe_proto_opt(&rfv, &fl6);
+ if (err)
+ goto out;
+ }
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
@@ -904,7 +915,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
- if (inet->hdrincl)
+ if (hdrincl)
fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH;
if (ipc6.tclass < 0)
@@ -927,7 +938,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
goto do_confirm;
back_from_confirm:
- if (inet->hdrincl)
+ if (hdrincl)
err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst,
msg->msg_flags, &ipc6.sockc);
else {
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index fc012e801459..a29d66da7394 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3008,8 +3008,8 @@ static int packet_release(struct socket *sock)
synchronize_net();
+ kfree(po->rollover);
if (f) {
- kfree(po->rollover);
fanout_release_data(f);
kfree(f);
}
diff --git a/net/rds/ib.c b/net/rds/ib.c
index 2da9b75bad16..b8d581b779b2 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -87,7 +87,7 @@ static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
spin_lock_irqsave(&rds_ibdev->spinlock, flags);
list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
- rds_conn_drop(ic->conn);
+ rds_conn_path_drop(&ic->conn->c_path[0], true);
spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index d664e9ade74d..0b347f46b2f4 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -428,12 +428,14 @@ int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
wait_clean_list_grace();
list_to_llist_nodes(pool, &unmap_list, &clean_nodes, &clean_tail);
- if (ibmr_ret)
+ if (ibmr_ret) {
*ibmr_ret = llist_entry(clean_nodes, struct rds_ib_mr, llnode);
-
+ clean_nodes = clean_nodes->next;
+ }
/* more than one entry in llist nodes */
- if (clean_nodes->next)
- llist_add_batch(clean_nodes->next, clean_tail, &pool->clean_list);
+ if (clean_nodes)
+ llist_add_batch(clean_nodes, clean_tail,
+ &pool->clean_list);
}
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8946c89d7392..3cae88cbdaa0 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -168,6 +168,7 @@ void rds_ib_recv_free_caches(struct rds_ib_connection *ic)
list_del(&inc->ii_cache_entry);
WARN_ON(!list_empty(&inc->ii_frags));
kmem_cache_free(rds_ib_incoming_slab, inc);
+ atomic_dec(&rds_ib_allocation);
}
rds_ib_cache_xfer_to_ready(&ic->i_cache_frags);
@@ -1057,6 +1058,8 @@ out:
void rds_ib_recv_exit(void)
{
+ WARN_ON(atomic_read(&rds_ib_allocation));
+
kmem_cache_destroy(rds_ib_incoming_slab);
kmem_cache_destroy(rds_ib_frag_slab);
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 92331e1195c1..f17908f5c4f3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2312,7 +2312,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
union sctp_addr addr;
struct sctp_af *af;
int src_match = 0;
- char *cookie;
/* We must include the address that the INIT packet came from.
* This is the only address that matters for an INIT packet.
@@ -2416,14 +2415,6 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
/* Peer Rwnd : Current calculated value of the peer's rwnd. */
asoc->peer.rwnd = asoc->peer.i.a_rwnd;
- /* Copy cookie in case we need to resend COOKIE-ECHO. */
- cookie = asoc->peer.cookie;
- if (cookie) {
- asoc->peer.cookie = kmemdup(cookie, asoc->peer.cookie_len, gfp);
- if (!asoc->peer.cookie)
- goto clean_up;
- }
-
/* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily
* high (for example, implementations MAY use the size of the receiver
* advertised window).
@@ -2592,7 +2583,9 @@ do_addr_param:
case SCTP_PARAM_STATE_COOKIE:
asoc->peer.cookie_len =
ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
- asoc->peer.cookie = param.cookie->body;
+ asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
+ if (!asoc->peer.cookie)
+ retval = 0;
break;
case SCTP_PARAM_HEARTBEAT_INFO:
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9b50da548db2..a554d6d15d1b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -883,6 +883,11 @@ static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
asoc->rto_initial;
}
+ if (sctp_state(asoc, ESTABLISHED)) {
+ kfree(asoc->peer.cookie);
+ asoc->peer.cookie = NULL;
+ }
+
if (sctp_state(asoc, ESTABLISHED) ||
sctp_state(asoc, CLOSED) ||
sctp_state(asoc, SHUTDOWN_RECEIVED)) {
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d6e57da56c94..627a87a71f8b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -2288,13 +2288,13 @@ call_status(struct rpc_task *task)
case -ECONNREFUSED:
case -ECONNRESET:
case -ECONNABORTED:
+ case -ENOTCONN:
rpc_force_rebind(clnt);
/* fall through */
case -EADDRINUSE:
rpc_delay(task, 3*HZ);
/* fall through */
case -EPIPE:
- case -ENOTCONN:
case -EAGAIN:
break;
case -EIO:
@@ -2426,17 +2426,21 @@ call_decode(struct rpc_task *task)
return;
case -EAGAIN:
task->tk_status = 0;
- /* Note: rpc_decode_header() may have freed the RPC slot */
- if (task->tk_rqstp == req) {
- xdr_free_bvec(&req->rq_rcv_buf);
- req->rq_reply_bytes_recvd = 0;
- req->rq_rcv_buf.len = 0;
- if (task->tk_client->cl_discrtry)
- xprt_conditional_disconnect(req->rq_xprt,
- req->rq_connect_cookie);
- }
+ xdr_free_bvec(&req->rq_rcv_buf);
+ req->rq_reply_bytes_recvd = 0;
+ req->rq_rcv_buf.len = 0;
+ if (task->tk_client->cl_discrtry)
+ xprt_conditional_disconnect(req->rq_xprt,
+ req->rq_connect_cookie);
task->tk_action = call_encode;
rpc_check_timeout(task);
+ break;
+ case -EKEYREJECTED:
+ task->tk_action = call_reserve;
+ rpc_check_timeout(task);
+ rpcauth_invalcred(task);
+ /* Ensure we obtain a new XID if we retry! */
+ xprt_release(task);
}
}
@@ -2572,11 +2576,7 @@ out_msg_denied:
break;
task->tk_cred_retry--;
trace_rpc__stale_creds(task);
- rpcauth_invalcred(task);
- /* Ensure we obtain a new XID! */
- xprt_release(task);
- task->tk_action = call_reserve;
- return -EAGAIN;
+ return -EKEYREJECTED;
case rpc_autherr_badcred:
case rpc_autherr_badverf:
/* possibly garbled cred/verf? */
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index bef5eac8ab38..84bb37924540 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -810,8 +810,7 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
struct rpcrdma_sendctx *sc;
- sc = kzalloc(sizeof(*sc) +
- ia->ri_max_send_sges * sizeof(struct ib_sge),
+ sc = kzalloc(struct_size(sc, sc_sges, ia->ri_max_send_sges),
GFP_KERNEL);
if (!sc)
return NULL;
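
struct_size(p, member, n) computes sizeof(*p) plus n trailing elements of p->member with overflow checking, which replaces the open-coded multiplication above. A hedged sketch of the idiom on a made-up struct foo:

	struct foo {
		unsigned int nsges;
		struct ib_sge sges[];	/* flexible array member */
	};

	static struct foo *foo_alloc(unsigned int n)
	{
		struct foo *f;

		/* Saturates instead of wrapping if n is attacker-controlled. */
		f = kzalloc(struct_size(f, sges, n), GFP_KERNEL);
		if (f)
			f->nsges = n;
		return f;
	}
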
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b95c408fd771..1f9cf57d9754 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -550,11 +550,23 @@ void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
}
}
+static void tls_device_resync_rx(struct tls_context *tls_ctx,
+ struct sock *sk, u32 seq, u64 rcd_sn)
+{
+ struct net_device *netdev;
+
+ if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags)))
+ return;
+ netdev = READ_ONCE(tls_ctx->netdev);
+ if (netdev)
+ netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq, rcd_sn);
+ clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags);
+}
+
void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_offload_context_rx *rx_ctx;
- struct net_device *netdev;
u32 is_req_pending;
s64 resync_req;
u32 req_seq;
@@ -570,12 +582,7 @@ void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
if (unlikely(is_req_pending) && req_seq == seq &&
atomic64_try_cmpxchg(&rx_ctx->resync_req, &resync_req, 0)) {
seq += TLS_HEADER_SIZE - 1;
- down_read(&device_offload_lock);
- netdev = tls_ctx->netdev;
- if (netdev)
- netdev->tlsdev_ops->tls_dev_resync_rx(netdev, sk, seq,
- rcd_sn);
- up_read(&device_offload_lock);
+ tls_device_resync_rx(tls_ctx, sk, seq, rcd_sn);
}
}
@@ -977,7 +984,10 @@ static int tls_device_down(struct net_device *netdev)
if (ctx->rx_conf == TLS_HW)
netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
TLS_OFFLOAD_CTX_DIR_RX);
- ctx->netdev = NULL;
+ WRITE_ONCE(ctx->netdev, NULL);
+ smp_mb__before_atomic(); /* pairs with test_and_set_bit() */
+ while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags))
+ usleep_range(10, 200);
dev_put(netdev);
list_del_init(&ctx->list);
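
The pattern introduced here (the fast path claims a flag bit and re-checks the pointer, while teardown clears the pointer and waits for the bit to drop) generalizes roughly like this (a sketch with placeholder names: struct ctx, BUSY_BIT, use_device):

	static void fast_path(struct ctx *c)
	{
		if (test_and_set_bit(BUSY_BIT, &c->flags))
			return;				/* another resync already running */
		if (READ_ONCE(c->dev))			/* re-check after claiming the bit */
			use_device(c->dev);
		clear_bit_unlock(BUSY_BIT, &c->flags);
	}

	static void teardown(struct ctx *c)
	{
		WRITE_ONCE(c->dev, NULL);		/* new fast-path entries see NULL */
		smp_mb__before_atomic();		/* pairs with test_and_set_bit() */
		while (test_bit(BUSY_BIT, &c->flags))	/* wait out an in-flight fast path */
			usleep_range(10, 200);
		/* only now is it safe to release what c->dev pointed to */
	}
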
diff --git a/samples/pidfd/pidfd-metadata.c b/samples/pidfd/pidfd-metadata.c
index 640f5f757c57..14b454448429 100644
--- a/samples/pidfd/pidfd-metadata.c
+++ b/samples/pidfd/pidfd-metadata.c
@@ -21,6 +21,10 @@
#define CLONE_PIDFD 0x00001000
#endif
+#ifndef __NR_pidfd_send_signal
+#define __NR_pidfd_send_signal -1
+#endif
+
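
The fallback define keeps these samples and tests compiling against older UAPI headers; when it resolves to -1 the raw syscall simply fails with ENOSYS at run time. The usual wrapper shape (pidfd_test.c below carries its own copy) is roughly:

	#include <sys/syscall.h>
	#include <signal.h>
	#include <unistd.h>

	static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
						unsigned int flags)
	{
		/* With the -1 fallback this returns -1 and sets errno to ENOSYS. */
		return syscall(__NR_pidfd_send_signal, pidfd, sig, info, flags);
	}
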
static int do_child(void *args)
{
printf("%d\n", getpid());
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 85d758233483..f641bb0aa63f 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -74,8 +74,13 @@ endef
# Usage: CROSS_COMPILE := $(call cc-cross-prefix, m68k-linux-gnu- m68k-linux-)
# Return first <prefix> where a <prefix>gcc is found in PATH.
# If no gcc found in PATH with listed prefixes return nothing
+#
+# Note: '2>/dev/null' is here to force Make to invoke a shell. Otherwise, it
+# would try to directly execute the shell builtin 'command'. This workaround
+# should be kept for a long time since this issue was fixed only after the
+# GNU Make 4.2.1 release.
cc-cross-prefix = $(firstword $(foreach c, $(filter-out -%, $(1)), \
- $(if $(shell which $(c)gcc), $(c))))
+ $(if $(shell command -v $(c)gcc 2>/dev/null), $(c))))
# output directory for tests below
TMPOUT := $(if $(KBUILD_EXTMOD),$(firstword $(KBUILD_EXTMOD))/)
diff --git a/scripts/checkstack.pl b/scripts/checkstack.pl
index 122aef5e4e14..371bd17a4983 100755
--- a/scripts/checkstack.pl
+++ b/scripts/checkstack.pl
@@ -46,7 +46,7 @@ my (@stack, $re, $dre, $x, $xs, $funcre);
$x = "[0-9a-f]"; # hex character
$xs = "[0-9a-f ]"; # hex character or space
$funcre = qr/^$x* <(.*)>:$/;
- if ($arch eq 'aarch64') {
+ if ($arch =~ '^(aarch|arm)64$') {
#ffffffc0006325cc: a9bb7bfd stp x29, x30, [sp, #-80]!
#a110: d11643ff sub sp, sp, #0x590
$re = qr/^.*stp.*sp, \#-([0-9]{1,8})\]\!/o;
diff --git a/scripts/kconfig/tests/err_recursive_inc/expected_stderr b/scripts/kconfig/tests/err_recursive_inc/expected_stderr
index 6b582eee2176..b070a31fdfeb 100644
--- a/scripts/kconfig/tests/err_recursive_inc/expected_stderr
+++ b/scripts/kconfig/tests/err_recursive_inc/expected_stderr
@@ -1,6 +1,6 @@
Recursive inclusion detected.
Inclusion path:
current file : Kconfig.inc1
- included from: Kconfig.inc3:1
- included from: Kconfig.inc2:3
- included from: Kconfig.inc1:4
+ included from: Kconfig.inc3:2
+ included from: Kconfig.inc2:4
+ included from: Kconfig.inc1:5
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 27b42d5b6c4f..ca7f46b562a4 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -104,7 +104,7 @@ clean-dirs += $(objtree)/snap/
# ---------------------------------------------------------------------------
tar%pkg: FORCE
$(MAKE) -f $(srctree)/Makefile
- $(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
+ +$(CONFIG_SHELL) $(srctree)/scripts/package/buildtar $@
clean-dirs += $(objtree)/tar-install/
diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c
index be59f9c34ea2..79053a4f4783 100644
--- a/tools/testing/selftests/cgroup/test_core.c
+++ b/tools/testing/selftests/cgroup/test_core.c
@@ -198,7 +198,7 @@ static int test_cgcore_no_internal_process_constraint_on_threads(const char *roo
char *parent = NULL, *child = NULL;
if (cg_read_strstr(root, "cgroup.controllers", "cpu") ||
- cg_read_strstr(root, "cgroup.subtree_control", "cpu")) {
+ cg_write(root, "cgroup.subtree_control", "+cpu")) {
ret = KSFT_SKIP;
goto cleanup;
}
@@ -376,6 +376,11 @@ int main(int argc, char *argv[])
if (cg_find_unified_root(root, sizeof(root)))
ksft_exit_skip("cgroup v2 isn't mounted\n");
+
+ if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
+ if (cg_write(root, "cgroup.subtree_control", "+memory"))
+ ksft_exit_skip("Failed to set memory controller\n");
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
index 6f339882a6ca..c19a97dd02d4 100644
--- a/tools/testing/selftests/cgroup/test_memcontrol.c
+++ b/tools/testing/selftests/cgroup/test_memcontrol.c
@@ -1205,6 +1205,10 @@ int main(int argc, char **argv)
if (cg_read_strstr(root, "cgroup.controllers", "memory"))
ksft_exit_skip("memory controller isn't available\n");
+ if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
+ if (cg_write(root, "cgroup.subtree_control", "+memory"))
+ ksft_exit_skip("Failed to set memory controller\n");
+
for (i = 0; i < ARRAY_SIZE(tests); i++) {
switch (tests[i].fn(root)) {
case KSFT_PASS:
diff --git a/tools/testing/selftests/net/forwarding/router_broadcast.sh b/tools/testing/selftests/net/forwarding/router_broadcast.sh
index 9a678ece32b4..4eac0a06f451 100755
--- a/tools/testing/selftests/net/forwarding/router_broadcast.sh
+++ b/tools/testing/selftests/net/forwarding/router_broadcast.sh
@@ -145,16 +145,19 @@ bc_forwarding_disable()
{
sysctl_set net.ipv4.conf.all.bc_forwarding 0
sysctl_set net.ipv4.conf.$rp1.bc_forwarding 0
+ sysctl_set net.ipv4.conf.$rp2.bc_forwarding 0
}
bc_forwarding_enable()
{
sysctl_set net.ipv4.conf.all.bc_forwarding 1
sysctl_set net.ipv4.conf.$rp1.bc_forwarding 1
+ sysctl_set net.ipv4.conf.$rp2.bc_forwarding 1
}
bc_forwarding_restore()
{
+ sysctl_restore net.ipv4.conf.$rp2.bc_forwarding
sysctl_restore net.ipv4.conf.$rp1.bc_forwarding
sysctl_restore net.ipv4.conf.all.bc_forwarding
}
@@ -171,7 +174,7 @@ ping_test_from()
log_info "ping $dip, expected reply from $from"
ip vrf exec $(master_name_get $oif) \
$PING -I $oif $dip -c 10 -i 0.1 -w $PING_TIMEOUT -b 2>&1 \
- | grep $from &> /dev/null
+ | grep "bytes from $from" > /dev/null
check_err_fail $fail $?
}
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 5bae1792e3d6..104c75a33882 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -16,6 +16,10 @@
#include "../kselftest.h"
+#ifndef __NR_pidfd_send_signal
+#define __NR_pidfd_send_signal -1
+#endif
+
static inline int sys_pidfd_send_signal(int pidfd, int sig, siginfo_t *info,
unsigned int flags)
{
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index e13eb6cc8901..9534dc2bc929 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -1,10 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for vm selftests
-ifndef OUTPUT
- OUTPUT := $(shell pwd)
-endif
-
CFLAGS = -Wall -I ../../../../usr/include $(EXTRA_CFLAGS)
LDLIBS = -lrt
TEST_GEN_FILES = compaction_test
@@ -25,6 +21,8 @@ TEST_GEN_FILES += virtual_address_range
TEST_PROGS := run_vmtests
+TEST_FILES := test_vmalloc.sh
+
KSFT_KHDR_INSTALL := 1
include ../lib.mk
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index 5d1db824f73a..b3e6497b080c 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -123,7 +123,7 @@ static void usage(void)
fprintf(stderr, "Supported <test type>: anon, hugetlb, "
"hugetlb_shared, shmem\n\n");
fprintf(stderr, "Examples:\n\n");
- fprintf(stderr, examples);
+ fprintf(stderr, "%s", examples);
exit(1);
}
diff --git a/tools/virtio/linux/kernel.h b/tools/virtio/linux/kernel.h
index 7ef45a4a3cba..6683b4a70b05 100644
--- a/tools/virtio/linux/kernel.h
+++ b/tools/virtio/linux/kernel.h
@@ -127,7 +127,7 @@ static inline void free_page(unsigned long addr)
#define dev_err(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
#define dev_warn(dev, format, ...) fprintf (stderr, format, ## __VA_ARGS__)
-#define WARN_ON_ONCE(cond) ((cond) ? fprintf (stderr, "WARNING\n") : 0)
+#define WARN_ON_ONCE(cond) (unlikely(cond) ? fprintf (stderr, "WARNING\n") : 0)
#define min(x, y) ({ \
typeof(x) _min1 = (x); \