author     David S. Miller <davem@davemloft.net>  2019-08-06 18:44:57 -0700
committer  David S. Miller <davem@davemloft.net>  2019-08-06 18:44:57 -0700
commit     13dfb3fa494361ea9a5950f27c9cd8b06d28c04f (patch)
tree       1bf30874f57c6c6b21160a10282191fcd0868055
parent     05bb520376af2c5146d3c44832c22ec3bb54d778 (diff)
parent     33920f1ec5bf47c5c0a1d2113989bdd9dfb3fae9 (diff)
download   linux-13dfb3fa494361ea9a5950f27c9cd8b06d28c04f.tar.bz2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Just minor overlapping changes in the conflicts here.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--.gitignore3
-rw-r--r--.mailmap3
-rw-r--r--Documentation/PCI/pci-error-recovery.rst5
-rw-r--r--Documentation/RCU/rculist_nulls.txt2
-rw-r--r--Documentation/admin-guide/conf.py10
-rw-r--r--Documentation/admin-guide/hw-vuln/spectre.rst88
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt10
-rw-r--r--Documentation/admin-guide/mm/transhuge.rst2
-rw-r--r--Documentation/conf.py30
-rw-r--r--Documentation/core-api/conf.py10
-rw-r--r--Documentation/crypto/conf.py10
-rw-r--r--Documentation/dev-tools/conf.py10
-rw-r--r--Documentation/devicetree/bindings/arm/idle-states.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/renesas.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml2
-rw-r--r--Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml2
-rw-r--r--Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt16
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml2
-rw-r--r--Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml (renamed from Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml)2
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml2
-rw-r--r--Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml4
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml45
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.txt81
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.yaml93
-rw-r--r--Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-controller.yaml1
-rw-r--r--Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml2
-rw-r--r--Documentation/devicetree/bindings/usb/usb251xb.txt6
-rw-r--r--Documentation/doc-guide/conf.py10
-rw-r--r--Documentation/driver-api/80211/conf.py10
-rw-r--r--Documentation/driver-api/conf.py10
-rw-r--r--Documentation/driver-api/generic-counter.rst4
-rw-r--r--Documentation/driver-api/phy/phy.rst4
-rw-r--r--Documentation/driver-api/pm/conf.py10
-rw-r--r--Documentation/filesystems/conf.py10
-rw-r--r--Documentation/gpu/conf.py10
-rw-r--r--Documentation/hwmon/k8temp.rst2
-rw-r--r--Documentation/index.rst3
-rw-r--r--Documentation/input/conf.py10
-rw-r--r--Documentation/kernel-hacking/conf.py10
-rw-r--r--Documentation/locking/spinlocks.rst4
-rw-r--r--Documentation/maintainer/conf.py10
-rw-r--r--Documentation/media/conf.py12
-rw-r--r--Documentation/memory-barriers.txt2
-rw-r--r--Documentation/networking/conf.py10
-rw-r--r--Documentation/networking/tls-offload.rst23
-rw-r--r--Documentation/power/index.rst2
-rw-r--r--Documentation/powerpc/bootwrapper.rst (renamed from Documentation/powerpc/bootwrapper.txt)28
-rw-r--r--Documentation/powerpc/cpu_families.rst (renamed from Documentation/powerpc/cpu_families.txt)23
-rw-r--r--Documentation/powerpc/cpu_features.rst (renamed from Documentation/powerpc/cpu_features.txt)6
-rw-r--r--Documentation/powerpc/cxl.rst (renamed from Documentation/powerpc/cxl.txt)46
-rw-r--r--Documentation/powerpc/cxlflash.rst (renamed from Documentation/powerpc/cxlflash.txt)10
-rw-r--r--Documentation/powerpc/dawr-power9.rst (renamed from Documentation/powerpc/DAWR-POWER9.txt)15
-rw-r--r--Documentation/powerpc/dscr.rst (renamed from Documentation/powerpc/dscr.txt)18
-rw-r--r--Documentation/powerpc/eeh-pci-error-recovery.rst (renamed from Documentation/powerpc/eeh-pci-error-recovery.txt)108
-rw-r--r--Documentation/powerpc/firmware-assisted-dump.rst (renamed from Documentation/powerpc/firmware-assisted-dump.txt)117
-rw-r--r--Documentation/powerpc/hvcs.rst (renamed from Documentation/powerpc/hvcs.txt)108
-rw-r--r--Documentation/powerpc/index.rst34
-rw-r--r--Documentation/powerpc/isa-versions.rst15
-rw-r--r--Documentation/powerpc/mpc52xx.rst (renamed from Documentation/powerpc/mpc52xx.txt)12
-rw-r--r--Documentation/powerpc/pci_iov_resource_on_powernv.rst (renamed from Documentation/powerpc/pci_iov_resource_on_powernv.txt)15
-rw-r--r--Documentation/powerpc/pmu-ebb.rst (renamed from Documentation/powerpc/pmu-ebb.txt)1
-rw-r--r--Documentation/powerpc/ptrace.rst156
-rw-r--r--Documentation/powerpc/ptrace.txt151
-rw-r--r--Documentation/powerpc/qe_firmware.rst (renamed from Documentation/powerpc/qe_firmware.txt)37
-rw-r--r--Documentation/powerpc/syscall64-abi.rst (renamed from Documentation/powerpc/syscall64-abi.txt)29
-rw-r--r--Documentation/powerpc/transactional_memory.rst (renamed from Documentation/powerpc/transactional_memory.txt)45
-rw-r--r--Documentation/process/conf.py10
-rw-r--r--Documentation/process/deprecated.rst14
-rw-r--r--Documentation/s390/vfio-ccw.rst31
-rw-r--r--Documentation/sh/conf.py10
-rw-r--r--Documentation/sound/conf.py10
-rw-r--r--Documentation/sphinx/load_config.py27
-rw-r--r--Documentation/translations/it_IT/doc-guide/sphinx.rst19
-rw-r--r--Documentation/translations/it_IT/process/index.rst1
-rw-r--r--Documentation/translations/it_IT/process/kernel-docs.rst11
-rw-r--r--Documentation/translations/it_IT/process/maintainer-pgp-guide.rst25
-rw-r--r--Documentation/translations/it_IT/process/programming-language.rst51
-rw-r--r--Documentation/translations/ko_KR/memory-barriers.txt2
-rw-r--r--Documentation/userspace-api/conf.py10
-rw-r--r--Documentation/virt/index.rst (renamed from Documentation/virtual/index.rst)0
-rw-r--r--Documentation/virt/kvm/amd-memory-encryption.rst (renamed from Documentation/virtual/kvm/amd-memory-encryption.rst)0
-rw-r--r--Documentation/virt/kvm/api.txt (renamed from Documentation/virtual/kvm/api.txt)2
-rw-r--r--Documentation/virt/kvm/arm/hyp-abi.txt (renamed from Documentation/virtual/kvm/arm/hyp-abi.txt)0
-rw-r--r--Documentation/virt/kvm/arm/psci.txt (renamed from Documentation/virtual/kvm/arm/psci.txt)0
-rw-r--r--Documentation/virt/kvm/cpuid.rst (renamed from Documentation/virtual/kvm/cpuid.rst)0
-rw-r--r--Documentation/virt/kvm/devices/README (renamed from Documentation/virtual/kvm/devices/README)0
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic-its.txt (renamed from Documentation/virtual/kvm/devices/arm-vgic-its.txt)0
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic-v3.txt (renamed from Documentation/virtual/kvm/devices/arm-vgic-v3.txt)0
-rw-r--r--Documentation/virt/kvm/devices/arm-vgic.txt (renamed from Documentation/virtual/kvm/devices/arm-vgic.txt)0
-rw-r--r--Documentation/virt/kvm/devices/mpic.txt (renamed from Documentation/virtual/kvm/devices/mpic.txt)0
-rw-r--r--Documentation/virt/kvm/devices/s390_flic.txt (renamed from Documentation/virtual/kvm/devices/s390_flic.txt)0
-rw-r--r--Documentation/virt/kvm/devices/vcpu.txt (renamed from Documentation/virtual/kvm/devices/vcpu.txt)0
-rw-r--r--Documentation/virt/kvm/devices/vfio.txt (renamed from Documentation/virtual/kvm/devices/vfio.txt)0
-rw-r--r--Documentation/virt/kvm/devices/vm.txt (renamed from Documentation/virtual/kvm/devices/vm.txt)0
-rw-r--r--Documentation/virt/kvm/devices/xics.txt (renamed from Documentation/virtual/kvm/devices/xics.txt)0
-rw-r--r--Documentation/virt/kvm/devices/xive.txt (renamed from Documentation/virtual/kvm/devices/xive.txt)0
-rw-r--r--Documentation/virt/kvm/halt-polling.txt (renamed from Documentation/virtual/kvm/halt-polling.txt)0
-rw-r--r--Documentation/virt/kvm/hypercalls.txt (renamed from Documentation/virtual/kvm/hypercalls.txt)4
-rw-r--r--Documentation/virt/kvm/index.rst (renamed from Documentation/virtual/kvm/index.rst)1
-rw-r--r--Documentation/virt/kvm/locking.txt (renamed from Documentation/virtual/kvm/locking.txt)0
-rw-r--r--Documentation/virt/kvm/mmu.txt (renamed from Documentation/virtual/kvm/mmu.txt)2
-rw-r--r--Documentation/virt/kvm/msr.txt (renamed from Documentation/virtual/kvm/msr.txt)0
-rw-r--r--Documentation/virt/kvm/nested-vmx.txt (renamed from Documentation/virtual/kvm/nested-vmx.txt)0
-rw-r--r--Documentation/virt/kvm/ppc-pv.txt (renamed from Documentation/virtual/kvm/ppc-pv.txt)0
-rw-r--r--Documentation/virt/kvm/review-checklist.txt (renamed from Documentation/virtual/kvm/review-checklist.txt)2
-rw-r--r--Documentation/virt/kvm/s390-diag.txt (renamed from Documentation/virtual/kvm/s390-diag.txt)0
-rw-r--r--Documentation/virt/kvm/timekeeping.txt (renamed from Documentation/virtual/kvm/timekeeping.txt)0
-rw-r--r--Documentation/virt/kvm/vcpu-requests.rst (renamed from Documentation/virtual/kvm/vcpu-requests.rst)0
-rw-r--r--Documentation/virt/paravirt_ops.rst (renamed from Documentation/virtual/paravirt_ops.rst)0
-rw-r--r--Documentation/virt/uml/UserModeLinux-HOWTO.txt (renamed from Documentation/virtual/uml/UserModeLinux-HOWTO.txt)0
-rw-r--r--Documentation/vm/conf.py10
-rw-r--r--Documentation/vm/hmm.rst2
-rw-r--r--Documentation/watchdog/hpwdt.rst2
-rw-r--r--Documentation/x86/conf.py10
-rw-r--r--MAINTAINERS58
-rw-r--r--Makefile8
-rw-r--r--arch/arm/Kconfig.debug5
-rw-r--r--arch/arm/boot/dts/bcm47094-linksys-panamera.dts3
-rw-r--r--arch/arm/boot/dts/imx6ul-14x14-evk.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul-geam.dts2
-rw-r--r--arch/arm/boot/dts/imx6ul-isiot.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6ul-pico-hobbit.dts2
-rw-r--r--arch/arm/boot/dts/imx6ul-pico-pi.dts4
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi2
-rw-r--r--arch/arm/configs/u8500_defconfig34
-rw-r--r--arch/arm/include/asm/dma-mapping.h4
-rw-r--r--arch/arm/mach-davinci/sleep.S1
-rw-r--r--arch/arm/mach-netx/Kconfig22
-rw-r--r--arch/arm/mach-netx/Makefile13
-rw-r--r--arch/arm/mach-netx/Makefile.boot3
-rw-r--r--arch/arm/mach-netx/fb.c65
-rw-r--r--arch/arm/mach-netx/fb.h12
-rw-r--r--arch/arm/mach-netx/generic.c182
-rw-r--r--arch/arm/mach-netx/generic.h14
-rw-r--r--arch/arm/mach-netx/include/mach/hardware.h27
-rw-r--r--arch/arm/mach-netx/include/mach/irqs.h58
-rw-r--r--arch/arm/mach-netx/include/mach/netx-regs.h420
-rw-r--r--arch/arm/mach-netx/include/mach/pfifo.h42
-rw-r--r--arch/arm/mach-netx/include/mach/uncompress.h63
-rw-r--r--arch/arm/mach-netx/include/mach/xc.h30
-rw-r--r--arch/arm/mach-netx/nxdb500.c197
-rw-r--r--arch/arm/mach-netx/nxdkn.c90
-rw-r--r--arch/arm/mach-netx/nxeb500hmi.c174
-rw-r--r--arch/arm/mach-netx/pfifo.c56
-rw-r--r--arch/arm/mach-netx/time.c141
-rw-r--r--arch/arm/mach-netx/xc.c246
-rw-r--r--arch/arm/mm/Kconfig5
-rw-r--r--arch/arm/mm/dma-mapping.c61
-rw-r--r--arch/arm/mm/init.c5
-rw-r--r--arch/arm64/Makefile2
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi3
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h6
-rw-r--r--arch/arm64/include/asm/cpufeature.h7
-rw-r--r--arch/arm64/include/asm/daifflags.h2
-rw-r--r--arch/arm64/include/asm/efi.h6
-rw-r--r--arch/arm64/include/asm/elf.h2
-rw-r--r--arch/arm64/include/asm/memory.h10
-rw-r--r--arch/arm64/include/asm/pgtable.h5
-rw-r--r--arch/arm64/include/asm/processor.h14
-rw-r--r--arch/arm64/include/asm/ptrace.h2
-rw-r--r--arch/arm64/include/asm/stacktrace.h78
-rw-r--r--arch/arm64/include/asm/vdso/compat_gettimeofday.h40
-rw-r--r--arch/arm64/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--arch/arm64/kernel/cpufeature.c8
-rw-r--r--arch/arm64/kernel/debug-monitors.c14
-rw-r--r--arch/arm64/kernel/entry.S22
-rw-r--r--arch/arm64/kernel/fpsimd.c29
-rw-r--r--arch/arm64/kernel/hw_breakpoint.c11
-rw-r--r--arch/arm64/kernel/module.c4
-rw-r--r--arch/arm64/kernel/perf_callchain.c7
-rw-r--r--arch/arm64/kernel/probes/kprobes.c40
-rw-r--r--arch/arm64/kernel/process.c36
-rw-r--r--arch/arm64/kernel/return_address.c12
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kernel/stacktrace.c62
-rw-r--r--arch/arm64/kernel/time.c7
-rw-r--r--arch/arm64/kernel/traps.c13
-rw-r--r--arch/arm64/kernel/vdso/Makefile13
-rw-r--r--arch/arm64/kernel/vdso32/Makefile14
-rw-r--r--arch/arm64/mm/fault.c57
-rw-r--r--arch/csky/include/uapi/asm/byteorder.h2
-rw-r--r--arch/csky/include/uapi/asm/cachectl.h2
-rw-r--r--arch/csky/include/uapi/asm/perf_regs.h2
-rw-r--r--arch/csky/include/uapi/asm/ptrace.h2
-rw-r--r--arch/csky/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/csky/include/uapi/asm/unistd.h2
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c1
-rw-r--r--arch/mips/kernel/cacheinfo.c2
-rw-r--r--arch/mips/kernel/i8253.c3
-rw-r--r--arch/mips/kvm/emulate.c1
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c13
-rw-r--r--arch/mips/pci/ops-bcm63xx.c1
-rw-r--r--arch/mips/vdso/vdso.h1
-rw-r--r--arch/nds32/include/uapi/asm/auxvec.h2
-rw-r--r--arch/nds32/include/uapi/asm/byteorder.h2
-rw-r--r--arch/nds32/include/uapi/asm/cachectl.h2
-rw-r--r--arch/nds32/include/uapi/asm/fp_udfiex_crtl.h2
-rw-r--r--arch/nds32/include/uapi/asm/param.h2
-rw-r--r--arch/nds32/include/uapi/asm/ptrace.h2
-rw-r--r--arch/nds32/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/nds32/include/uapi/asm/unistd.h2
-rw-r--r--arch/parisc/Makefile5
-rw-r--r--arch/parisc/boot/compressed/Makefile4
-rw-r--r--arch/parisc/boot/compressed/vmlinux.lds.S4
-rw-r--r--arch/parisc/configs/defconfig (renamed from arch/parisc/configs/default_defconfig)0
-rw-r--r--arch/parisc/include/asm/kprobes.h4
-rw-r--r--arch/parisc/kernel/ftrace.c3
-rw-r--r--arch/parisc/kernel/pacache.S3
-rw-r--r--arch/parisc/math-emu/Makefile1
-rw-r--r--arch/parisc/mm/fault.c1
-rw-r--r--arch/powerpc/Kconfig1
-rw-r--r--arch/powerpc/include/asm/hvcall.h11
-rw-r--r--arch/powerpc/include/asm/pmc.h5
-rw-r--r--arch/powerpc/include/asm/unistd.h1
-rw-r--r--arch/powerpc/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--arch/powerpc/include/uapi/asm/kvm_para.h2
-rw-r--r--arch/powerpc/kernel/Makefile3
-rw-r--r--arch/powerpc/kernel/align.c4
-rw-r--r--arch/powerpc/kernel/dma-common.c17
-rw-r--r--arch/powerpc/kernel/entry_32.S8
-rw-r--r--arch/powerpc/kernel/entry_64.S5
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S2
-rw-r--r--arch/powerpc/kernel/signal_32.c3
-rw-r--r--arch/powerpc/kernel/signal_64.c5
-rw-r--r--arch/powerpc/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu.c1
-rw-r--r--arch/powerpc/kvm/book3s_hv.c13
-rw-r--r--arch/powerpc/kvm/book3s_xive.c4
-rw-r--r--arch/powerpc/kvm/book3s_xive_native.c4
-rw-r--r--arch/powerpc/mm/book3s64/hash_utils.c9
-rw-r--r--arch/powerpc/mm/kasan/kasan_init_32.c7
-rw-r--r--arch/powerpc/mm/mem.c2
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c73
-rw-r--r--arch/powerpc/sysdev/xive/common.c7
-rw-r--r--arch/riscv/boot/dts/sifive/fu540-c000.dtsi16
-rw-r--r--arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts9
-rw-r--r--arch/riscv/configs/defconfig10
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/uapi/asm/auxvec.h2
-rw-r--r--arch/riscv/include/uapi/asm/bitsperlong.h2
-rw-r--r--arch/riscv/include/uapi/asm/byteorder.h2
-rw-r--r--arch/riscv/include/uapi/asm/hwcap.h2
-rw-r--r--arch/riscv/include/uapi/asm/ptrace.h2
-rw-r--r--arch/riscv/include/uapi/asm/sigcontext.h2
-rw-r--r--arch/riscv/include/uapi/asm/ucontext.h2
-rw-r--r--arch/riscv/include/uapi/asm/unistd.h1
-rw-r--r--arch/riscv/kernel/vdso/Makefile2
-rw-r--r--arch/s390/boot/Makefile2
-rw-r--r--arch/s390/boot/boot.h2
-rw-r--r--arch/s390/boot/head.S1
-rw-r--r--arch/s390/boot/kaslr.c1
-rw-r--r--arch/s390/boot/version.c7
-rw-r--r--arch/s390/configs/debug_defconfig330
-rw-r--r--arch/s390/configs/defconfig233
-rw-r--r--arch/s390/configs/zfcpdump_defconfig31
-rw-r--r--arch/s390/hypfs/hypfs_vm.c4
-rw-r--r--arch/s390/include/asm/bitops.h73
-rw-r--r--arch/s390/include/asm/page.h2
-rw-r--r--arch/s390/include/asm/qdio.h10
-rw-r--r--arch/s390/include/asm/setup.h5
-rw-r--r--arch/s390/include/asm/unistd.h1
-rw-r--r--arch/s390/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--arch/s390/include/uapi/asm/ipl.h2
-rw-r--r--arch/s390/include/uapi/asm/zcrypt.h35
-rw-r--r--arch/s390/kernel/machine_kexec_reloc.c1
-rw-r--r--arch/s390/kernel/perf_cpum_cf_diag.c2
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/lib/xor.c1
-rw-r--r--arch/s390/mm/fault.c3
-rw-r--r--arch/s390/mm/gmap.c4
-rw-r--r--arch/s390/mm/pgalloc.c6
-rw-r--r--arch/sh/include/uapi/asm/setup.h2
-rw-r--r--arch/sh/include/uapi/asm/types.h2
-rw-r--r--arch/sparc/include/uapi/asm/oradax.h2
-rw-r--r--arch/x86/entry/calling.h17
-rw-r--r--arch/x86/entry/entry_32.S13
-rw-r--r--arch/x86/entry/entry_64.S21
-rw-r--r--arch/x86/events/intel/core.c9
-rw-r--r--arch/x86/events/intel/ds.c2
-rw-r--r--arch/x86/include/asm/cpufeatures.h3
-rw-r--r--arch/x86/include/asm/kvm_host.h7
-rw-r--r--arch/x86/include/asm/vdso/gettimeofday.h36
-rw-r--r--arch/x86/include/uapi/asm/byteorder.h2
-rw-r--r--arch/x86/include/uapi/asm/hwcap2.h2
-rw-r--r--arch/x86/include/uapi/asm/sigcontext32.h2
-rw-r--r--arch/x86/include/uapi/asm/types.h2
-rw-r--r--arch/x86/kernel/cpu/bugs.c107
-rw-r--r--arch/x86/kernel/cpu/common.c44
-rw-r--r--arch/x86/kernel/head_64.S8
-rw-r--r--arch/x86/kernel/hpet.c12
-rw-r--r--arch/x86/kernel/stacktrace.c2
-rw-r--r--arch/x86/kernel/sysfb_efi.c46
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/svm.c13
-rw-r--r--arch/x86/kvm/vmx/nested.c4
-rw-r--r--arch/x86/kvm/vmx/vmx.c13
-rw-r--r--arch/x86/kvm/x86.c16
-rw-r--r--arch/x86/mm/fault.c15
-rw-r--r--arch/xtensa/kernel/coprocessor.S1
-rw-r--r--block/bfq-iosched.c67
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-iolatency.c3
-rw-r--r--block/blk-mq-sched.h9
-rw-r--r--block/blk-mq.c10
-rw-r--r--block/blk-rq-qos.c7
-rw-r--r--block/blk-settings.c3
-rw-r--r--block/genhd.c2
-rw-r--r--drivers/acpi/arm64/iort.c4
-rw-r--r--drivers/acpi/device_pm.c4
-rw-r--r--drivers/acpi/nfit/core.c28
-rw-r--r--drivers/acpi/nfit/nfit.h24
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/android/binder.c5
-rw-r--r--drivers/ata/libahci_platform.c4
-rw-r--r--drivers/ata/libata-zpodd.c2
-rw-r--r--drivers/atm/iphase.c8
-rw-r--r--drivers/base/core.c30
-rw-r--r--drivers/base/firmware_loader/firmware.h4
-rw-r--r--drivers/block/ataflop.c1
-rw-r--r--drivers/block/drbd/drbd_receiver.c14
-rw-r--r--drivers/block/loop.c16
-rw-r--r--drivers/block/nbd.c2
-rw-r--r--drivers/bluetooth/hci_ath.c3
-rw-r--r--drivers/bluetooth/hci_bcm.c3
-rw-r--r--drivers/bluetooth/hci_intel.c3
-rw-r--r--drivers/bluetooth/hci_ldisc.c13
-rw-r--r--drivers/bluetooth/hci_mrvl.c3
-rw-r--r--drivers/bluetooth/hci_qca.c3
-rw-r--r--drivers/bluetooth/hci_uart.h1
-rw-r--r--drivers/char/hpet.c3
-rw-r--r--drivers/char/ipmi/ipmb_dev_int.c2
-rw-r--r--drivers/char/tpm/tpm-chip.c43
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm1-cmd.c36
-rw-r--r--drivers/char/tpm/tpm2-cmd.c6
-rw-r--r--drivers/clk/at91/clk-generated.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8183.c46
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c16
-rw-r--r--drivers/clk/sprd/Kconfig1
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c23
-rw-r--r--drivers/firewire/core-device.c2
-rw-r--r--drivers/firewire/core-iso.c2
-rw-r--r--drivers/firewire/core-topology.c1
-rw-r--r--drivers/firmware/Kconfig5
-rw-r--r--drivers/firmware/iscsi_ibft.c4
-rw-r--r--drivers/fpga/Kconfig1
-rw-r--r--drivers/gpio/gpiolib.c23
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h2
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c123
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c160
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c54
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c54
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c1
-rw-r--r--drivers/gpu/drm/bridge/Kconfig4
-rw-r--r--drivers/gpu/drm/drm_client.c60
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c51
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c2
-rw-r--r--drivers/gpu/drm/drm_modes.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c4
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c7
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c27
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c58
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ringbuffer.c31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c38
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c1
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_reset.c5
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c10
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c59
-rw-r--r--drivers/gpu/drm/i915/gvt/trace_points.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c7
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c67
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h76
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c10
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h15
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c16
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c47
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c47
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c6
-rw-r--r--drivers/hid/hid-a4tech.c30
-rw-r--r--drivers/hid/hid-holtek-kbd.c9
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-logitech-dj.c10
-rw-r--r--drivers/hid/hid-logitech-hidpp.c32
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-sony.c15
-rw-r--r--drivers/hid/hid-tmff.c12
-rw-r--r--drivers/hid/usbhid/hiddev.c12
-rw-r--r--drivers/hid/wacom_wac.c12
-rw-r--r--drivers/hwmon/nct6775.c3
-rw-r--r--drivers/hwmon/occ/common.c6
-rw-r--r--drivers/i2c/busses/i2c-at91-core.c2
-rw-r--r--drivers/i2c/busses/i2c-at91-master.c9
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c10
-rw-r--r--drivers/i2c/busses/i2c-nvidia-gpu.c2
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c1
-rw-r--r--drivers/infiniband/core/core_priv.h5
-rw-r--r--drivers/infiniband/core/counters.c11
-rw-r--r--drivers/infiniband/core/device.c102
-rw-r--r--drivers/infiniband/core/mad.c20
-rw-r--r--drivers/infiniband/core/user_mad.c6
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c13
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c14
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h7
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c11
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c43
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c2
-rw-r--r--drivers/infiniband/hw/hns/Kconfig6
-rw-r--r--drivers/infiniband/hw/hns/Makefile8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c15
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c7
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c50
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c13
-rw-r--r--drivers/infiniband/hw/qedr/main.c10
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c1
-rw-r--r--drivers/infiniband/sw/siw/siw_qp.c6
-rw-r--r--drivers/iommu/amd_iommu_init.c90
-rw-r--r--drivers/iommu/amd_iommu_types.h9
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c4
-rw-r--r--drivers/iommu/intel-iommu.c189
-rw-r--r--drivers/iommu/iova.c23
-rw-r--r--drivers/iommu/virtio-iommu.c40
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c1
-rw-r--r--drivers/irqchip/irq-mbigen.c9
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c13
-rw-r--r--drivers/macintosh/smu.c1
-rw-r--r--drivers/md/bcache/super.c3
-rw-r--r--drivers/md/dm-table.c16
-rw-r--r--drivers/misc/eeprom/Kconfig3
-rw-r--r--drivers/misc/eeprom/at24.c2
-rw-r--r--drivers/misc/habanalabs/goya/goya.c6
-rw-r--r--drivers/misc/mei/hw-me-regs.h3
-rw-r--r--drivers/misc/mei/pci-me.c3
-rw-r--r--drivers/mmc/core/queue.c5
-rw-r--r--drivers/mmc/host/dw_mmc.c3
-rw-r--r--drivers/mmc/host/meson-mx-sdio.c2
-rw-r--r--drivers/mmc/host/sdhci-sprd.c1
-rw-r--r--drivers/mtd/hyperbus/Kconfig3
-rw-r--r--drivers/mtd/nand/onenand/onenand_base.c1
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c14
-rw-r--r--drivers/net/arcnet/arc-rimi.c3
-rw-r--r--drivers/net/arcnet/com20020-isa.c6
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c3
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c39
-rw-r--r--drivers/net/can/rcar/rcar_canfd.c9
-rw-r--r--drivers/net/can/sja1000/peak_pcmcia.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c49
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c2
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c29
-rw-r--r--drivers/net/dsa/qca8k.c10
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c14
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c143
-rw-r--r--drivers/net/dsa/sja1105/sja1105_ptp.c7
-rw-r--r--drivers/net/ethernet/8390/Kconfig4
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/apple/Kconfig4
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c2
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c18
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c3
-rw-r--r--drivers/net/ethernet/google/gve/gve.h8
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c34
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c28
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c31
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c87
-rw-r--r--drivers/net/ethernet/marvell/sky2.c7
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c76
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c17
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c4
-rw-r--r--drivers/net/ethernet/ni/Kconfig2
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig6
-rw-r--r--drivers/net/ethernet/packetengines/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c13
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c14
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c2
-rw-r--r--drivers/net/ethernet/samsung/Kconfig2
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c87
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c50
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c1
-rw-r--r--drivers/net/ethernet/xscale/Kconfig2
-rw-r--r--drivers/net/hamradio/baycom_epp.c3
-rw-r--r--drivers/net/phy/fixed_phy.c6
-rw-r--r--drivers/net/phy/mscc.c16
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/phy/phy_led_triggers.c3
-rw-r--r--drivers/net/phy/phylink.c10
-rw-r--r--drivers/net/ppp/pppoe.c3
-rw-r--r--drivers/net/ppp/pppox.c13
-rw-r--r--drivers/net/ppp/pptp.c3
-rw-r--r--drivers/net/tun.c9
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c12
-rw-r--r--drivers/net/wan/sdla.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c539
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c185
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c3
-rw-r--r--drivers/nfc/nfcmrvl/main.c4
-rw-r--r--drivers/nfc/nfcmrvl/uart.c4
-rw-r--r--drivers/nfc/nfcmrvl/usb.c1
-rw-r--r--drivers/nfc/st-nci/se.c2
-rw-r--r--drivers/nfc/st21nfca/se.c2
-rw-r--r--drivers/nvdimm/btt_devs.c16
-rw-r--r--drivers/nvdimm/bus.c210
-rw-r--r--drivers/nvdimm/core.c10
-rw-r--r--drivers/nvdimm/dimm_devs.c4
-rw-r--r--drivers/nvdimm/namespace_devs.c36
-rw-r--r--drivers/nvdimm/nd-core.h71
-rw-r--r--drivers/nvdimm/pfn_devs.c24
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvdimm/region.c24
-rw-r--r--drivers/nvdimm/region_devs.c12
-rw-r--r--drivers/nvme/host/core.c12
-rw-r--r--drivers/nvme/host/multipath.c8
-rw-r--r--drivers/nvme/host/nvme.h6
-rw-r--r--drivers/nvme/host/pci.c6
-rw-r--r--drivers/perf/arm_pmu.c2
-rw-r--r--drivers/platform/olpc/olpc-xo175-ec.c6
-rw-r--r--drivers/platform/x86/intel_pmc_core.c1
-rw-r--r--drivers/platform/x86/pcengines-apuv2.c6
-rw-r--r--drivers/powercap/intel_rapl_common.c2
-rw-r--r--drivers/powercap/powercap_sys.c2
-rw-r--r--drivers/regulator/axp20x-regulator.c10
-rw-r--r--drivers/regulator/lp87565-regulator.c8
-rw-r--r--drivers/regulator/of_regulator.c4
-rw-r--r--drivers/s390/block/dasd_alias.c22
-rw-r--r--drivers/s390/char/con3215.c1
-rw-r--r--drivers/s390/char/tape_core.c3
-rw-r--r--drivers/s390/cio/qdio_main.c24
-rw-r--r--drivers/s390/cio/vfio_ccw_async.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c28
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c2
-rw-r--r--drivers/s390/crypto/ap_queue.c1
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c17
-rw-r--r--drivers/s390/virtio/virtio_ccw.c4
-rw-r--r--drivers/scsi/Kconfig4
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c7
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c140
-rw-r--r--drivers/scsi/hpsa.c18
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c2
-rw-r--r--drivers/scsi/libfc/fc_rport.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c27
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/soc/fsl/qe/qe.c2
-rw-r--r--drivers/spi/spi-bcm2835.c3
-rw-r--r--drivers/spi/spi-fsl-qspi.c2
-rw-r--r--drivers/spi/spi-gpio.c6
-rw-r--r--drivers/spi/spi-pxa2xx.c14
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c8
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_main.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c4
-rw-r--r--drivers/tty/hvc/hvcs.c2
-rw-r--r--drivers/tty/serial/Kconfig19
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/netx-serial.c733
-rw-r--r--drivers/tty/tty_ldsem.c5
-rw-r--r--drivers/tty/vt/vt.c6
-rw-r--r--drivers/usb/core/hcd.c4
-rw-r--r--drivers/usb/host/ehci-pci.c4
-rw-r--r--drivers/usb/host/hwa-hc.c2
-rw-r--r--drivers/usb/host/ohci-pci.c2
-rw-r--r--drivers/usb/host/pci-quirks.c45
-rw-r--r--drivers/usb/host/pci-quirks.h2
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/usb251xb.c15
-rw-r--r--drivers/usb/storage/scsiglue.c11
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/privcmd.c35
-rw-r--r--drivers/xen/swiotlb-xen.c34
-rw-r--r--drivers/xen/xen-pciback/conf_space_capability.c3
-rw-r--r--drivers/xen/xlate_mmu.c32
-rw-r--r--fs/afs/fsclient.c51
-rw-r--r--fs/afs/yfsclient.c54
-rw-r--r--fs/block_dev.c142
-rw-r--r--fs/btrfs/backref.c2
-rw-r--r--fs/btrfs/locking.c9
-rw-r--r--fs/btrfs/ordered-data.c11
-rw-r--r--fs/btrfs/send.c77
-rw-r--r--fs/btrfs/transaction.c32
-rw-r--r--fs/btrfs/transaction.h3
-rw-r--r--fs/compat_ioctl.c3
-rw-r--r--fs/coredump.c44
-rw-r--r--fs/dax.c2
-rw-r--r--fs/exec.c2
-rw-r--r--fs/f2fs/file.c63
-rw-r--r--fs/f2fs/gc.c70
-rw-r--r--fs/f2fs/super.c48
-rw-r--r--fs/gfs2/bmap.c15
-rw-r--r--fs/io_uring.c82
-rw-r--r--fs/iomap/Makefile2
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/ocfs2/xattr.c3
-rw-r--r--fs/open.c19
-rw-r--r--fs/super.c5
-rw-r--r--fs/xfs/scrub/dabtree.c6
-rw-r--r--fs/xfs/xfs_itable.c3
-rw-r--r--include/asm-generic/futex.h21
-rw-r--r--include/asm-generic/getorder.h50
-rw-r--r--include/drm/drm_client.h2
-rw-r--r--include/drm/drm_mode_config.h7
-rw-r--r--include/linux/blk-cgroup.h1
-rw-r--r--include/linux/blk_types.h5
-rw-r--r--include/linux/clk.h1
-rw-r--r--include/linux/cred.h8
-rw-r--r--include/linux/device.h6
-rw-r--r--include/linux/dim.h56
-rw-r--r--include/linux/dma-mapping.h4
-rw-r--r--include/linux/elevator.h1
-rw-r--r--include/linux/filter.h13
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/gpio/consumer.h64
-rw-r--r--include/linux/hmm.h54
-rw-r--r--include/linux/if_pppox.h3
-rw-r--r--include/linux/if_rmnet.h4
-rw-r--r--include/linux/iova.h6
-rw-r--r--include/linux/mlx5/fs.h1
-rw-r--r--include/linux/mlx5/mlx5_ifc.h6
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/of.h2
-rw-r--r--include/linux/page-flags-layout.h18
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sched/numa_balancing.h4
-rw-r--r--include/linux/skmsg.h8
-rw-r--r--include/linux/wait.h13
-rw-r--r--include/net/cfg80211.h15
-rw-r--r--include/net/tc_act/tc_police.h4
-rw-r--r--include/net/tc_act/tc_sample.h2
-rw-r--r--include/net/tcp.h3
-rw-r--r--include/net/tls.h13
-rw-r--r--include/rdma/ib_verbs.h4
-rw-r--r--include/rdma/rdmavt_qp.h9
-rw-r--r--include/scsi/libfc.h52
-rw-r--r--include/scsi/libfcoe.h3
-rw-r--r--include/soc/fsl/qe/qe.h2
-rw-r--r--include/sound/compress_driver.h5
-rw-r--r--include/sound/sof/control.h2
-rw-r--r--include/sound/sof/dai-intel.h2
-rw-r--r--include/sound/sof/dai.h2
-rw-r--r--include/sound/sof/header.h2
-rw-r--r--include/sound/sof/info.h2
-rw-r--r--include/sound/sof/pm.h2
-rw-r--r--include/sound/sof/stream.h2
-rw-r--r--include/sound/sof/topology.h2
-rw-r--r--include/sound/sof/trace.h2
-rw-r--r--include/sound/sof/xtensa.h2
-rw-r--r--include/trace/events/dma_fence.h2
-rw-r--r--include/trace/events/napi.h4
-rw-r--r--include/trace/events/qdisc.h4
-rw-r--r--include/trace/events/tegra_apb_dma.h4
-rw-r--r--include/uapi/linux/bpfilter.h2
-rw-r--r--include/uapi/linux/ipmi_bmc.h2
-rw-r--r--include/uapi/linux/isst_if.h2
-rw-r--r--include/uapi/linux/kvm.h4
-rw-r--r--include/uapi/linux/netfilter/nf_synproxy.h2
-rw-r--r--include/uapi/linux/netfilter/xt_connlabel.h6
-rw-r--r--include/uapi/linux/psp-sev.h2
-rw-r--r--include/uapi/linux/rxrpc.h2
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/socket.h19
-rw-r--r--include/uapi/linux/usb/g_uvc.h2
-rw-r--r--include/uapi/linux/vbox_vmmdev_types.h2
-rw-r--r--include/uapi/linux/vboxguest.h2
-rw-r--r--include/uapi/linux/virtio_iommu.h32
-rw-r--r--include/uapi/linux/virtio_pmem.h2
-rw-r--r--include/uapi/linux/vmcore.h2
-rw-r--r--include/uapi/linux/wmi.h2
-rw-r--r--include/uapi/misc/fastrpc.h2
-rw-r--r--include/uapi/rdma/rvt-abi.h2
-rw-r--r--include/uapi/rdma/siw-abi.h2
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h2
-rw-r--r--include/uapi/sound/skl-tplg-interface.h2
-rw-r--r--include/xen/xen-ops.h3
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/bpf/verifier.c4
-rw-r--r--kernel/cred.c21
-rw-r--r--kernel/dma/contiguous.c8
-rw-r--r--kernel/dma/mapping.c13
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/locking/lockdep.c13
-rw-r--r--kernel/locking/lockdep_proc.c3
-rw-r--r--kernel/locking/mutex.c11
-rw-r--r--kernel/locking/rwsem.c28
-rw-r--r--kernel/sched/fair.c144
-rw-r--r--kernel/signal.c3
-rw-r--r--kernel/trace/trace_functions_graph.c17
-rw-r--r--lib/Kconfig.kasan11
-rw-r--r--lib/Makefile3
-rw-r--r--lib/dim/dim.c4
-rw-r--r--lib/dim/net_dim.c56
-rw-r--r--lib/raid6/Makefile2
-rw-r--r--lib/test_firmware.c5
-rw-r--r--lib/test_meminit.c2
-rw-r--r--lib/vdso/gettimeofday.c79
-rw-r--r--mm/Makefile1
-rw-r--r--mm/balloon_compaction.c69
-rw-r--r--mm/compaction.c11
-rw-r--r--mm/hmm.c10
-rw-r--r--mm/kmemleak.c2
-rw-r--r--mm/memory_hotplug.c2
-rw-r--r--mm/memremap.c (renamed from kernel/memremap.c)0
-rw-r--r--mm/migrate.c21
-rw-r--r--mm/slub.c8
-rw-r--r--mm/vmalloc.c9
-rw-r--r--mm/vmscan.c9
-rw-r--r--net/bridge/br.c5
-rw-r--r--net/bridge/br_multicast.c3
-rw-r--r--net/bridge/br_private.h9
-rw-r--r--net/bridge/br_vlan.c29
-rw-r--r--net/bridge/netfilter/ebtables.c32
-rw-r--r--net/bridge/netfilter/nft_meta_bridge.c10
-rw-r--r--net/can/gw.c48
-rw-r--r--net/core/dev.c17
-rw-r--r--net/core/filter.c6
-rw-r--r--net/core/skmsg.c4
-rw-r--r--net/core/sock_map.c19
-rw-r--r--net/dsa/tag_sja1105.c12
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/ipip.c3
-rw-r--r--net/ipv4/tcp_ulp.c13
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/ip6_tunnel.c6
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/iucv/af_iucv.c14
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/iface.c1
-rw-r--r--net/mac80211/mlme.c10
-rw-r--r--net/mac80211/util.c7
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c2
-rw-r--r--net/netfilter/ipset/ip_set_core.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c6
-rw-r--r--net/netfilter/nft_meta.c16
-rw-r--r--net/netrom/af_netrom.c1
-rw-r--r--net/openvswitch/datapath.c15
-rw-r--r--net/rds/rdma_transport.c5
-rw-r--r--net/rxrpc/ar-internal.h1
-rw-r--r--net/rxrpc/peer_event.c2
-rw-r--r--net/rxrpc/peer_object.c18
-rw-r--r--net/rxrpc/sendmsg.c1
-rw-r--r--net/sched/act_bpf.c9
-rw-r--r--net/sched/act_connmark.c9
-rw-r--r--net/sched/act_csum.c9
-rw-r--r--net/sched/act_ct.c9
-rw-r--r--net/sched/act_ctinfo.c9
-rw-r--r--net/sched/act_gact.c8
-rw-r--r--net/sched/act_ife.c13
-rw-r--r--net/sched/act_mirred.c13
-rw-r--r--net/sched/act_mpls.c8
-rw-r--r--net/sched/act_nat.c9
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c8
-rw-r--r--net/sched/act_sample.c10
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c11
-rw-r--r--net/sched/act_skbmod.c11
-rw-r--r--net/sched/act_tunnel_key.c8
-rw-r--r--net/sched/act_vlan.c25
-rw-r--r--net/sched/sch_codel.c6
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/smc/af_smc.c15
-rw-r--r--net/tipc/netlink_compat.c11
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/tls/tls_main.c97
-rw-r--r--net/tls/tls_sw.c83
-rw-r--r--net/vmw_vsock/hyperv_transport.c8
-rw-r--r--net/wireless/core.c6
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/util.c27
-rw-r--r--samples/vfio-mdev/mdpy-defs.h2
-rw-r--r--scripts/Kbuild.include3
-rw-r--r--scripts/Kconfig.include2
-rw-r--r--scripts/Makefile.lib2
-rw-r--r--scripts/Makefile.modpost82
-rwxr-xr-xscripts/gen_compile_commands.py4
-rwxr-xr-xscripts/headers_install.sh6
-rw-r--r--scripts/kconfig/confdata.c4
-rwxr-xr-xscripts/link-vmlinux.sh2
-rwxr-xr-xscripts/sphinx-pre-install118
-rw-r--r--security/Kconfig.hardening7
-rw-r--r--security/selinux/ss/policydb.c6
-rw-r--r--security/selinux/ss/sidtab.c5
-rw-r--r--sound/ac97/bus.c13
-rw-r--r--sound/core/compress_offload.c60
-rw-r--r--sound/core/pcm_native.c12
-rw-r--r--sound/hda/hdac_i915.c10
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/usb/helper.c2
-rw-r--r--sound/usb/line6/podhd.c2
-rw-r--r--sound/usb/line6/variax.c2
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h12
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h10
-rw-r--r--tools/arch/powerpc/include/uapi/asm/mman.h4
-rw-r--r--tools/arch/sparc/include/uapi/asm/mman.h4
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h22
-rw-r--r--tools/arch/x86/include/uapi/asm/vmx.h1
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h15
-rw-r--r--tools/include/uapi/asm-generic/mman.h10
-rw-r--r--tools/include/uapi/asm-generic/unistd.h8
-rw-r--r--tools/include/uapi/drm/drm.h1
-rw-r--r--tools/include/uapi/drm/i915_drm.h209
-rw-r--r--tools/include/uapi/linux/if_link.h5
-rw-r--r--tools/include/uapi/linux/kvm.h7
-rw-r--r--tools/include/uapi/linux/sched.h30
-rw-r--r--tools/include/uapi/linux/usbdevice_fs.h26
-rw-r--r--tools/lib/bpf/btf.c5
-rw-r--r--tools/lib/bpf/hashmap.h5
-rw-r--r--tools/lib/bpf/libbpf.c34
-rw-r--r--tools/lib/bpf/xsk.c11
-rw-r--r--tools/objtool/check.c7
-rw-r--r--tools/objtool/check.h3
-rw-r--r--tools/perf/Documentation/perf-script.txt8
-rw-r--r--tools/perf/Documentation/perf.data-file-format.txt2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--tools/perf/builtin-probe.c10
-rw-r--r--tools/perf/builtin-script.c2
-rw-r--r--tools/perf/builtin-stat.c9
-rwxr-xr-xtools/perf/trace/beauty/usbdevfs_ioctl.sh9
-rw-r--r--tools/perf/util/evsel.c2
-rw-r--r--tools/perf/util/header.c11
-rw-r--r--tools/perf/util/probe-event.c1
-rw-r--r--tools/perf/util/session.c22
-rw-r--r--tools/perf/util/session.h1
-rw-r--r--tools/perf/util/stat-shadow.c3
-rw-r--r--tools/perf/util/zstd.c4
-rw-r--r--tools/scripts/Makefile.include9
-rwxr-xr-xtools/testing/ktest/config-bisect.pl4
-rw-r--r--tools/testing/selftests/bpf/Makefile3
-rw-r--r--tools/testing/selftests/bpf/progs/sendmsg6_prog.c3
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan.sh57
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh9
-rw-r--r--tools/testing/selftests/bpf/verifier/ctx_skb.c11
-rw-r--r--tools/testing/selftests/cgroup/cgroup_util.c3
-rwxr-xr-xtools/testing/selftests/kmod/kmod.sh6
-rw-r--r--tools/testing/selftests/kselftest.h15
-rw-r--r--tools/testing/selftests/livepatch/functions.sh46
-rw-r--r--tools/testing/selftests/net/.gitignore4
-rwxr-xr-xtools/testing/selftests/net/forwarding/gre_multipath.sh28
-rw-r--r--tools/testing/selftests/net/tls.c223
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c6
-rw-r--r--tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json94
-rw-r--r--tools/testing/selftests/x86/test_vsyscall.c2
-rw-r--r--usr/include/Makefile4
-rw-r--r--virt/kvm/arm/arm.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c2
-rw-r--r--virt/kvm/arm/vgic/vgic.h4
1012 files changed, 9587 insertions, 7699 deletions
diff --git a/.gitignore b/.gitignore
index 8f5422cba6e2..2030c7a4d2f8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -142,3 +142,6 @@ x509.genkey
# Kdevelop4
*.kdev4
+
+# Clang's compilation database file
+/compile_commands.json
diff --git a/.mailmap b/.mailmap
index 0fef932de3db..acba1a6163f1 100644
--- a/.mailmap
+++ b/.mailmap
@@ -98,6 +98,7 @@ Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com>
Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
<javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
Jean Tourrilhes <jt@hpl.hp.com>
+<jean-philippe@linaro.org> <jean-philippe.brucker@arm.com>
Jeff Garzik <jgarzik@pretzel.yyz.us>
Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jeff Layton <jlayton@kernel.org> <jlayton@poochiereds.net>
@@ -116,6 +117,7 @@ John Stultz <johnstul@us.ibm.com>
Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
+Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
@@ -132,6 +134,7 @@ Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
+Marc Zyngier <maz@kernel.org> <marc.zyngier@arm.com>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
Mark Brown <broonie@sirena.org.uk>
Mark Yao <markyao0591@gmail.com> <mark.yao@rock-chips.com>
diff --git a/Documentation/PCI/pci-error-recovery.rst b/Documentation/PCI/pci-error-recovery.rst
index 7e30f43a9659..13beee23cb04 100644
--- a/Documentation/PCI/pci-error-recovery.rst
+++ b/Documentation/PCI/pci-error-recovery.rst
@@ -403,7 +403,7 @@ That is, the recovery API only requires that:
.. note::
Implementation details for the powerpc platform are discussed in
- the file Documentation/powerpc/eeh-pci-error-recovery.txt
+ the file Documentation/powerpc/eeh-pci-error-recovery.rst
As of this writing, there is a growing list of device drivers with
patches implementing error recovery. Not all of these patches are in
@@ -421,3 +421,6 @@ That is, the recovery API only requires that:
- drivers/net/ixgbe
- drivers/net/cxgb3
- drivers/net/s2io.c
+
+The End
+-------
diff --git a/Documentation/RCU/rculist_nulls.txt b/Documentation/RCU/rculist_nulls.txt
index 8151f0195f76..23f115dc87cf 100644
--- a/Documentation/RCU/rculist_nulls.txt
+++ b/Documentation/RCU/rculist_nulls.txt
@@ -1,7 +1,7 @@
Using hlist_nulls to protect read-mostly linked lists and
objects using SLAB_TYPESAFE_BY_RCU allocations.
-Please read the basics in Documentation/RCU/listRCU.txt
+Please read the basics in Documentation/RCU/listRCU.rst
Using special makers (called 'nulls') is a convenient way
to solve following problem :
diff --git a/Documentation/admin-guide/conf.py b/Documentation/admin-guide/conf.py
deleted file mode 100644
index 86f738953799..000000000000
--- a/Documentation/admin-guide/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = 'Linux Kernel User Documentation'
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'linux-user.tex', 'Linux Kernel User Documentation',
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 25f3b2532198..e05e581af5cf 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -41,10 +41,11 @@ Related CVEs
The following CVE entries describe Spectre variants:
- ============= ======================= =================
+ ============= ======================= ==========================
CVE-2017-5753 Bounds check bypass Spectre variant 1
CVE-2017-5715 Branch target injection Spectre variant 2
- ============= ======================= =================
+ CVE-2019-1125 Spectre v1 swapgs Spectre variant 1 (swapgs)
+ ============= ======================= ==========================
Problem
-------
@@ -78,6 +79,13 @@ There are some extensions of Spectre variant 1 attacks for reading data
over the network, see :ref:`[12] <spec_ref12>`. However such attacks
are difficult, low bandwidth, fragile, and are considered low risk.
+Note that, despite "Bounds Check Bypass" name, Spectre variant 1 is not
+only about user-controlled array bounds checks. It can affect any
+conditional checks. The kernel entry code interrupt, exception, and NMI
+handlers all have conditional swapgs checks. Those may be problematic
+in the context of Spectre v1, as kernel code can speculatively run with
+a user GS.
+
Spectre variant 2 (Branch Target Injection)
-------------------------------------------
@@ -132,6 +140,9 @@ not cover all possible attack vectors.
1. A user process attacking the kernel
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
The attacker passes a parameter to the kernel via a register or
via a known address in memory during a syscall. Such parameter may
be used later by the kernel as an index to an array or to derive
@@ -144,7 +155,40 @@ not cover all possible attack vectors.
potentially be influenced for Spectre attacks, new "nospec" accessor
macros are used to prevent speculative loading of data.
- Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
+Spectre variant 1 (swapgs)
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ An attacker can train the branch predictor to speculatively skip the
+ swapgs path for an interrupt or exception. If they initialize
+ the GS register to a user-space value, if the swapgs is speculatively
+ skipped, subsequent GS-related percpu accesses in the speculation
+ window will be done with the attacker-controlled GS value. This
+ could cause privileged memory to be accessed and leaked.
+
+ For example:
+
+ ::
+
+ if (coming from user space)
+ swapgs
+ mov %gs:<percpu_offset>, %reg
+ mov (%reg), %reg1
+
+ When coming from user space, the CPU can speculatively skip the
+ swapgs, and then do a speculative percpu load using the user GS
+ value. So the user can speculatively force a read of any kernel
+ value. If a gadget exists which uses the percpu value as an address
+ in another load/store, then the contents of the kernel value may
+ become visible via an L1 side channel attack.
+
+ A similar attack exists when coming from kernel space. The CPU can
+ speculatively do the swapgs, causing the user GS to get used for the
+ rest of the speculative window.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
+ A spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
target buffer (BTB) before issuing syscall to launch an attack.
After entering the kernel, the kernel could use the poisoned branch
target buffer on indirect jump and jump to gadget code in speculative
@@ -280,11 +324,18 @@ The sysfs file showing Spectre variant 1 mitigation status is:
The possible values in this file are:
- ======================================= =================================
- 'Mitigation: __user pointer sanitation' Protection in kernel on a case by
- case base with explicit pointer
- sanitation.
- ======================================= =================================
+ .. list-table::
+
+ * - 'Not affected'
+ - The processor is not vulnerable.
+ * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
+ - The swapgs protections are disabled; otherwise it has
+ protection in the kernel on a case by case base with explicit
+ pointer sanitation and usercopy LFENCE barriers.
+ * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
+ - Protection in the kernel on a case by case base with explicit
+ pointer sanitation, usercopy LFENCE barriers, and swapgs LFENCE
+ barriers.
However, the protections are put in place on a case by case basis,
and there is no guarantee that all possible attack vectors for Spectre
@@ -366,12 +417,27 @@ Turning on mitigation for Spectre variant 1 and Spectre variant 2
1. Kernel mitigation
^^^^^^^^^^^^^^^^^^^^
+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
For the Spectre variant 1, vulnerable kernel code (as determined
by code audit or scanning tools) is annotated on a case by case
basis to use nospec accessor macros for bounds clipping :ref:`[2]
<spec_ref2>` to avoid any usable disclosure gadgets. However, it may
not cover all attack vectors for Spectre variant 1.
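For illustration, a minimal sketch of this bounds-clipping pattern (assuming a
hypothetical ``table[]`` lookup; ``array_index_nospec()`` is the accessor
provided by ``include/linux/nospec.h``)::

    /* Hypothetical syscall helper: the bounds check alone can be
     * mis-speculated, so the index is additionally clamped with
     * array_index_nospec() before it is used to load from the table. */
    #include <linux/errno.h>
    #include <linux/nospec.h>

    #define TABLE_SIZE 16
    static int table[TABLE_SIZE];

    static int table_lookup(unsigned long idx)
    {
            if (idx >= TABLE_SIZE)
                    return -EINVAL;

            /* Under mis-speculation of the check above this yields 0,
             * so no out-of-bounds speculative load can occur. */
            idx = array_index_nospec(idx, TABLE_SIZE);
            return table[idx];
    }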
+ Copy-from-user code has an LFENCE barrier to prevent the access_ok()
+ check from being mis-speculated. The barrier is done by the
+ barrier_nospec() macro.
+
+ For the swapgs variant of Spectre variant 1, LFENCE barriers are
+ added to interrupt, exception and NMI entry where needed. These
+ barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
+ FENCE_SWAPGS_USER_ENTRY macros.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
For Spectre variant 2 mitigation, the compiler turns indirect calls or
jumps in the kernel into equivalent return trampolines (retpolines)
:ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
@@ -473,6 +539,12 @@ Mitigation control on the kernel command line
Spectre variant 2 mitigation can be disabled or force enabled at the
kernel command line.
+ nospectre_v1
+
+ [X86,PPC] Disable mitigations for Spectre Variant 1
+ (bounds check bypass). With this option data leaks are
+ possible in the system.
+
nospectre_v2
[X86] Disable all mitigations for the Spectre variant 2
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 46b826fcb5ad..47d981a86e2f 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2545,7 +2545,7 @@
mem_encrypt=on: Activate SME
mem_encrypt=off: Do not activate SME
- Refer to Documentation/virtual/kvm/amd-memory-encryption.rst
+ Refer to Documentation/virt/kvm/amd-memory-encryption.rst
for details on when memory encryption can be activated.
mem_sleep_default= [SUSPEND] Default system suspend mode:
@@ -2604,7 +2604,7 @@
expose users to several CPU vulnerabilities.
Equivalent to: nopti [X86,PPC]
kpti=0 [ARM64]
- nospectre_v1 [PPC]
+ nospectre_v1 [X86,PPC]
nobp=0 [S390]
nospectre_v2 [X86,PPC,S390,ARM64]
spectre_v2_user=off [X86]
@@ -2965,9 +2965,9 @@
nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file.
- nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
- check bypass). With this option data leaks are possible
- in the system.
+ nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
+ (bounds check bypass). With this option data leaks are
+ possible in the system.
nospectre_v2 [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
the Spectre variant 2 (indirect branch prediction)
diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
index 7ab93a8404b9..bd5714547cee 100644
--- a/Documentation/admin-guide/mm/transhuge.rst
+++ b/Documentation/admin-guide/mm/transhuge.rst
@@ -53,7 +53,7 @@ disabled, there is ``khugepaged`` daemon that scans memory and
collapses sequences of basic pages into huge pages.
The THP behaviour is controlled via :ref:`sysfs <thp_sysfs>`
-interface and using madivse(2) and prctl(2) system calls.
+interface and using madvise(2) and prctl(2) system calls.
Transparent Hugepage Support maximizes the usefulness of free memory
if compared to the reservation approach of hugetlbfs by allowing all
diff --git a/Documentation/conf.py b/Documentation/conf.py
index 3b2397bcb565..a8fe845832bc 100644
--- a/Documentation/conf.py
+++ b/Documentation/conf.py
@@ -16,6 +16,8 @@ import sys
import os
import sphinx
+from subprocess import check_output
+
# Get Sphinx version
major, minor, patch = sphinx.version_info[:3]
@@ -276,10 +278,21 @@ latex_elements = {
\\setsansfont{DejaVu Sans}
\\setromanfont{DejaVu Serif}
\\setmonofont{DejaVu Sans Mono}
-
'''
}
+# At least one book (translations) may have Asian characters
+# which are only displayed if xeCJK is used
+
+cjk_cmd = check_output(['fc-list', '--format="%{family[0]}\n"']).decode('utf-8', 'ignore')
+if cjk_cmd.find("Noto Sans CJK SC") >= 0:
+ print ("enabling CJK for LaTeX builder")
+ latex_elements['preamble'] += '''
+ % This is needed for translations
+ \\usepackage{xeCJK}
+ \\setCJKmainfont{Noto Sans CJK SC}
+ '''
+
# Fix reference escape troubles with Sphinx 1.4.x
if major == 1 and minor > 3:
latex_elements['preamble'] += '\\renewcommand*{\\DUrole}[2]{ #2 }\n'
@@ -410,6 +423,21 @@ latex_documents = [
'The kernel development community', 'manual'),
]
+# Add all other index files from Documentation/ subdirectories
+for fn in os.listdir('.'):
+ doc = os.path.join(fn, "index")
+ if os.path.exists(doc + ".rst"):
+ has = False
+ for l in latex_documents:
+ if l[0] == doc:
+ has = True
+ break
+ if not has:
+ latex_documents.append((doc, fn + '.tex',
+ 'Linux %s Documentation' % fn.capitalize(),
+ 'The kernel development community',
+ 'manual'))
+
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
diff --git a/Documentation/core-api/conf.py b/Documentation/core-api/conf.py
deleted file mode 100644
index db1f7659f3da..000000000000
--- a/Documentation/core-api/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Core-API Documentation"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'core-api.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/crypto/conf.py b/Documentation/crypto/conf.py
deleted file mode 100644
index 4335d251ddf3..000000000000
--- a/Documentation/crypto/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = 'Linux Kernel Crypto API'
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'crypto-api.tex', 'Linux Kernel Crypto API manual',
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/dev-tools/conf.py b/Documentation/dev-tools/conf.py
deleted file mode 100644
index 7faafa3f7888..000000000000
--- a/Documentation/dev-tools/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Development tools for the kernel"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'dev-tools.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/devicetree/bindings/arm/idle-states.txt b/Documentation/devicetree/bindings/arm/idle-states.txt
index 326f29b270ad..2d325bed37e5 100644
--- a/Documentation/devicetree/bindings/arm/idle-states.txt
+++ b/Documentation/devicetree/bindings/arm/idle-states.txt
@@ -703,4 +703,4 @@ cpus {
https://www.devicetree.org/specifications/
[6] ARM Linux Kernel documentation - Booting AArch64 Linux
- Documentation/arm64/booting.txt
+ Documentation/arm64/booting.rst
diff --git a/Documentation/devicetree/bindings/arm/renesas.yaml b/Documentation/devicetree/bindings/arm/renesas.yaml
index 08c923f8c257..28eb458f761a 100644
--- a/Documentation/devicetree/bindings/arm/renesas.yaml
+++ b/Documentation/devicetree/bindings/arm/renesas.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/shmobile.yaml#
+$id: http://devicetree.org/schemas/arm/renesas.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Renesas SH-Mobile, R-Mobile, and R-Car Platform Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
index aae53fc3cb1e..2bd519d2e855 100644
--- a/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
+++ b/Documentation/devicetree/bindings/arm/socionext/milbeaut.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/milbeaut.yaml#
+$id: http://devicetree.org/schemas/arm/socionext/milbeaut.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Milbeaut platforms device tree bindings
diff --git a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml
index 4326d2cfa15d..a8765ba29476 100644
--- a/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml
+++ b/Documentation/devicetree/bindings/arm/ti/ti,davinci.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/arm/ti/davinci.yaml#
+$id: http://devicetree.org/schemas/arm/ti/ti,davinci.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Texas Instruments DaVinci Platforms Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
index c935405458fe..fa4d143a73de 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/phy/allwinner,sun4i-a10-ccu.yaml#
+$id: http://devicetree.org/schemas/clock/allwinner,sun4i-a10-ccu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner Clock Control Unit Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
index 8cb136c376fb..4f0db8ee226a 100644
--- a/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
+++ b/Documentation/devicetree/bindings/firmware/intel,ixp4xx-network-processing-engine.yaml
@@ -2,7 +2,7 @@
# Copyright 2019 Linaro Ltd.
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/firmware/intel-ixp4xx-network-processing-engine.yaml#"
+$id: "http://devicetree.org/schemas/firmware/intel,ixp4xx-network-processing-engine.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Intel IXP4xx Network Processing Engine
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
index 7ba167e2e1ea..c602b6fe1c0c 100644
--- a/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/iio/accelerometers/adi,adxl345.yaml#
+$id: http://devicetree.org/schemas/iio/accel/adi,adxl345.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices ADXL345/ADXL375 3-Axis Digital Accelerometers
diff --git a/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml b/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml
index a7fafb9bf5c6..e7daffec88d3 100644
--- a/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml
+++ b/Documentation/devicetree/bindings/iio/accel/adi,adxl372.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/iio/accelerometers/adi,adxl372.yaml#
+$id: http://devicetree.org/schemas/iio/accel/adi,adxl372.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analog Devices ADXL372 3-Axis, +/-(200g) Digital Accelerometer
diff --git a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
index 4e82fd575cec..c676b03c752e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/amazon,al-fic.txt
@@ -5,21 +5,19 @@ Required properties:
- compatible: should be "amazon,al-fic"
- reg: physical base address and size of the registers
- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: must be 2.
- First cell defines the index of the interrupt within the controller.
- Second cell is used to specify the trigger type and must be one of the
- following:
- - bits[3:0] trigger type and level flags
- 1 = low-to-high edge triggered
- 4 = active high level-sensitive
-- interrupt-parent: specifies the parent interrupt controller.
+- #interrupt-cells : must be 2. Specifies the number of cells needed to encode
+ an interrupt source. Supported trigger types are low-to-high edge
+ triggered and active high level-sensitive.
- interrupts: describes which input line in the interrupt parent, this
fic's output is connected to. This field property depends on the parent's
binding
+Please refer to interrupts.txt in this directory for details of the common
+Interrupt Controllers bindings used by client devices.
+
Example:
-amazon_fic: interrupt-controller@0xfd8a8500 {
+amazon_fic: interrupt-controller@fd8a8500 {
compatible = "amazon,al-fic";
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
index bae10e261fa9..507c141ea760 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/intel,ixp4xx-interrupt.yaml
@@ -2,7 +2,7 @@
# Copyright 2018 Linaro Ltd.
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/interrupt/intel-ixp4xx-interrupt.yaml#"
+$id: "http://devicetree.org/schemas/interrupt-controller/intel,ixp4xx-interrupt.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Intel IXP4xx XScale Networking Processors Interrupt Controller
diff --git a/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml
index d2313b1d9405..0ea21a6f70b4 100644
--- a/Documentation/devicetree/bindings/misc/intel,ixp4xx-queue-manager.yaml
+++ b/Documentation/devicetree/bindings/misc/intel,ixp4xx-ahb-queue-manager.yaml
@@ -2,7 +2,7 @@
# Copyright 2019 Linaro Ltd.
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/misc/intel-ixp4xx-ahb-queue-manager.yaml#"
+$id: "http://devicetree.org/schemas/misc/intel,ixp4xx-ahb-queue-manager.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Intel IXP4xx AHB Queue Manager
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index d4084c149768..3fb0714e761e 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/net/allwinner,sun8i-a83t-gmac.yaml#
+$id: http://devicetree.org/schemas/net/allwinner,sun8i-a83t-emac.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner A83t EMAC Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
index c9efd6e2c134..1084e9d2917d 100644
--- a/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
+++ b/Documentation/devicetree/bindings/nvmem/allwinner,sun4i-a10-sid.yaml
@@ -37,13 +37,13 @@ required:
examples:
- |
- sid@1c23800 {
+ efuse@1c23800 {
compatible = "allwinner,sun4i-a10-sid";
reg = <0x01c23800 0x10>;
};
- |
- sid@1c23800 {
+ efuse@1c23800 {
compatible = "allwinner,sun7i-a20-sid";
reg = <0x01c23800 0x200>;
};
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml b/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml
new file mode 100644
index 000000000000..b7c00ed31085
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/nvmem-consumer.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/nvmem-consumer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVMEM (Non Volatile Memory) Consumer Device Tree Bindings
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+select: true
+
+properties:
+ nvmem:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ List of phandles to the nvmem providers.
+
+ nvmem-cells:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description:
+ List of phandles to the nvmem data cells.
+
+ nvmem-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ Names for each nvmem provider.
+
+ nvmem-cell-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ Names for each of the nvmem-cells specified.
+
+dependencies:
+ nvmem-names: [ nvmem ]
+ nvmem-cell-names: [ nvmem-cells ]
+
+examples:
+ - |
+ tsens {
+ /* ... */
+ nvmem-cells = <&tsens_calibration>;
+ nvmem-cell-names = "calibration";
+ };
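A consumer driver then looks the cell up by the name given in
``nvmem-cell-names``; a minimal sketch using the kernel's nvmem consumer API
(the ``tsens_read_calibration()`` helper is hypothetical)::

    /* Hypothetical consumer: read the "calibration" cell referenced by the
     * example node above and release the resources again. */
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/nvmem-consumer.h>
    #include <linux/slab.h>

    static int tsens_read_calibration(struct device *dev)
    {
            struct nvmem_cell *cell;
            size_t len;
            u8 *data;

            cell = nvmem_cell_get(dev, "calibration"); /* matches nvmem-cell-names */
            if (IS_ERR(cell))
                    return PTR_ERR(cell);

            data = nvmem_cell_read(cell, &len);        /* kmalloc'd copy of the cell */
            nvmem_cell_put(cell);
            if (IS_ERR(data))
                    return PTR_ERR(data);

            dev_info(dev, "calibration cell is %zu bytes\n", len);
            kfree(data);
            return 0;
    }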
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.txt b/Documentation/devicetree/bindings/nvmem/nvmem.txt
index fd06c09b822b..46a7ef485e24 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.txt
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.txt
@@ -1,80 +1 @@
-= NVMEM(Non Volatile Memory) Data Device Tree Bindings =
-
-This binding is intended to represent the location of hardware
-configuration data stored in NVMEMs like eeprom, efuses and so on.
-
-On a significant proportion of boards, the manufacturer has stored
-some data on NVMEM, for the OS to be able to retrieve these information
-and act upon it. Obviously, the OS has to know about where to retrieve
-these data from, and where they are stored on the storage device.
-
-This document is here to document this.
-
-= Data providers =
-Contains bindings specific to provider drivers and data cells as children
-of this node.
-
-Optional properties:
- read-only: Mark the provider as read only.
-
-= Data cells =
-These are the child nodes of the provider which contain data cell
-information like offset and size in nvmem provider.
-
-Required properties:
-reg: specifies the offset in byte within the storage device.
-
-Optional properties:
-
-bits: Is pair of bit location and number of bits, which specifies offset
- in bit and number of bits within the address range specified by reg property.
- Offset takes values from 0-7.
-
-For example:
-
- /* Provider */
- qfprom: qfprom@700000 {
- ...
-
- /* Data cells */
- tsens_calibration: calib@404 {
- reg = <0x404 0x10>;
- };
-
- tsens_calibration_bckp: calib_bckp@504 {
- reg = <0x504 0x11>;
- bits = <6 128>
- };
-
- pvs_version: pvs-version@6 {
- reg = <0x6 0x2>
- bits = <7 2>
- };
-
- speed_bin: speed-bin@c{
- reg = <0xc 0x1>;
- bits = <2 3>;
-
- };
- ...
- };
-
-= Data consumers =
-Are device nodes which consume nvmem data cells/providers.
-
-Required-properties:
-nvmem-cells: list of phandle to the nvmem data cells.
-nvmem-cell-names: names for the each nvmem-cells specified. Required if
- nvmem-cells is used.
-
-Optional-properties:
-nvmem : list of phandles to nvmem providers.
-nvmem-names: names for the each nvmem provider. required if nvmem is used.
-
-For example:
-
- tsens {
- ...
- nvmem-cells = <&tsens_calibration>;
- nvmem-cell-names = "calibration";
- };
+This file has been moved to nvmem.yaml and nvmem-consumer.yaml.
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
new file mode 100644
index 000000000000..1c75a059206c
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/nvmem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVMEM (Non Volatile Memory) Device Tree Bindings
+
+maintainers:
+ - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+
+description: |
+ This binding is intended to represent the location of hardware
+ configuration data stored in NVMEMs like eeprom, efuses and so on.
+
+ On a significant proportion of boards, the manufacturer has stored
+ some data on NVMEM, for the OS to be able to retrieve this
+ information and act upon it. Obviously, the OS has to know
+ where to retrieve this data from, and where it is stored on the
+ storage device.
+
+properties:
+ $nodename:
+ pattern: "^(eeprom|efuse|nvram)(@.*|-[0-9a-f])*$"
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ read-only:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ Mark the provider as read only.
+
+patternProperties:
+ "^.*@[0-9a-f]+$":
+ type: object
+
+ properties:
+ reg:
+ maxItems: 1
+ description:
+ Offset and size in bytes within the storage device.
+
+ bits:
+ maxItems: 1
+ items:
+ items:
+ - minimum: 0
+ maximum: 7
+ description:
+ Offset in bits within the address range specified by reg.
+ - minimum: 1
+ description:
+ Size in bits within the address range specified by reg.
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+examples:
+ - |
+ qfprom: eeprom@700000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* ... */
+
+ /* Data cells */
+ tsens_calibration: calib@404 {
+ reg = <0x404 0x10>;
+ };
+
+ tsens_calibration_bckp: calib_bckp@504 {
+ reg = <0x504 0x11>;
+ bits = <6 128>;
+ };
+
+ pvs_version: pvs-version@6 {
+ reg = <0x6 0x2>;
+ bits = <7 2>;
+ };
+
+ speed_bin: speed-bin@c {
+ reg = <0xc 0x1>;
+ bits = <2 3>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
index 250f9d5aabdf..fa46670de299 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun6i-a31-mipi-dphy.yaml
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
%YAML 1.2
---
-$id: http://devicetree.org/schemas/display/allwinner,sun6i-a31-mipi-dphy.yaml#
+$id: http://devicetree.org/schemas/phy/allwinner,sun6i-a31-mipi-dphy.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner A31 MIPI D-PHY Controller Device Tree Bindings
diff --git a/Documentation/devicetree/bindings/spi/spi-controller.yaml b/Documentation/devicetree/bindings/spi/spi-controller.yaml
index 876c0623f322..a02e2fe2bfb2 100644
--- a/Documentation/devicetree/bindings/spi/spi-controller.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-controller.yaml
@@ -73,7 +73,6 @@ patternProperties:
Compatible of the SPI device.
reg:
- maxItems: 1
minimum: 0
maximum: 256
description:
diff --git a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
index a36a0746c056..2807225db902 100644
--- a/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/intel,ixp4xx-timer.yaml
@@ -2,7 +2,7 @@
# Copyright 2018 Linaro Ltd.
%YAML 1.2
---
-$id: "http://devicetree.org/schemas/timer/intel-ixp4xx-timer.yaml#"
+$id: "http://devicetree.org/schemas/timer/intel,ixp4xx-timer.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: Intel IXP4xx XScale Networking Processors Timers
diff --git a/Documentation/devicetree/bindings/usb/usb251xb.txt b/Documentation/devicetree/bindings/usb/usb251xb.txt
index bc7945e9dbfe..17915f64b8ee 100644
--- a/Documentation/devicetree/bindings/usb/usb251xb.txt
+++ b/Documentation/devicetree/bindings/usb/usb251xb.txt
@@ -64,10 +64,8 @@ Optional properties :
- power-on-time-ms : Specifies the time it takes from the time the host
initiates the power-on sequence to a port until the port has adequate
power. The value is given in ms in a 0 - 510 range (default is 100ms).
- - swap-dx-lanes : Specifies the downstream ports which will swap the
- differential-pair (D+/D-), default is not-swapped.
- - swap-us-lanes : Selects the upstream port differential-pair (D+/D-)
- swapping (boolean, default is not-swapped)
+ - swap-dx-lanes : Specifies the ports which will swap the differential-pair
+ (D+/D-), default is not-swapped.
Examples:
usb2512b@2c {
diff --git a/Documentation/doc-guide/conf.py b/Documentation/doc-guide/conf.py
deleted file mode 100644
index fd3731182d5a..000000000000
--- a/Documentation/doc-guide/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = 'Linux Kernel Documentation Guide'
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'kernel-doc-guide.tex', 'Linux Kernel Documentation Guide',
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/driver-api/80211/conf.py b/Documentation/driver-api/80211/conf.py
deleted file mode 100644
index 4424b4b0b9c3..000000000000
--- a/Documentation/driver-api/80211/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Linux 802.11 Driver Developer's Guide"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', '80211.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/driver-api/conf.py b/Documentation/driver-api/conf.py
deleted file mode 100644
index 202726d20088..000000000000
--- a/Documentation/driver-api/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "The Linux driver implementer's API guide"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'driver-api.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
index 0c161b1a3be6..8382f01a53e3 100644
--- a/Documentation/driver-api/generic-counter.rst
+++ b/Documentation/driver-api/generic-counter.rst
@@ -233,7 +233,7 @@ Userspace Interface
Several sysfs attributes are generated by the Generic Counter interface,
and reside under the /sys/bus/counter/devices/counterX directory, where
counterX refers to the respective counter device. Please see
-Documentation/ABI/testing/sys-bus-counter-generic-sysfs for detailed
+Documentation/ABI/testing/sysfs-bus-counter for detailed
information on each Generic Counter interface sysfs attribute.
Through these sysfs attributes, programs and scripts may interact with
@@ -325,7 +325,7 @@ sysfs attributes, where Y is the unique ID of the respective Count:
For a more detailed breakdown of the available Generic Counter interface
sysfs attributes, please refer to the
-Documentation/ABI/testing/sys-bus-counter file.
+Documentation/ABI/testing/sysfs-bus-counter file.
The Signals and Counts associated with the Counter device are registered
to the system as well by the counter_register function. The
diff --git a/Documentation/driver-api/phy/phy.rst b/Documentation/driver-api/phy/phy.rst
index 457c3e0f86d6..8fc1ce0bb905 100644
--- a/Documentation/driver-api/phy/phy.rst
+++ b/Documentation/driver-api/phy/phy.rst
@@ -179,8 +179,8 @@ PHY Mappings
In order to get reference to a PHY without help from DeviceTree, the framework
offers lookups which can be compared to clkdev that allow clk structures to be
-bound to devices. A lookup can be made be made during runtime when a handle to
-the struct phy already exists.
+bound to devices. A lookup can be made during runtime when a handle to the
+struct phy already exists.
The framework offers the following API for registering and unregistering the
lookups::
diff --git a/Documentation/driver-api/pm/conf.py b/Documentation/driver-api/pm/conf.py
deleted file mode 100644
index a89fac11272f..000000000000
--- a/Documentation/driver-api/pm/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Device Power Management"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'pm.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/filesystems/conf.py b/Documentation/filesystems/conf.py
deleted file mode 100644
index ea44172af5c4..000000000000
--- a/Documentation/filesystems/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Linux Filesystems API"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'filesystems.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/gpu/conf.py b/Documentation/gpu/conf.py
deleted file mode 100644
index 1757b040fb32..000000000000
--- a/Documentation/gpu/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Linux GPU Driver Developer's Guide"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'gpu.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/hwmon/k8temp.rst b/Documentation/hwmon/k8temp.rst
index 72da12aa17e5..fe9109521056 100644
--- a/Documentation/hwmon/k8temp.rst
+++ b/Documentation/hwmon/k8temp.rst
@@ -9,7 +9,7 @@ Supported chips:
Addresses scanned: PCI space
- Datasheet: http://support.amd.com/us/Processor_TechDocs/32559.pdf
+ Datasheet: http://www.amd.com/system/files/TechDocs/32559.pdf
Author: Rudolf Marek
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 70ae148ec980..2df5a3da563c 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -111,9 +111,11 @@ needed).
netlabel/index
networking/index
pcmcia/index
+ power/index
target/index
timers/index
watchdog/index
+ virtual/index
input/index
hwmon/index
gpu/index
@@ -143,6 +145,7 @@ implementation.
arm64/index
ia64/index
m68k/index
+ powerpc/index
riscv/index
s390/index
sh/index
diff --git a/Documentation/input/conf.py b/Documentation/input/conf.py
deleted file mode 100644
index d2352fdc92ed..000000000000
--- a/Documentation/input/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "The Linux input driver subsystem"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'linux-input.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/kernel-hacking/conf.py b/Documentation/kernel-hacking/conf.py
deleted file mode 100644
index 3d8acf0f33ad..000000000000
--- a/Documentation/kernel-hacking/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Kernel Hacking Guides"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'kernel-hacking.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/locking/spinlocks.rst b/Documentation/locking/spinlocks.rst
index 098107fb7d86..e93ec6645238 100644
--- a/Documentation/locking/spinlocks.rst
+++ b/Documentation/locking/spinlocks.rst
@@ -82,7 +82,7 @@ itself. The read lock allows many concurrent readers. Anything that
**changes** the list will have to get the write lock.
NOTE! RCU is better for list traversal, but requires careful
- attention to design detail (see Documentation/RCU/listRCU.txt).
+ attention to design detail (see Documentation/RCU/listRCU.rst).
Also, you cannot "upgrade" a read-lock to a write-lock, so if you at _any_
time need to do any changes (even if you don't do it every time), you have
@@ -90,7 +90,7 @@ to get the write-lock at the very beginning.
NOTE! We are working hard to remove reader-writer spinlocks in most
cases, so please don't add a new one without consensus. (Instead, see
- Documentation/RCU/rcu.txt for complete information.)
+ Documentation/RCU/rcu.rst for complete information.)
----
diff --git a/Documentation/maintainer/conf.py b/Documentation/maintainer/conf.py
deleted file mode 100644
index 81e9eb7a7884..000000000000
--- a/Documentation/maintainer/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = 'Linux Kernel Development Documentation'
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'maintainer.tex', 'Linux Kernel Development Documentation',
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/media/conf.py b/Documentation/media/conf.py
deleted file mode 100644
index 1f194fcd2cae..000000000000
--- a/Documentation/media/conf.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-# SPDX-License-Identifier: GPL-2.0
-
-project = 'Linux Media Subsystem Documentation'
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'media.tex', 'Linux Media Subsystem Documentation',
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 045bb8148fe9..1adbb8a371c7 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -548,7 +548,7 @@ There are certain things that the Linux kernel memory barriers do not guarantee:
[*] For information on bus mastering DMA and coherency please read:
- Documentation/PCI/pci.rst
+ Documentation/driver-api/pci/pci.rst
Documentation/DMA-API-HOWTO.txt
Documentation/DMA-API.txt
diff --git a/Documentation/networking/conf.py b/Documentation/networking/conf.py
deleted file mode 100644
index 40f69e67a883..000000000000
--- a/Documentation/networking/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Linux Networking Documentation"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'networking.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index 048e5ca44824..b70b70dc4524 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -424,13 +424,24 @@ Statistics
Following minimum set of TLS-related statistics should be reported
by the driver:
- * ``rx_tls_decrypted`` - number of successfully decrypted TLS segments
- * ``tx_tls_encrypted`` - number of in-order TLS segments passed to device
- for encryption
+ * ``rx_tls_decrypted_packets`` - number of successfully decrypted RX packets
+ which were part of a TLS stream.
+ * ``rx_tls_decrypted_bytes`` - number of TLS payload bytes in RX packets
+ which were successfully decrypted.
+ * ``tx_tls_encrypted_packets`` - number of TX packets passed to the device
+ for encryption of their TLS payload.
+ * ``tx_tls_encrypted_bytes`` - number of TLS payload bytes in TX packets
+ passed to the device for encryption.
+ * ``tx_tls_ctx`` - number of TLS TX HW offload contexts added to device for
+ encryption.
* ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
- but did not arrive in the expected order
- * ``tx_tls_drop_no_sync_data`` - number of TX packets dropped because
- they arrived out of order and associated record could not be found
+ but did not arrive in the expected order.
+ * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
+ a TLS stream dropped, because they arrived out of order and the
+ associated record could not be found.
+ * ``tx_tls_drop_bypass_req`` - number of TX packets which were part of a TLS
+ stream dropped, because they contain both data that has been encrypted by
+ software and data that expects hardware crypto offload.
Notable corner cases, exceptions and additional requirements
============================================================
diff --git a/Documentation/power/index.rst b/Documentation/power/index.rst
index 20415f21e48a..002e42745263 100644
--- a/Documentation/power/index.rst
+++ b/Documentation/power/index.rst
@@ -1,4 +1,4 @@
-:orphan:
+.. SPDX-License-Identifier: GPL-2.0
================
Power Management
diff --git a/Documentation/powerpc/bootwrapper.txt b/Documentation/powerpc/bootwrapper.rst
index d60fced5e1cc..a6292afba573 100644
--- a/Documentation/powerpc/bootwrapper.txt
+++ b/Documentation/powerpc/bootwrapper.rst
@@ -1,5 +1,7 @@
+========================
The PowerPC boot wrapper
-------------------------
+========================
+
Copyright (C) Secret Lab Technologies Ltd.
PowerPC image targets compresses and wraps the kernel image (vmlinux) with
@@ -21,6 +23,7 @@ it uses the wrapper script (arch/powerpc/boot/wrapper) to generate target
image. The details of the build system is discussed in the next section.
Currently, the following image format targets exist:
+ ==================== ========================================================
cuImage.%: Backwards compatible uImage for older version of
U-Boot (for versions that don't understand the device
tree). This image embeds a device tree blob inside
@@ -29,31 +32,36 @@ Currently, the following image format targets exist:
with boot wrapper code that extracts data from the old
bd_info structure and loads the data into the device
tree before jumping into the kernel.
- Because of the series of #ifdefs found in the
+
+ Because of the series of #ifdefs found in the
bd_info structure used in the old U-Boot interfaces,
cuImages are platform specific. Each specific
U-Boot platform has a different platform init file
which populates the embedded device tree with data
from the platform specific bd_info file. The platform
specific cuImage platform init code can be found in
- arch/powerpc/boot/cuboot.*.c. Selection of the correct
+ `arch/powerpc/boot/cuboot.*.c`. Selection of the correct
cuImage init code for a specific board can be found in
the wrapper structure.
+
dtbImage.%: Similar to zImage, except device tree blob is embedded
inside the image instead of provided by firmware. The
output image file can be either an elf file or a flat
binary depending on the platform.
- dtbImages are used on systems which do not have an
+
+ dtbImages are used on systems which do not have an
interface for passing a device tree directly.
dtbImages are similar to simpleImages except that
dtbImages have platform specific code for extracting
data from the board firmware, but simpleImages do not
talk to the firmware at all.
- PlayStation 3 support uses dtbImage. So do Embedded
+
+ PlayStation 3 support uses dtbImage. So do Embedded
Planet boards using the PlanetCore firmware. Board
specific initialization code is typically found in a
file named arch/powerpc/boot/<platform>.c; but this
can be overridden by the wrapper script.
+
simpleImage.%: Firmware independent compressed image that does not
depend on any particular firmware interface and embeds
a device tree blob. This image is a flat binary that
@@ -61,14 +69,16 @@ Currently, the following image format targets exist:
Firmware cannot pass any configuration data to the
kernel with this image type and it depends entirely on
the embedded device tree for all information.
- The simpleImage is useful for booting systems with
+
+ The simpleImage is useful for booting systems with
an unknown firmware interface or for booting from
a debugger when no firmware is present (such as on
the Xilinx Virtex platform). The only assumption that
simpleImage makes is that RAM is correctly initialized
and that the MMU is either off or has RAM mapped to
base address 0.
- simpleImage also supports inserting special platform
+
+ simpleImage also supports inserting special platform
specific initialization code to the start of the bootup
sequence. The virtex405 platform uses this feature to
ensure that the cache is invalidated before caching
@@ -81,9 +91,11 @@ Currently, the following image format targets exist:
named (virtex405-<board>.dts). Search the wrapper
script for 'virtex405' and see the file
arch/powerpc/boot/virtex405-head.S for details.
+
treeImage.%: Image format for use with OpenBIOS firmware found
on some ppc4xx hardware. This image embeds a device
tree blob inside the image.
+
uImage: Native image format used by U-Boot. The uImage target
does not add any boot code. It just wraps a compressed
vmlinux in the uImage data structure. This image
@@ -91,12 +103,14 @@ Currently, the following image format targets exist:
a device tree to the kernel at boot. If using an older
version of U-Boot, then you need to use a cuImage
instead.
+
zImage.%: Image format which does not embed a device tree.
Used by OpenFirmware and other firmware interfaces
which are able to supply a device tree. This image
expects firmware to provide the device tree at boot.
Typically, if you have general purpose PowerPC
hardware then you want this image format.
+ ==================== ========================================================
Image types which embed a device tree blob (simpleImage, dtbImage, treeImage,
and cuImage) all generate the device tree blob from a file in the
diff --git a/Documentation/powerpc/cpu_families.txt b/Documentation/powerpc/cpu_families.rst
index fc08e22feb1a..1e063c5440c3 100644
--- a/Documentation/powerpc/cpu_families.txt
+++ b/Documentation/powerpc/cpu_families.rst
@@ -1,3 +1,4 @@
+============
CPU Families
============
@@ -8,8 +9,8 @@ and are supported by arch/powerpc.
Book3S (aka sPAPR)
------------------
- - Hash MMU
- - Mix of 32 & 64 bit
+- Hash MMU
+- Mix of 32 & 64 bit::
+--------------+ +----------------+
| Old POWER | --------------> | RS64 (threads) |
@@ -108,8 +109,8 @@ Book3S (aka sPAPR)
IBM BookE
---------
- - Software loaded TLB.
- - All 32 bit
+- Software loaded TLB.
+- All 32 bit::
+--------------+
| 401 |
@@ -155,8 +156,8 @@ IBM BookE
Motorola/Freescale 8xx
----------------------
- - Software loaded with hardware assist.
- - All 32 bit
+- Software loaded with hardware assist.
+- All 32 bit::
+-------------+
| MPC8xx Core |
@@ -166,9 +167,9 @@ Motorola/Freescale 8xx
Freescale BookE
---------------
- - Software loaded TLB.
- - e6500 adds HW loaded indirect TLB entries.
- - Mix of 32 & 64 bit
+- Software loaded TLB.
+- e6500 adds HW loaded indirect TLB entries.
+- Mix of 32 & 64 bit::
+--------------+
| e200 |
@@ -207,8 +208,8 @@ Freescale BookE
IBM A2 core
-----------
- - Book3E, software loaded TLB + HW loaded indirect TLB entries.
- - 64 bit
+- Book3E, software loaded TLB + HW loaded indirect TLB entries.
+- 64 bit::
+--------------+ +----------------+
| A2 core | --> | WSP |
diff --git a/Documentation/powerpc/cpu_features.txt b/Documentation/powerpc/cpu_features.rst
index ae09df8722c8..b7bcdd2f41bb 100644
--- a/Documentation/powerpc/cpu_features.txt
+++ b/Documentation/powerpc/cpu_features.rst
@@ -1,3 +1,7 @@
+============
+CPU Features
+============
+
Hollis Blanchard <hollis@austin.ibm.com>
5 Jun 2002
@@ -32,7 +36,7 @@ anyways).
After detecting the processor type, the kernel patches out sections of code
that shouldn't be used by writing nop's over it. Using cpufeatures requires
just 2 macros (found in arch/powerpc/include/asm/cputable.h), as seen in head.S
-transfer_to_handler:
+transfer_to_handler::
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
diff --git a/Documentation/powerpc/cxl.txt b/Documentation/powerpc/cxl.rst
index c5e8d5098ed3..920546d81326 100644
--- a/Documentation/powerpc/cxl.txt
+++ b/Documentation/powerpc/cxl.rst
@@ -1,3 +1,4 @@
+====================================
Coherent Accelerator Interface (CXL)
====================================
@@ -21,6 +22,8 @@ Introduction
Hardware overview
=================
+ ::
+
POWER8/9 FPGA
+----------+ +---------+
| | | |
@@ -59,14 +62,16 @@ Hardware overview
the fault. The context to which this fault is serviced is based on
who owns that acceleration function.
- POWER8 <-----> PSL Version 8 is compliant to the CAIA Version 1.0.
- POWER9 <-----> PSL Version 9 is compliant to the CAIA Version 2.0.
+ - POWER8 and PSL Version 8 are compliant with the CAIA Version 1.0.
+ - POWER9 and PSL Version 9 are compliant with the CAIA Version 2.0.
+
This PSL Version 9 provides new features such as:
+
* Interaction with the nest MMU on the P9 chip.
* Native DMA support.
* Supports sending ASB_Notify messages for host thread wakeup.
* Supports Atomic operations.
- * ....
+ * etc.
Cards with a PSL9 won't work on a POWER8 system and cards with a
PSL8 won't work on a POWER9 system.
@@ -147,7 +152,9 @@ User API
master devices.
A userspace library libcxl is available here:
+
https://github.com/ibm-capi/libcxl
+
This provides a C interface to this kernel API.
open
@@ -165,7 +172,8 @@ open
When all available contexts are allocated the open call will fail
and return -ENOSPC.
- Note: IRQs need to be allocated for each context, which may limit
+ Note:
+ IRQs need to be allocated for each context, which may limit
the number of contexts that can be created, and therefore
how many times the device can be opened. The POWER8 CAPP
supports 2040 IRQs and 3 are used by the kernel, so 2037 are
@@ -186,7 +194,9 @@ ioctl
updated as userspace allocates and frees memory. This ioctl
returns once the AFU context is started.
- Takes a pointer to a struct cxl_ioctl_start_work:
+ Takes a pointer to a struct cxl_ioctl_start_work
+
+ ::
struct cxl_ioctl_start_work {
__u64 flags;
@@ -269,7 +279,7 @@ read
The buffer passed to read() must be at least 4K bytes.
The result of the read will be a buffer of one or more events,
- each event is of type struct cxl_event, of varying size.
+ each event is of type struct cxl_event, of varying size::
struct cxl_event {
struct cxl_event_header header;
@@ -280,7 +290,9 @@ read
};
};
- The struct cxl_event_header is defined as:
+ The struct cxl_event_header is defined as
+
+ ::
struct cxl_event_header {
__u16 type;
@@ -307,7 +319,9 @@ read
For future extensions and padding.
If the event type is CXL_EVENT_AFU_INTERRUPT then the event
- structure is defined as:
+ structure is defined as
+
+ ::
struct cxl_event_afu_interrupt {
__u16 flags;
@@ -326,7 +340,9 @@ read
For future extensions and padding.
If the event type is CXL_EVENT_DATA_STORAGE then the event
- structure is defined as:
+ structure is defined as
+
+ ::
struct cxl_event_data_storage {
__u16 flags;
@@ -356,7 +372,9 @@ read
For future extensions
If the event type is CXL_EVENT_AFU_ERROR then the event structure
- is defined as:
+ is defined as
+
+ ::
struct cxl_event_afu_error {
__u16 flags;
@@ -393,15 +411,15 @@ open
ioctl
-----
-CXL_IOCTL_DOWNLOAD_IMAGE:
-CXL_IOCTL_VALIDATE_IMAGE:
+CXL_IOCTL_DOWNLOAD_IMAGE / CXL_IOCTL_VALIDATE_IMAGE:
Starts and controls flashing a new FPGA image. Partial
reconfiguration is not supported (yet), so the image must contain
a copy of the PSL and AFU(s). Since an image can be quite large,
the caller may have to iterate, splitting the image in smaller
chunks.
- Takes a pointer to a struct cxl_adapter_image:
+ Takes a pointer to a struct cxl_adapter_image::
+
struct cxl_adapter_image {
__u64 flags;
__u64 data;
@@ -442,7 +460,7 @@ Udev rules
The following udev rules could be used to create a symlink to the
most logical chardev to use in any programming mode (afuX.Yd for
dedicated, afuX.Ys for afu directed), since the API is virtually
- identical for each:
+ identical for each::
SUBSYSTEM=="cxl", ATTRS{mode}=="dedicated_process", SYMLINK="cxl/%b"
SUBSYSTEM=="cxl", ATTRS{mode}=="afu_directed", \
diff --git a/Documentation/powerpc/cxlflash.txt b/Documentation/powerpc/cxlflash.rst
index a64bdaa0a1cf..cea67931b3b9 100644
--- a/Documentation/powerpc/cxlflash.txt
+++ b/Documentation/powerpc/cxlflash.rst
@@ -1,3 +1,7 @@
+================================
+Coherent Accelerator (CXL) Flash
+================================
+
Introduction
============
@@ -28,7 +32,7 @@ Introduction
responsible for the initialization of the adapter, setting up the
special path for user space access, and performing error recovery. It
communicates directly with the Flash Accelerator Functional Unit (AFU)
- as described in Documentation/powerpc/cxl.txt.
+ as described in Documentation/powerpc/cxl.rst.
The cxlflash driver supports two, mutually exclusive, modes of
operation at the device (LUN) level:
@@ -58,7 +62,7 @@ Overview
The CXL Flash Adapter Driver establishes a master context with the
AFU. It uses memory mapped I/O (MMIO) for this control and setup. The
- Adapter Problem Space Memory Map looks like this:
+ Adapter Problem Space Memory Map looks like this::
+-------------------------------+
| 512 * 64 KB User MMIO |
@@ -375,7 +379,7 @@ CXL Flash Driver Host IOCTLs
Each host adapter instance that is supported by the cxlflash driver
has a special character device associated with it to enable a set of
host management function. These character devices are hosted in a
- class dedicated for cxlflash and can be accessed via /dev/cxlflash/*.
+ class dedicated for cxlflash and can be accessed via `/dev/cxlflash/*`.
Applications can be written to perform various functions using the
host ioctl APIs below.
diff --git a/Documentation/powerpc/DAWR-POWER9.txt b/Documentation/powerpc/dawr-power9.rst
index ecdbb076438c..c96ab6befd9c 100644
--- a/Documentation/powerpc/DAWR-POWER9.txt
+++ b/Documentation/powerpc/dawr-power9.rst
@@ -1,10 +1,11 @@
+=====================
DAWR issues on POWER9
-============================
+=====================
On POWER9 the Data Address Watchpoint Register (DAWR) can cause a checkstop
if it points to cache inhibited (CI) memory. Currently Linux has no way to
distinguish CI memory when configuring the DAWR, so (for now) the DAWR is
-disabled by this commit:
+disabled by this commit::
commit 9654153158d3e0684a1bdb76dbababdb7111d5a0
Author: Michael Neuling <mikey@neuling.org>
@@ -12,7 +13,7 @@ disabled by this commit:
powerpc: Disable DAWR in the base POWER9 CPU features
Technical Details:
-============================
+==================
DAWR has 6 different ways of being set.
1) ptrace
@@ -37,7 +38,7 @@ DAWR on the migration.
For xmon, the 'bd' command will return an error on P9.
Consequences for users
-============================
+======================
For GDB watchpoints (i.e. the 'watch' command) on POWER9 bare metal, GDB
will accept the command. Unfortunately since there is no hardware
@@ -57,8 +58,8 @@ trapped in GDB. The watchpoint is remembered, so if the guest is
migrated back to the POWER8 host, it will start working again.
Force enabling the DAWR
-=============================
-Kernels (since ~v5.2) have an option to force enable the DAWR via:
+=======================
+Kernels (since ~v5.2) have an option to force enable the DAWR via::
echo Y > /sys/kernel/debug/powerpc/dawr_enable_dangerous
@@ -86,5 +87,7 @@ dawr_enable_dangerous file will fail if the hypervisor doesn't support
writing the DAWR.
To double check the DAWR is working, run this kernel selftest:
+
tools/testing/selftests/powerpc/ptrace/ptrace-hwbreak.c
+
Any errors/failures/skips mean something is wrong.
diff --git a/Documentation/powerpc/dscr.txt b/Documentation/powerpc/dscr.rst
index ece300c64f76..2ab99006014c 100644
--- a/Documentation/powerpc/dscr.txt
+++ b/Documentation/powerpc/dscr.rst
@@ -1,5 +1,6 @@
- DSCR (Data Stream Control Register)
- ================================================
+===================================
+DSCR (Data Stream Control Register)
+===================================
DSCR register in powerpc allows user to have some control of prefetch of data
stream in the processor. Please refer to the ISA documents or related manual
@@ -10,14 +11,17 @@ user interface.
(A) Data Structures:
- (1) thread_struct:
+ (1) thread_struct::
+
dscr /* Thread DSCR value */
dscr_inherit /* Thread has changed default DSCR */
- (2) PACA:
+ (2) PACA::
+
dscr_default /* per-CPU DSCR default value */
- (3) sysfs.c:
+ (3) sysfs.c::
+
dscr_default /* System DSCR default value */
(B) Scheduler Changes:
@@ -35,8 +39,8 @@ user interface.
(C) SYSFS Interface:
- Global DSCR default: /sys/devices/system/cpu/dscr_default
- CPU specific DSCR default: /sys/devices/system/cpu/cpuN/dscr
+ - Global DSCR default: /sys/devices/system/cpu/dscr_default
+ - CPU specific DSCR default: /sys/devices/system/cpu/cpuN/dscr
Changing the global DSCR default in the sysfs will change all the CPU
specific DSCR defaults immediately in their PACA structures. Again if
diff --git a/Documentation/powerpc/eeh-pci-error-recovery.txt b/Documentation/powerpc/eeh-pci-error-recovery.rst
index 678189280bb4..438a87ebc095 100644
--- a/Documentation/powerpc/eeh-pci-error-recovery.txt
+++ b/Documentation/powerpc/eeh-pci-error-recovery.rst
@@ -1,10 +1,10 @@
+==========================
+PCI Bus EEH Error Recovery
+==========================
+Linas Vepstas <linas@austin.ibm.com>
- PCI Bus EEH Error Recovery
- --------------------------
- Linas Vepstas
- <linas@austin.ibm.com>
- 12 January 2005
+12 January 2005
Overview:
@@ -143,17 +143,17 @@ seen in /proc/ppc64/eeh (subject to change). Normally, almost
all of these occur during boot, when the PCI bus is scanned, where
a large number of 0xff reads are part of the bus scan procedure.
-If a frozen slot is detected, code in
-arch/powerpc/platforms/pseries/eeh.c will print a stack trace to
-syslog (/var/log/messages). This stack trace has proven to be very
-useful to device-driver authors for finding out at what point the EEH
-error was detected, as the error itself usually occurs slightly
+If a frozen slot is detected, code in
+arch/powerpc/platforms/pseries/eeh.c will print a stack trace to
+syslog (/var/log/messages). This stack trace has proven to be very
+useful to device-driver authors for finding out at what point the EEH
+error was detected, as the error itself usually occurs slightly
beforehand.
Next, it uses the Linux kernel notifier chain/work queue mechanism to
allow any interested parties to find out about the failure. Device
drivers, or other parts of the kernel, can use
-eeh_register_notifier(struct notifier_block *) to find out about EEH
+`eeh_register_notifier(struct notifier_block *)` to find out about EEH
events. The event will include a pointer to the pci device, the
device node and some state info. Receivers of the event can "do as
they wish"; the default handler will be described further in this
@@ -162,10 +162,13 @@ section.
To assist in the recovery of the device, eeh.c exports the
following functions:
-rtas_set_slot_reset() -- assert the PCI #RST line for 1/8th of a second
-rtas_configure_bridge() -- ask firmware to configure any PCI bridges
+rtas_set_slot_reset()
+ assert the PCI #RST line for 1/8th of a second
+rtas_configure_bridge()
+ ask firmware to configure any PCI bridges
located topologically under the pci slot.
-eeh_save_bars() and eeh_restore_bars(): save and restore the PCI
+eeh_save_bars() and eeh_restore_bars():
+ save and restore the PCI
config-space info for a device and any devices under it.
@@ -191,7 +194,7 @@ events get delivered to user-space scripts.
Following is an example sequence of events that cause a device driver
close function to be called during the first phase of an EEH reset.
-The following sequence is an example of the pcnet32 device driver.
+The following sequence is an example of the pcnet32 device driver::
rpa_php_unconfig_pci_adapter (struct slot *) // in rpaphp_pci.c
{
@@ -241,53 +244,54 @@ The following sequence is an example of the pcnet32 device driver.
}}}}}}
- in drivers/pci/pci_driver.c,
- struct device_driver->remove() is just pci_device_remove()
- which calls struct pci_driver->remove() which is pcnet32_remove_one()
- which calls unregister_netdev() (in net/core/dev.c)
- which calls dev_close() (in net/core/dev.c)
- which calls dev->stop() which is pcnet32_close()
- which then does the appropriate shutdown.
+in drivers/pci/pci_driver.c,
+struct device_driver->remove() is just pci_device_remove()
+which calls struct pci_driver->remove() which is pcnet32_remove_one()
+which calls unregister_netdev() (in net/core/dev.c)
+which calls dev_close() (in net/core/dev.c)
+which calls dev->stop() which is pcnet32_close()
+which then does the appropriate shutdown.
---
+
Following is the analogous stack trace for events sent to user-space
-when the pci device is unconfigured.
+when the pci device is unconfigured::
-rpa_php_unconfig_pci_adapter() { // in rpaphp_pci.c
- calls
- pci_remove_bus_device (struct pci_dev *) { // in /drivers/pci/remove.c
+ rpa_php_unconfig_pci_adapter() { // in rpaphp_pci.c
calls
- pci_destroy_dev (struct pci_dev *) {
+ pci_remove_bus_device (struct pci_dev *) { // in /drivers/pci/remove.c
calls
- device_unregister (&dev->dev) { // in /drivers/base/core.c
+ pci_destroy_dev (struct pci_dev *) {
calls
- device_del(struct device * dev) { // in /drivers/base/core.c
+ device_unregister (&dev->dev) { // in /drivers/base/core.c
calls
- kobject_del() { //in /libs/kobject.c
+ device_del(struct device * dev) { // in /drivers/base/core.c
calls
- kobject_uevent() { // in /libs/kobject.c
+ kobject_del() { //in /libs/kobject.c
calls
- kset_uevent() { // in /lib/kobject.c
+ kobject_uevent() { // in /libs/kobject.c
calls
- kset->uevent_ops->uevent() // which is really just
- a call to
- dev_uevent() { // in /drivers/base/core.c
+ kset_uevent() { // in /lib/kobject.c
calls
- dev->bus->uevent() which is really just a call to
- pci_uevent () { // in drivers/pci/hotplug.c
- which prints device name, etc....
+ kset->uevent_ops->uevent() // which is really just
+ a call to
+ dev_uevent() { // in /drivers/base/core.c
+ calls
+ dev->bus->uevent() which is really just a call to
+ pci_uevent () { // in drivers/pci/hotplug.c
+ which prints device name, etc....
+ }
}
- }
- then kobject_uevent() sends a netlink uevent to userspace
- --> userspace uevent
- (during early boot, nobody listens to netlink events and
- kobject_uevent() executes uevent_helper[], which runs the
- event process /sbin/hotplug)
+ then kobject_uevent() sends a netlink uevent to userspace
+ --> userspace uevent
+ (during early boot, nobody listens to netlink events and
+ kobject_uevent() executes uevent_helper[], which runs the
+ event process /sbin/hotplug)
+ }
}
- }
- kobject_del() then calls sysfs_remove_dir(), which would
- trigger any user-space daemon that was watching /sysfs,
- and notice the delete event.
+ kobject_del() then calls sysfs_remove_dir(), which would
+ trigger any user-space daemon that was watching /sysfs,
+ and notice the delete event.
Pro's and Con's of the Current Design
@@ -299,12 +303,12 @@ individual device drivers, so that the current design throws a wide net.
The biggest negative of the design is that it potentially disturbs
network daemons and file systems that didn't need to be disturbed.
--- A minor complaint is that resetting the network card causes
+- A minor complaint is that resetting the network card causes
user-space back-to-back ifdown/ifup burps that potentially disturb
network daemons, that didn't need to even know that the pci
card was being rebooted.
--- A more serious concern is that the same reset, for SCSI devices,
+- A more serious concern is that the same reset, for SCSI devices,
causes havoc to mounted file systems. Scripts cannot post-facto
unmount a file system without flushing pending buffers, but this
is impossible, because I/O has already been stopped. Thus,
@@ -322,7 +326,7 @@ network daemons and file systems that didn't need to be disturbed.
from the block layer. It would be very natural to add an EEH
reset into this chain of events.
--- If a SCSI error occurs for the root device, all is lost unless
+- If a SCSI error occurs for the root device, all is lost unless
the sysadmin had the foresight to run /bin, /sbin, /etc, /var
and so on, out of ramdisk/tmpfs.
@@ -330,5 +334,3 @@ network daemons and file systems that didn't need to be disturbed.
Conclusions
-----------
There's forward progress ...
-
-
diff --git a/Documentation/powerpc/firmware-assisted-dump.txt b/Documentation/powerpc/firmware-assisted-dump.rst
index 10e7f4d16c14..9ca12830a48e 100644
--- a/Documentation/powerpc/firmware-assisted-dump.txt
+++ b/Documentation/powerpc/firmware-assisted-dump.rst
@@ -1,7 +1,8 @@
+======================
+Firmware-Assisted Dump
+======================
- Firmware-Assisted Dump
- ------------------------
- July 2011
+July 2011
The goal of firmware-assisted dump is to enable the dump of
a crashed system, and to do so from a fully-reset system, and
@@ -27,11 +28,11 @@ in production use.
Comparing with kdump or other strategies, firmware-assisted
dump offers several strong, practical advantages:
--- Unlike kdump, the system has been reset, and loaded
+- Unlike kdump, the system has been reset, and loaded
with a fresh copy of the kernel. In particular,
PCI and I/O devices have been reinitialized and are
in a clean, consistent state.
--- Once the dump is copied out, the memory that held the dump
+- Once the dump is copied out, the memory that held the dump
is immediately available to the running kernel. And therefore,
unlike kdump, fadump doesn't need a 2nd reboot to get back
the system to the production configuration.
@@ -40,17 +41,18 @@ The above can only be accomplished by coordination with,
and assistance from the Power firmware. The procedure is
as follows:
--- The first kernel registers the sections of memory with the
+- The first kernel registers the sections of memory with the
Power firmware for dump preservation during OS initialization.
These registered sections of memory are reserved by the first
kernel during early boot.
--- When a system crashes, the Power firmware will save
+- When a system crashes, the Power firmware will save
the low memory (boot memory of size larger of 5% of system RAM
or 256MB) of RAM to the previous registered region. It will
also save system registers, and hardware PTE's.
- NOTE: The term 'boot memory' means size of the low memory chunk
+ NOTE:
+ The term 'boot memory' means the size of the low memory chunk
that is required for a kernel to boot successfully when
booted with restricted memory. By default, the boot memory
size will be the larger of 5% of system RAM or 256MB.
@@ -64,12 +66,12 @@ as follows:
as fadump uses a predefined offset to reserve memory
for boot memory dump preservation in case of a crash.
--- After the low memory (boot memory) area has been saved, the
+- After the low memory (boot memory) area has been saved, the
firmware will reset PCI and other hardware state. It will
*not* clear the RAM. It will then launch the bootloader, as
normal.
--- The freshly booted kernel will notice that there is a new
+- The freshly booted kernel will notice that there is a new
node (ibm,dump-kernel) in the device tree, indicating that
there is crash data available from a previous boot. During
the early boot OS will reserve rest of the memory above
@@ -77,17 +79,18 @@ as follows:
size. This will make sure that the second kernel will not
touch any of the dump memory area.
--- User-space tools will read /proc/vmcore to obtain the contents
+- User-space tools will read /proc/vmcore to obtain the contents
of memory, which holds the previous crashed kernel dump in ELF
format. The userspace tools may copy this info to disk, or
network, nas, san, iscsi, etc. as desired.
--- Once the userspace tool is done saving dump, it will echo
+- Once the userspace tool is done saving dump, it will echo
'1' to /sys/kernel/fadump_release_mem to release the reserved
memory back to general use, except the memory required for
next firmware-assisted dump registration.
- e.g.
+ e.g.::
+
# echo 1 > /sys/kernel/fadump_release_mem
Please note that the firmware-assisted dump feature
@@ -95,7 +98,7 @@ is only available on Power6 and above systems with recent
firmware versions.
Implementation details:
-----------------------
+-----------------------
During boot, a check is made to see if firmware supports
this feature on that particular machine. If it does, then
@@ -121,7 +124,7 @@ Allocator (CMA) for memory reservation if CMA is configured for kernel.
With CMA reservation this memory will be available for applications to
use it, while kernel is prevented from using it. With this fadump will
still be able to capture all of the kernel memory and most of the user
-space memory except the user pages that were present in CMA region.
+space memory except the user pages that were present in CMA region::
o Memory Reservation during first kernel
@@ -166,7 +169,7 @@ The tools to examine the dump will be same as the ones
used for kdump.
How to enable firmware-assisted dump (fadump):
--------------------------------------
+----------------------------------------------
1. Set config option CONFIG_FA_DUMP=y and build kernel.
2. Boot into linux kernel with 'fadump=on' kernel cmdline option.
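
An illustrative aside, not part of the patch above: with those two steps, a
kernel command line enabling fadump and explicitly sizing the reservation
(the value is only a placeholder) could look like::

    fadump=on crashkernel=1024M
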
@@ -177,19 +180,20 @@ How to enable firmware-assisted dump (fadump):
to specify size of the memory to reserve for boot memory dump
preservation.
-NOTE: 1. 'fadump_reserve_mem=' parameter has been deprecated. Instead
- use 'crashkernel=' to specify size of the memory to reserve
- for boot memory dump preservation.
- 2. If firmware-assisted dump fails to reserve memory then it
- will fallback to existing kdump mechanism if 'crashkernel='
- option is set at kernel cmdline.
- 3. if user wants to capture all of user space memory and ok with
- reserved memory not available to production system, then
- 'fadump=nocma' kernel parameter can be used to fallback to
- old behaviour.
+NOTE:
+  1. The 'fadump_reserve_mem=' parameter has been deprecated. Instead,
+     use 'crashkernel=' to specify the size of the memory to reserve
+     for boot memory dump preservation.
+  2. If firmware-assisted dump fails to reserve memory, it will fall
+     back to the existing kdump mechanism if the 'crashkernel=' option
+     is set on the kernel cmdline.
+  3. If the user wants to capture all of user space memory and is OK
+     with the reserved memory not being available to the production
+     system, the 'fadump=nocma' kernel parameter can be used to fall
+     back to the old behaviour.
Sysfs/debugfs files:
-------------
+--------------------
Firmware-assisted dump feature uses sysfs file system to hold
the control files and debugfs file to display memory reserved region.
@@ -197,20 +201,20 @@ the control files and debugfs file to display memory reserved region.
Here is the list of files under kernel sysfs:
/sys/kernel/fadump_enabled
-
This is used to display the fadump status.
- 0 = fadump is disabled
- 1 = fadump is enabled
+
+ - 0 = fadump is disabled
+ - 1 = fadump is enabled
This interface can be used by kdump init scripts to identify if
fadump is enabled in the kernel and act accordingly.
/sys/kernel/fadump_registered
-
This is used to display the fadump registration status as well
as to control (start/stop) the fadump registration.
- 0 = fadump is not registered.
- 1 = fadump is registered and ready to handle system crash.
+
+ - 0 = fadump is not registered.
+ - 1 = fadump is registered and ready to handle system crash.
To register fadump echo 1 > /sys/kernel/fadump_registered and
echo 0 > /sys/kernel/fadump_registered for un-register and stop the
@@ -219,13 +223,12 @@ Here is the list of files under kernel sysfs:
easily integrated with kdump service start/stop.
/sys/kernel/fadump_release_mem
-
This file is available only when fadump is active during
second kernel. This is used to release the reserved memory
region that are held for saving crash dump. To release the
- reserved memory echo 1 to it:
+ reserved memory echo 1 to it::
- echo 1 > /sys/kernel/fadump_release_mem
+ echo 1 > /sys/kernel/fadump_release_mem
After echo 1, the content of the /sys/kernel/debug/powerpc/fadump_region
file will change to reflect the new memory reservations.
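
As an illustrative aside, not part of the patch above: the sysfs files just
described can be driven from a program as easily as from the shell. A minimal
C sketch (paths as documented above, error handling reduced to perror()) might
look like::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Write a single character to a sysfs control file. */
    static int sysfs_write_char(const char *path, char c)
    {
            int fd = open(path, O_WRONLY);

            if (fd < 0 || write(fd, &c, 1) != 1) {
                    perror(path);
                    if (fd >= 0)
                            close(fd);
                    return -1;
            }
            close(fd);
            return 0;
    }

    int main(void)
    {
            /* Production kernel: register fadump with the firmware. */
            sysfs_write_char("/sys/kernel/fadump_registered", '1');

            /* Capture kernel, after /proc/vmcore has been saved:
             *
             *     sysfs_write_char("/sys/kernel/fadump_release_mem", '1');
             */
            return 0;
    }
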
@@ -238,38 +241,39 @@ Here is the list of files under powerpc debugfs:
(Assuming debugfs is mounted on /sys/kernel/debug directory.)
/sys/kernel/debug/powerpc/fadump_region
-
This file shows the reserved memory regions if fadump is
enabled otherwise this file is empty. The output format
- is:
- <region>: [<start>-<end>] <reserved-size> bytes, Dumped: <dump-size>
+ is::
+
+ <region>: [<start>-<end>] <reserved-size> bytes, Dumped: <dump-size>
e.g.
- Contents when fadump is registered during first kernel
+ Contents when fadump is registered during first kernel::
- # cat /sys/kernel/debug/powerpc/fadump_region
- CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x0
- HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x0
- DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x0
+ # cat /sys/kernel/debug/powerpc/fadump_region
+ CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x0
+ HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x0
+ DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x0
- Contents when fadump is active during second kernel
+ Contents when fadump is active during second kernel::
- # cat /sys/kernel/debug/powerpc/fadump_region
- CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x40020
- HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x1000
- DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x10000000
- : [0x00000010000000-0x0000006ffaffff] 0x5ffb0000 bytes, Dumped: 0x5ffb0000
+ # cat /sys/kernel/debug/powerpc/fadump_region
+ CPU : [0x0000006ffb0000-0x0000006fff001f] 0x40020 bytes, Dumped: 0x40020
+ HPTE: [0x0000006fff0020-0x0000006fff101f] 0x1000 bytes, Dumped: 0x1000
+ DUMP: [0x0000006fff1020-0x0000007fff101f] 0x10000000 bytes, Dumped: 0x10000000
+ : [0x00000010000000-0x0000006ffaffff] 0x5ffb0000 bytes, Dumped: 0x5ffb0000
-NOTE: Please refer to Documentation/filesystems/debugfs.txt on
+NOTE:
+ Please refer to Documentation/filesystems/debugfs.txt on
how to mount the debugfs filesystem.
TODO:
-----
- o Need to come up with the better approach to find out more
+ - Need to come up with a better approach to determine a more
accurate boot memory size that is required for a kernel to
boot successfully when booted with restricted memory.
- o The fadump implementation introduces a fadump crash info structure
+ - The fadump implementation introduces a fadump crash info structure
in the scratch area before the ELF core header. The idea of introducing
this structure is to pass some important crash info data to the second
kernel which will help second kernel to populate ELF core header with
@@ -277,7 +281,9 @@ TODO:
design implementation does not address a possibility of introducing
additional fields (in future) to this structure without affecting
compatibility. Need to come up with the better approach to address this.
+
The possible approaches are:
+
1. Introduce version field for version tracking, bump up the version
whenever a new field is added to the structure in future. The version
field can be used to find out what fields are valid for the current
@@ -285,8 +291,11 @@ TODO:
2. Reserve the area of predefined size (say PAGE_SIZE) for this
structure and have unused area as reserved (initialized to zero)
for future field additions.
+
The advantage of approach 1 over 2 is we don't need to reserve extra space.
----
+
Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+
This document is based on the original documentation written for phyp
assisted dump by Linas Vepstas and Manish Ahuja.
diff --git a/Documentation/powerpc/hvcs.txt b/Documentation/powerpc/hvcs.rst
index a730ca5a07f8..6808acde672f 100644
--- a/Documentation/powerpc/hvcs.txt
+++ b/Documentation/powerpc/hvcs.rst
@@ -1,19 +1,22 @@
-===========================================================================
- HVCS
- IBM "Hypervisor Virtual Console Server" Installation Guide
- for Linux Kernel 2.6.4+
- Copyright (C) 2004 IBM Corporation
+===============================================================
+HVCS IBM "Hypervisor Virtual Console Server" Installation Guide
+===============================================================
-===========================================================================
-NOTE:Eight space tabs are the optimum editor setting for reading this file.
-===========================================================================
+for Linux Kernel 2.6.4+
- Author(s) : Ryan S. Arnold <rsa@us.ibm.com>
- Date Created: March, 02, 2004
- Last Changed: August, 24, 2004
+Copyright (C) 2004 IBM Corporation
----------------------------------------------------------------------------
-Table of contents:
+.. ===========================================================================
+.. NOTE:Eight space tabs are the optimum editor setting for reading this file.
+.. ===========================================================================
+
+
+Author(s): Ryan S. Arnold <rsa@us.ibm.com>
+
+Date Created: March, 02, 2004
+Last Changed: August, 24, 2004
+
+.. Table of contents:
1. Driver Introduction:
2. System Requirements
@@ -27,8 +30,8 @@ Table of contents:
8. Questions & Answers:
9. Reporting Bugs:
----------------------------------------------------------------------------
1. Driver Introduction:
+=======================
This is the device driver for the IBM Hypervisor Virtual Console Server,
"hvcs". The IBM hvcs provides a tty driver interface to allow Linux user
@@ -38,8 +41,8 @@ ppc64 system. Physical hardware consoles per partition are not practical
on this hardware so system consoles are accessed by this driver using
firmware interfaces to virtual terminal devices.
----------------------------------------------------------------------------
2. System Requirements:
+=======================
This device driver was written using 2.6.4 Linux kernel APIs and will only
build and run on kernels of this version or later.
@@ -52,8 +55,8 @@ Sysfs must be mounted on the system so that the user can determine which
major and minor numbers are associated with each vty-server. Directions
for sysfs mounting are outside the scope of this document.
----------------------------------------------------------------------------
3. Build Options:
+=================
The hvcs driver registers itself as a tty driver. The tty layer
dynamically allocates a block of major and minor numbers in a quantity
@@ -65,11 +68,11 @@ If the default number of device entries is adequate then this driver can be
built into the kernel. If not, the default can be over-ridden by inserting
the driver as a module with insmod parameters.
----------------------------------------------------------------------------
3.1 Built-in:
+-------------
The following menuconfig example demonstrates selecting to build this
-driver into the kernel.
+driver into the kernel::
Device Drivers --->
Character devices --->
@@ -77,11 +80,11 @@ driver into the kernel.
Begin the kernel make process.
----------------------------------------------------------------------------
3.2 Module:
+-----------
The following menuconfig example demonstrates selecting to build this
-driver as a kernel module.
+driver as a kernel module::
Device Drivers --->
Character devices --->
@@ -89,11 +92,11 @@ driver as a kernel module.
The make process will build the following kernel modules:
- hvcs.ko
- hvcserver.ko
+ - hvcs.ko
+ - hvcserver.ko
To insert the module with the default allocation execute the following
-commands in the order they appear:
+commands in the order they appear::
insmod hvcserver.ko
insmod hvcs.ko
@@ -103,7 +106,7 @@ be inserted first, otherwise the hvcs module will not find some of the
symbols it expects.
To override the default use an insmod parameter as follows (requesting 4
-tty devices as an example):
+tty devices as an example)::
insmod hvcs.ko hvcs_parm_num_devs=4
@@ -115,31 +118,31 @@ source file before building.
NOTE: The length of time it takes to insmod the driver seems to be related
to the number of tty interfaces the registering driver requests.
-In order to remove the driver module execute the following command:
+In order to remove the driver module execute the following command::
rmmod hvcs.ko
The recommended method for installing hvcs as a module is to use depmod to
build a current modules.dep file in /lib/modules/`uname -r` and then
-execute:
+execute::
-modprobe hvcs hvcs_parm_num_devs=4
+ modprobe hvcs hvcs_parm_num_devs=4
The modules.dep file indicates that hvcserver.ko needs to be inserted
before hvcs.ko and modprobe uses this file to smartly insert the modules in
the proper order.
The following modprobe command is used to remove hvcs and hvcserver in the
-proper order:
+proper order::
-modprobe -r hvcs
+ modprobe -r hvcs
----------------------------------------------------------------------------
4. Installation:
+================
The tty layer creates sysfs entries which contain the major and minor
numbers allocated for the hvcs driver. The following snippet of "tree"
-output of the sysfs directory shows where these numbers are presented:
+output of the sysfs directory shows where these numbers are presented::
sys/
|-- *other sysfs base dirs*
@@ -164,7 +167,7 @@ output of the sysfs directory shows where these numbers are presented:
|-- *other sysfs base dirs*
For the above examples the following output is a result of cat'ing the
-"dev" entry in the hvcs directory:
+"dev" entry in the hvcs directory::
Pow5:/sys/class/tty/hvcs0/ # cat dev
254:0
@@ -184,7 +187,7 @@ systems running hvcs will already have the device entries created or udev
will do it automatically.
Given the example output above, to manually create a /dev/hvcs* node entry
-mknod can be used as follows:
+mknod can be used as follows::
mknod /dev/hvcs0 c 254 0
mknod /dev/hvcs1 c 254 1
@@ -195,15 +198,15 @@ Using mknod to manually create the device entries makes these device nodes
persistent. Once created they will exist prior to the driver insmod.
Attempting to connect an application to /dev/hvcs* prior to insertion of
-the hvcs module will result in an error message similar to the following:
+the hvcs module will result in an error message similar to the following::
"/dev/hvcs*: No such device".
NOTE: Just because there is a device node present doesn't mean that there
is a vty-server device configured for that node.
----------------------------------------------------------------------------
5. Connection
+=============
Since this driver controls devices that provide a tty interface a user can
interact with the device node entries using any standard tty-interactive
@@ -249,7 +252,7 @@ vty-server adapter is associated with which /dev/hvcs* node a special sysfs
attribute has been added to each vty-server sysfs entry. This entry is
called "index" and showing it reveals an integer that refers to the
/dev/hvcs* entry to use to connect to that device. For instance cating the
-index attribute of vty-server adapter 30000004 shows the following.
+index attribute of vty-server adapter 30000004 shows the following::
Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat index
2
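
A hedged C sketch of the same lookup done programmatically, not part of the
patch above (the unit address 30000004 is the example used in the text, and
error handling is kept minimal)::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char path[64];
            int index, fd;
            FILE *f;

            /* Read the "index" attribute of the example vty-server. */
            f = fopen("/sys/bus/vio/drivers/hvcs/30000004/index", "r");
            if (!f || fscanf(f, "%d", &index) != 1) {
                    perror("index");
                    return 1;
            }
            fclose(f);

            /* Open the matching node, e.g. /dev/hvcs2 for index 2. */
            snprintf(path, sizeof(path), "/dev/hvcs%d", index);
            fd = open(path, O_RDWR | O_NOCTTY);
            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            /* ... read()/write() console traffic here ... */
            close(fd);
            return 0;
    }
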
@@ -262,8 +265,8 @@ system the /dev/hvcs* entry that interacts with a particular vty-server
adapter is not guaranteed to remain the same across system reboots. Look
in the Q & A section for more on this issue.
----------------------------------------------------------------------------
6. Disconnection
+================
As a security feature to prevent the delivery of stale data to an
unintended target the Power5 system firmware disables the fetching of data
@@ -305,7 +308,7 @@ connection between the vty-server and target vty ONLY if the vterm_state
previously read '1'. The write directive is ignored if the vterm_state
read '0' or if any value other than '0' was written to the vterm_state
attribute. The following example will show the method used for verifying
-the vty-server connection status and disconnecting a vty-server connection.
+the vty-server connection status and disconnecting a vty-server connection::
Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat vterm_state
1
@@ -318,12 +321,12 @@ the vty-server connection status and disconnecting a vty-server connection.
All vty-server connections are automatically terminated when the device is
hotplug removed and when the module is removed.
----------------------------------------------------------------------------
7. Configuration
+================
Each vty-server has a sysfs entry in the /sys/devices/vio directory, which
is symlinked in several other sysfs tree directories, notably under the
-hvcs driver entry, which looks like the following example:
+hvcs driver entry, which looks like the following example::
Pow5:/sys/bus/vio/drivers/hvcs # ls
. .. 30000003 30000004 rescan
@@ -344,7 +347,7 @@ completed or was never executed.
Vty-server entries in this directory are a 32 bit partition unique unit
address that is created by firmware. An example vty-server sysfs entry
-looks like the following:
+looks like the following::
Pow5:/sys/bus/vio/drivers/hvcs/30000004 # ls
. current_vty devspec name partner_vtys
@@ -352,21 +355,21 @@ looks like the following:
Each entry is provided, by default with a "name" attribute. Reading the
"name" attribute will reveal the device type as shown in the following
-example:
+example::
Pow5:/sys/bus/vio/drivers/hvcs/30000003 # cat name
vty-server
Each entry is also provided, by default, with a "devspec" attribute which
reveals the full device specification when read, as shown in the following
-example:
+example::
Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat devspec
/vdevice/vty-server@30000004
Each vty-server sysfs dir is provided with two read-only attributes that
provide lists of easily parsed partner vty data: "partner_vtys" and
-"partner_clcs".
+"partner_clcs"::
Pow5:/sys/bus/vio/drivers/hvcs/30000004 # cat partner_vtys
30000000
@@ -396,7 +399,7 @@ A vty-server can only be connected to a single vty at a time. The entry,
read.
The current_vty can be changed by writing a valid partner clc to the entry
-as in the following example:
+as in the following example::
Pow5:/sys/bus/vio/drivers/hvcs/30000004 # echo U5112.428.10304
8A-V4-C0 > current_vty
@@ -408,9 +411,9 @@ currently open connection is freed.
Information on the "vterm_state" attribute was covered earlier on the
chapter entitled "disconnection".
----------------------------------------------------------------------------
8. Questions & Answers:
-===========================================================================
+=======================
+
Q: What are the security concerns involving hvcs?
A: There are three main security concerns:
@@ -429,6 +432,7 @@ A: There are three main security concerns:
partition) will experience the previously logged in session.
---------------------------------------------------------------------------
+
Q: How do I multiplex a console that I grab through hvcs so that other
people can see it:
@@ -440,6 +444,7 @@ term type "screen" to others. This means that curses based programs may
not display properly in screen sessions.
---------------------------------------------------------------------------
+
Q: Why are the colors all messed up?
Q: Why are the control characters acting strange or not working?
Q: Why is the console output all strange and unintelligible?
@@ -455,6 +460,7 @@ disconnect from the console. This will ensure that the next user gets
their own TERM type set when they login.
---------------------------------------------------------------------------
+
Q: When I try to CONNECT kermit to an hvcs device I get:
"Sorry, can't open connection: /dev/hvcs*"What is happening?
@@ -490,6 +496,7 @@ A: There is not a corresponding vty-server device that maps to an existing
/dev/hvcs* entry.
---------------------------------------------------------------------------
+
Q: When I try to CONNECT kermit to an hvcs device I get:
"Sorry, write access to UUCP lockfile directory denied."
@@ -497,6 +504,7 @@ A: The /dev/hvcs* entry you have specified doesn't exist where you said it
does? Maybe you haven't inserted the module (on systems with udev).
---------------------------------------------------------------------------
+
Q: If I already have one Linux partition installed can I use hvcs on said
partition to provide the console for the install of a second Linux
partition?
@@ -505,6 +513,7 @@ A: Yes granted that your are connected to the /dev/hvcs* device using
kermit or cu or some other program that doesn't provide terminal emulation.
---------------------------------------------------------------------------
+
Q: Can I connect to more than one partition's console at a time using this
driver?
@@ -512,6 +521,7 @@ A: Yes. Of course this means that there must be more than one vty-server
configured for this partition and each must point to a disconnected vty.
---------------------------------------------------------------------------
+
Q: Does the hvcs driver support dynamic (hotplug) addition of devices?
A: Yes, if you have dlpar and hotplug enabled for your system and it has
@@ -519,6 +529,7 @@ been built into the kernel the hvcs drivers is configured to dynamically
handle additions of new devices and removals of unused devices.
---------------------------------------------------------------------------
+
Q: For some reason /dev/hvcs* doesn't map to the same vty-server adapter
after a reboot. What happened?
@@ -533,6 +544,7 @@ on how to determine which vty-server goes with which /dev/hvcs* node.
Hint; look at the sysfs "index" attribute for the vty-server.
---------------------------------------------------------------------------
+
Q: Can I use /dev/hvcs* as a conduit to another partition and use a tty
device on that partition as the other end of the pipe?
@@ -554,7 +566,9 @@ read or write to /dev/hvcs*. Now you have a tty conduit between two
partitions.
---------------------------------------------------------------------------
+
9. Reporting Bugs:
+==================
The proper channel for reporting bugs is either through the Linux OS
distribution company that provided your OS or by posting issues to the
diff --git a/Documentation/powerpc/index.rst b/Documentation/powerpc/index.rst
new file mode 100644
index 000000000000..549b1cdd77ae
--- /dev/null
+++ b/Documentation/powerpc/index.rst
@@ -0,0 +1,34 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======
+powerpc
+=======
+
+.. toctree::
+ :maxdepth: 1
+
+ bootwrapper
+ cpu_families
+ cpu_features
+ cxl
+ cxlflash
+ dawr-power9
+ dscr
+ eeh-pci-error-recovery
+ firmware-assisted-dump
+ hvcs
+ isa-versions
+ mpc52xx
+ pci_iov_resource_on_powernv
+ pmu-ebb
+ ptrace
+ qe_firmware
+ syscall64-abi
+ transactional_memory
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/powerpc/isa-versions.rst b/Documentation/powerpc/isa-versions.rst
index 66c24140ebf1..a363d8c1603c 100644
--- a/Documentation/powerpc/isa-versions.rst
+++ b/Documentation/powerpc/isa-versions.rst
@@ -1,13 +1,12 @@
-:orphan:
-
+==========================
CPU to ISA Version Mapping
==========================
Mapping of some CPU versions to relevant ISA versions.
-========= ====================
+========= ====================================================================
CPU Architecture version
-========= ====================
+========= ====================================================================
Power9 Power ISA v3.0B
Power8 Power ISA v2.07
Power7 Power ISA v2.06
@@ -24,7 +23,7 @@ PPC970 - PowerPC User Instruction Set Architecture Book I v2.01
- PowerPC Virtual Environment Architecture Book II v2.01
- PowerPC Operating Environment Architecture Book III v2.01
- Plus Altivec/VMX ~= 2.03
-========= ====================
+========= ====================================================================
Key Features
@@ -60,9 +59,9 @@ Power5 No
PPC970 No
========== ====
-========== ====================
+========== ====================================
CPU Transactional Memory
-========== ====================
+========== ====================================
Power9 Yes (* see transactional_memory.txt)
Power8 Yes
Power7 No
@@ -73,4 +72,4 @@ Power5++ No
Power5+ No
Power5 No
PPC970 No
-========== ====================
+========== ====================================
diff --git a/Documentation/powerpc/mpc52xx.txt b/Documentation/powerpc/mpc52xx.rst
index 0d540a31ea1a..8676ac63e077 100644
--- a/Documentation/powerpc/mpc52xx.txt
+++ b/Documentation/powerpc/mpc52xx.rst
@@ -1,11 +1,13 @@
+=============================
Linux 2.6.x on MPC52xx family
------------------------------
+=============================
For the latest info, go to http://www.246tNt.com/mpc52xx/
To compile/use :
- - U-Boot:
+ - U-Boot::
+
# <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
if you wish to ).
# make lite5200_defconfig
@@ -16,7 +18,8 @@ To compile/use :
=> tftpboot 400000 pRamdisk
=> bootm 200000 400000
- - DBug:
+ - DBug::
+
# <edit Makefile to set ARCH=ppc & CROSS_COMPILE=... ( also EXTRAVERSION
if you wish to ).
# make lite5200_defconfig
@@ -28,7 +31,8 @@ To compile/use :
DBug> dn -i zImage.initrd.lite5200
-Some remarks :
+Some remarks:
+
- The port is named mpc52xxx, and config options are PPC_MPC52xx. The MGT5100
is not supported, and I'm not sure anyone is interesting in working on it
so. I didn't took 5xxx because there's apparently a lot of 5xxx that have
diff --git a/Documentation/powerpc/pci_iov_resource_on_powernv.txt b/Documentation/powerpc/pci_iov_resource_on_powernv.rst
index b55c5cd83f8d..f5a5793e1613 100644
--- a/Documentation/powerpc/pci_iov_resource_on_powernv.txt
+++ b/Documentation/powerpc/pci_iov_resource_on_powernv.rst
@@ -1,6 +1,13 @@
+===================================================
+PCI Express I/O Virtualization Resource on Powerenv
+===================================================
+
Wei Yang <weiyang@linux.vnet.ibm.com>
+
Benjamin Herrenschmidt <benh@au1.ibm.com>
+
Bjorn Helgaas <bhelgaas@google.com>
+
26 Aug 2014
This document describes the requirement from hardware for PCI MMIO resource
@@ -10,6 +17,7 @@ Endpoints and the implementation on P8 (IODA2). The next two sections talks
about considerations on enabling SRIOV on IODA2.
1. Introduction to Partitionable Endpoints
+==========================================
A Partitionable Endpoint (PE) is a way to group the various resources
associated with a device or a set of devices to provide isolation between
@@ -35,6 +43,7 @@ is a completely separate HW entity that replicates the entire logic, so has
its own set of PEs, etc.
2. Implementation of Partitionable Endpoints on P8 (IODA2)
+==========================================================
P8 supports up to 256 Partitionable Endpoints per PHB.
@@ -149,6 +158,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
sense, but we haven't done it yet.
3. Considerations for SR-IOV on PowerKVM
+========================================
* SR-IOV Background
@@ -224,7 +234,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
IODA supports 256 PEs, so segmented windows contain 256 segments, so if
total_VFs is less than 256, we have the situation in Figure 1.0, where
segments [total_VFs, 255] of the M64 window may map to some MMIO range on
- other devices:
+ other devices::
0 1 total_VFs - 1
+------+------+- -+------+------+
@@ -243,7 +253,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
Figure 1.0 Direct map VF(n) BAR space
Our current solution is to allocate 256 segments even if the VF(n) BAR
- space doesn't need that much, as shown in Figure 1.1:
+ space doesn't need that much, as shown in Figure 1.1::
0 1 total_VFs - 1 255
+------+------+- -+------+------+- -+------+------+
@@ -269,6 +279,7 @@ P8 supports up to 256 Partitionable Endpoints per PHB.
responds to segments [total_VFs, 255].
4. Implications for the Generic PCI Code
+========================================
The PCIe SR-IOV spec requires that the base of the VF(n) BAR space be
aligned to the size of an individual VF BAR.
diff --git a/Documentation/powerpc/pmu-ebb.txt b/Documentation/powerpc/pmu-ebb.rst
index 73cd163dbfb8..4f474758eb55 100644
--- a/Documentation/powerpc/pmu-ebb.txt
+++ b/Documentation/powerpc/pmu-ebb.rst
@@ -1,3 +1,4 @@
+========================
PMU Event Based Branches
========================
diff --git a/Documentation/powerpc/ptrace.rst b/Documentation/powerpc/ptrace.rst
new file mode 100644
index 000000000000..864d4b6dddd1
--- /dev/null
+++ b/Documentation/powerpc/ptrace.rst
@@ -0,0 +1,156 @@
+======
+Ptrace
+======
+
+GDB intends to support the following hardware debug features of BookE
+processors:
+
+- 4 hardware breakpoints (IAC)
+- 2 hardware watchpoints (read, write and read-write) (DAC)
+- 2 value conditions for the hardware watchpoints (DVC)
+
+For that, we need to extend ptrace so that GDB can query and set these
+resources. Since we're extending, we're trying to create an interface
+that's extendable and that covers both BookE and server processors, so
+that GDB doesn't need to special-case each of them. We added the
+following 3 new ptrace requests.
+
+1. PTRACE_PPC_GETHWDEBUGINFO
+============================
+
+Query for GDB to discover the hardware debug features. The main info to
+be returned here is the minimum alignment for the hardware watchpoints.
+BookE processors don't have restrictions here, but server processors have
+an 8-byte alignment restriction for hardware watchpoints. We'd like to avoid
+adding special cases to GDB based on what it sees in AUXV.
+
+Since we're at it, we added other useful info that the kernel can return to
+GDB: this query will return the number of hardware breakpoints, hardware
+watchpoints and whether it supports a range of addresses and a condition.
+The query will fill the following structure provided by the requesting process::
+
+ struct ppc_debug_info {
+ uint32_t version;
+ uint32_t num_instruction_bps;
+ uint32_t num_data_bps;
+ uint32_t num_condition_regs;
+ uint32_t data_bp_alignment;
+ uint32_t sizeof_condition; /* size of the DVC register */
+ uint64_t features; /* bitmask of the individual flags */
+ };
+
+features will have bits indicating whether there is support for::
+
+ #define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x1
+ #define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x2
+ #define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x4
+ #define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x8
+ #define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x10
+
+2. PTRACE_SETHWDEBUG
+====================
+
+Sets a hardware breakpoint or watchpoint, according to the provided structure::
+
+ struct ppc_hw_breakpoint {
+ uint32_t version;
+ #define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x1
+ #define PPC_BREAKPOINT_TRIGGER_READ 0x2
+ #define PPC_BREAKPOINT_TRIGGER_WRITE 0x4
+ uint32_t trigger_type; /* only some combinations allowed */
+ #define PPC_BREAKPOINT_MODE_EXACT 0x0
+ #define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x1
+ #define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x2
+ #define PPC_BREAKPOINT_MODE_MASK 0x3
+ uint32_t addr_mode; /* address match mode */
+
+ #define PPC_BREAKPOINT_CONDITION_MODE 0x3
+ #define PPC_BREAKPOINT_CONDITION_NONE 0x0
+ #define PPC_BREAKPOINT_CONDITION_AND 0x1
+ #define PPC_BREAKPOINT_CONDITION_EXACT 0x1 /* different name for the same thing as above */
+ #define PPC_BREAKPOINT_CONDITION_OR 0x2
+ #define PPC_BREAKPOINT_CONDITION_AND_OR 0x3
+ #define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000 /* byte enable bits */
+ #define PPC_BREAKPOINT_CONDITION_BE(n) (1<<((n)+16))
+ uint32_t condition_mode; /* break/watchpoint condition flags */
+
+ uint64_t addr;
+ uint64_t addr2;
+ uint64_t condition_value;
+ };
+
+A request specifies one event, not necessarily just one register to be set.
+For instance, if the request is for a watchpoint with a condition, both the
+DAC and DVC registers will be set in the same request.
+
+With this GDB can ask for all kinds of hardware breakpoints and watchpoints
+that the BookE supports. COMEFROM breakpoints available in server processors
+are not contemplated, but that is out of the scope of this work.
+
+ptrace will return an integer (handle) uniquely identifying the breakpoint or
+watchpoint just created. This integer will be used in the PTRACE_DELHWDEBUG
+request to ask for its removal. Return -ENOSPC if the requested breakpoint
+can't be allocated on the registers.
+
+Some examples of using the structure to:
+
+- set a breakpoint in the first breakpoint register::
+
+ p.version = PPC_DEBUG_CURRENT_VERSION;
+ p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
+ p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+ p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ p.addr = (uint64_t) address;
+ p.addr2 = 0;
+ p.condition_value = 0;
+
+- set a watchpoint which triggers on reads in the second watchpoint register::
+
+ p.version = PPC_DEBUG_CURRENT_VERSION;
+ p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
+ p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+ p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ p.addr = (uint64_t) address;
+ p.addr2 = 0;
+ p.condition_value = 0;
+
+- set a watchpoint which triggers only with a specific value::
+
+ p.version = PPC_DEBUG_CURRENT_VERSION;
+ p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
+ p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+ p.condition_mode = PPC_BREAKPOINT_CONDITION_AND | PPC_BREAKPOINT_CONDITION_BE_ALL;
+ p.addr = (uint64_t) address;
+ p.addr2 = 0;
+ p.condition_value = (uint64_t) condition;
+
+- set a ranged hardware breakpoint::
+
+ p.version = PPC_DEBUG_CURRENT_VERSION;
+ p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
+ p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
+ p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ p.addr = (uint64_t) begin_range;
+ p.addr2 = (uint64_t) end_range;
+ p.condition_value = 0;
+
+- set a watchpoint in server processors (BookS)::
+
+ p.version = 1;
+ p.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
+ p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
+ or
+ p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
+
+ p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
+ p.addr = (uint64_t) begin_range;
+ /* For PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE addr2 needs to be specified, where
+ * addr2 - addr <= 8 Bytes.
+ */
+ p.addr2 = (uint64_t) end_range;
+ p.condition_value = 0;
+
+3. PTRACE_DELHWDEBUG
+====================
+
+Takes an integer which identifies an existing breakpoint or watchpoint
+(i.e., the value returned from PTRACE_SETHWDEBUG), and deletes the
+corresponding breakpoint or watchpoint.
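
To make the request flow above concrete, here is a hedged debugger-side C
sketch, not part of the patch above. It uses the spellings from the powerpc
uapi header <asm/ptrace.h> (PPC_PTRACE_GETHWDBGINFO, PPC_PTRACE_SETHWDEBUG,
PPC_PTRACE_DELHWDEBUG and friends), which correspond to the requests described
in the text; header interactions with a particular libc may need adjusting::

    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <asm/ptrace.h>     /* PPC_PTRACE_*, struct ppc_hw_breakpoint */
    #include <stdint.h>
    #include <stdio.h>

    /* Plant one execute breakpoint at 'address' in an already-traced child,
     * then delete it again using the handle returned by the kernel. */
    static int try_hw_breakpoint(pid_t child, uint64_t address)
    {
            struct ppc_debug_info dbginfo;
            struct ppc_hw_breakpoint p = { 0 };
            long handle;

            if (ptrace(PPC_PTRACE_GETHWDBGINFO, child, NULL, &dbginfo) < 0)
                    return -1;
            printf("%u instruction bps, %u data bps, alignment %u\n",
                   dbginfo.num_instruction_bps, dbginfo.num_data_bps,
                   dbginfo.data_bp_alignment);

            p.version        = PPC_DEBUG_CURRENT_VERSION;
            p.trigger_type   = PPC_BREAKPOINT_TRIGGER_EXECUTE;
            p.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;
            p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
            p.addr           = address;

            handle = ptrace(PPC_PTRACE_SETHWDEBUG, child, NULL, &p);
            if (handle < 0)                 /* e.g. no free debug register */
                    return -1;

            /* ... resume the child and wait for the trap here ... */

            return ptrace(PPC_PTRACE_DELHWDEBUG, child, NULL, (void *)handle);
    }
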
diff --git a/Documentation/powerpc/ptrace.txt b/Documentation/powerpc/ptrace.txt
deleted file mode 100644
index 99c5ce88d0fe..000000000000
--- a/Documentation/powerpc/ptrace.txt
+++ /dev/null
@@ -1,151 +0,0 @@
-GDB intends to support the following hardware debug features of BookE
-processors:
-
-4 hardware breakpoints (IAC)
-2 hardware watchpoints (read, write and read-write) (DAC)
-2 value conditions for the hardware watchpoints (DVC)
-
-For that, we need to extend ptrace so that GDB can query and set these
-resources. Since we're extending, we're trying to create an interface
-that's extendable and that covers both BookE and server processors, so
-that GDB doesn't need to special-case each of them. We added the
-following 3 new ptrace requests.
-
-1. PTRACE_PPC_GETHWDEBUGINFO
-
-Query for GDB to discover the hardware debug features. The main info to
-be returned here is the minimum alignment for the hardware watchpoints.
-BookE processors don't have restrictions here, but server processors have
-an 8-byte alignment restriction for hardware watchpoints. We'd like to avoid
-adding special cases to GDB based on what it sees in AUXV.
-
-Since we're at it, we added other useful info that the kernel can return to
-GDB: this query will return the number of hardware breakpoints, hardware
-watchpoints and whether it supports a range of addresses and a condition.
-The query will fill the following structure provided by the requesting process:
-
-struct ppc_debug_info {
- unit32_t version;
- unit32_t num_instruction_bps;
- unit32_t num_data_bps;
- unit32_t num_condition_regs;
- unit32_t data_bp_alignment;
- unit32_t sizeof_condition; /* size of the DVC register */
- uint64_t features; /* bitmask of the individual flags */
-};
-
-features will have bits indicating whether there is support for:
-
-#define PPC_DEBUG_FEATURE_INSN_BP_RANGE 0x1
-#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x2
-#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x4
-#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x8
-#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x10
-
-2. PTRACE_SETHWDEBUG
-
-Sets a hardware breakpoint or watchpoint, according to the provided structure:
-
-struct ppc_hw_breakpoint {
- uint32_t version;
-#define PPC_BREAKPOINT_TRIGGER_EXECUTE 0x1
-#define PPC_BREAKPOINT_TRIGGER_READ 0x2
-#define PPC_BREAKPOINT_TRIGGER_WRITE 0x4
- uint32_t trigger_type; /* only some combinations allowed */
-#define PPC_BREAKPOINT_MODE_EXACT 0x0
-#define PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE 0x1
-#define PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE 0x2
-#define PPC_BREAKPOINT_MODE_MASK 0x3
- uint32_t addr_mode; /* address match mode */
-
-#define PPC_BREAKPOINT_CONDITION_MODE 0x3
-#define PPC_BREAKPOINT_CONDITION_NONE 0x0
-#define PPC_BREAKPOINT_CONDITION_AND 0x1
-#define PPC_BREAKPOINT_CONDITION_EXACT 0x1 /* different name for the same thing as above */
-#define PPC_BREAKPOINT_CONDITION_OR 0x2
-#define PPC_BREAKPOINT_CONDITION_AND_OR 0x3
-#define PPC_BREAKPOINT_CONDITION_BE_ALL 0x00ff0000 /* byte enable bits */
-#define PPC_BREAKPOINT_CONDITION_BE(n) (1<<((n)+16))
- uint32_t condition_mode; /* break/watchpoint condition flags */
-
- uint64_t addr;
- uint64_t addr2;
- uint64_t condition_value;
-};
-
-A request specifies one event, not necessarily just one register to be set.
-For instance, if the request is for a watchpoint with a condition, both the
-DAC and DVC registers will be set in the same request.
-
-With this GDB can ask for all kinds of hardware breakpoints and watchpoints
-that the BookE supports. COMEFROM breakpoints available in server processors
-are not contemplated, but that is out of the scope of this work.
-
-ptrace will return an integer (handle) uniquely identifying the breakpoint or
-watchpoint just created. This integer will be used in the PTRACE_DELHWDEBUG
-request to ask for its removal. Return -ENOSPC if the requested breakpoint
-can't be allocated on the registers.
-
-Some examples of using the structure to:
-
-- set a breakpoint in the first breakpoint register
-
- p.version = PPC_DEBUG_CURRENT_VERSION;
- p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
- p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
- p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
- p.addr = (uint64_t) address;
- p.addr2 = 0;
- p.condition_value = 0;
-
-- set a watchpoint which triggers on reads in the second watchpoint register
-
- p.version = PPC_DEBUG_CURRENT_VERSION;
- p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
- p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
- p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
- p.addr = (uint64_t) address;
- p.addr2 = 0;
- p.condition_value = 0;
-
-- set a watchpoint which triggers only with a specific value
-
- p.version = PPC_DEBUG_CURRENT_VERSION;
- p.trigger_type = PPC_BREAKPOINT_TRIGGER_READ;
- p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
- p.condition_mode = PPC_BREAKPOINT_CONDITION_AND | PPC_BREAKPOINT_CONDITION_BE_ALL;
- p.addr = (uint64_t) address;
- p.addr2 = 0;
- p.condition_value = (uint64_t) condition;
-
-- set a ranged hardware breakpoint
-
- p.version = PPC_DEBUG_CURRENT_VERSION;
- p.trigger_type = PPC_BREAKPOINT_TRIGGER_EXECUTE;
- p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
- p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
- p.addr = (uint64_t) begin_range;
- p.addr2 = (uint64_t) end_range;
- p.condition_value = 0;
-
-- set a watchpoint in server processors (BookS)
-
- p.version = 1;
- p.trigger_type = PPC_BREAKPOINT_TRIGGER_RW;
- p.addr_mode = PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE;
- or
- p.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
-
- p.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
- p.addr = (uint64_t) begin_range;
- /* For PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE addr2 needs to be specified, where
- * addr2 - addr <= 8 Bytes.
- */
- p.addr2 = (uint64_t) end_range;
- p.condition_value = 0;
-
-3. PTRACE_DELHWDEBUG
-
-Takes an integer which identifies an existing breakpoint or watchpoint
-(i.e., the value returned from PTRACE_SETHWDEBUG), and deletes the
-corresponding breakpoint or watchpoint..
diff --git a/Documentation/powerpc/qe_firmware.txt b/Documentation/powerpc/qe_firmware.rst
index e7ac24aec4ff..42f5103140c9 100644
--- a/Documentation/powerpc/qe_firmware.txt
+++ b/Documentation/powerpc/qe_firmware.rst
@@ -1,23 +1,23 @@
- Freescale QUICC Engine Firmware Uploading
- -----------------------------------------
+=========================================
+Freescale QUICC Engine Firmware Uploading
+=========================================
(c) 2007 Timur Tabi <timur at freescale.com>,
Freescale Semiconductor
-Table of Contents
-=================
+.. Table of Contents
- I - Software License for Firmware
+ I - Software License for Firmware
- II - Microcode Availability
+ II - Microcode Availability
- III - Description and Terminology
+ III - Description and Terminology
- IV - Microcode Programming Details
+ IV - Microcode Programming Details
- V - Firmware Structure Layout
+ V - Firmware Structure Layout
- VI - Sample Code for Creating Firmware Files
+ VI - Sample Code for Creating Firmware Files
Revision Information
====================
@@ -39,7 +39,7 @@ http://opensource.freescale.com. For other firmware files, please contact
your Freescale representative or your operating system vendor.
III - Description and Terminology
-================================
+=================================
In this document, the term 'microcode' refers to the sequence of 32-bit
integers that compose the actual QE microcode.
@@ -89,7 +89,7 @@ being fixed in the RAM package utilizing they should be activated. This data
structure signals the microcode which of these virtual traps is active.
This structure contains 6 words that the application should copy to some
-specific been defined. This table describes the structure.
+specific location that has been defined. This table describes the structure::
---------------------------------------------------------------
| Offset in | | Destination Offset | Size of |
@@ -119,7 +119,7 @@ Extended Modes
This is a double word bit array (64 bits) that defines special functionality
which has an impact on the software drivers. Each bit has its own impact
and has special instructions for the s/w associated with it. This structure is
-described in this table:
+described in this table::
-----------------------------------------------------------------------
| Bit # | Name | Description |
@@ -220,7 +220,8 @@ The 'model' field is a 16-bit number that matches the actual SOC. The
'major' and 'minor' fields are the major and minor revision numbers,
respectively, of the SOC.
-For example, to match the 8323, revision 1.0:
+For example, to match the 8323, revision 1.0::
+
soc.model = 8323
soc.major = 1
soc.minor = 0
@@ -273,10 +274,10 @@ library and available to any driver that calles qe_get_firmware_info().
'reserved'.
After the last microcode is a 32-bit CRC. It can be calculated using
-this algorithm:
+this algorithm::
-u32 crc32(const u8 *p, unsigned int len)
-{
+ u32 crc32(const u8 *p, unsigned int len)
+ {
unsigned int i;
u32 crc = 0;
@@ -286,7 +287,7 @@ u32 crc32(const u8 *p, unsigned int len)
crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
}
return crc;
-}
+ }
VI - Sample Code for Creating Firmware Files
============================================
diff --git a/Documentation/powerpc/syscall64-abi.txt b/Documentation/powerpc/syscall64-abi.rst
index fa716a0d88bd..e49f69f941b9 100644
--- a/Documentation/powerpc/syscall64-abi.txt
+++ b/Documentation/powerpc/syscall64-abi.rst
@@ -5,12 +5,12 @@ Power Architecture 64-bit Linux system call ABI
syscall
=======
-syscall calling sequence[*] matches the Power Architecture 64-bit ELF ABI
+syscall calling sequence\ [1]_ matches the Power Architecture 64-bit ELF ABI
specification C function calling sequence, including register preservation
rules, with the following differences.
-[*] Some syscalls (typically low-level management functions) may have
- different calling sequences (e.g., rt_sigreturn).
+.. [1] Some syscalls (typically low-level management functions) may have
+ different calling sequences (e.g., rt_sigreturn).
Parameters and return value
---------------------------
@@ -33,12 +33,14 @@ Register preservation rules
Register preservation rules match the ELF ABI calling sequence with the
following differences:
-r0: Volatile. (System call number.)
-r3: Volatile. (Parameter 1, and return value.)
-r4-r8: Volatile. (Parameters 2-6.)
-cr0: Volatile (cr0.SO is the return error condition)
-cr1, cr5-7: Nonvolatile.
-lr: Nonvolatile.
+=========== ============= ========================================
+r0 Volatile (System call number.)
+r3 Volatile (Parameter 1, and return value.)
+r4-r8 Volatile (Parameters 2-6.)
+cr0 Volatile (cr0.SO is the return error condition)
+cr1, cr5-7 Nonvolatile
+lr Nonvolatile
+=========== ============= ========================================
All floating point and vector data registers as well as control and status
registers are nonvolatile.
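
Purely as an illustration of these rules, and not part of the ABI text itself:
a raw three-argument system call wrapper in GNU C, modelled on common libc
implementations, might look as follows (the conditional negate converts the
cr0.SO error indication into the usual negative-errno return convention)::

    /* Number in r0, arguments in r3-r5, result in r3; cr0.SO set on error. */
    static inline long raw_syscall3(long nr, long a1, long a2, long a3)
    {
            register long r0 asm("r0") = nr;
            register long r3 asm("r3") = a1;
            register long r4 asm("r4") = a2;
            register long r5 asm("r5") = a3;

            asm volatile("sc; bns+ 1f; neg %1, %1; 1:"
                         : "+r"(r0), "+r"(r3), "+r"(r4), "+r"(r5)
                         :
                         : "memory", "cr0", "r6", "r7", "r8",
                           "r9", "r10", "r11", "r12");
            return r3;      /* a negative value encodes -errno */
    }

    /* Example use (needs <asm/unistd.h> for __NR_write):
     *
     *     raw_syscall3(__NR_write, 1, (long)"hi\n", 3);
     */
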
@@ -90,9 +92,12 @@ The vsyscall may or may not use the caller's stack frame save areas.
Register preservation rules
---------------------------
-r0: Volatile.
-cr1, cr5-7: Volatile.
-lr: Volatile.
+
+=========== ========
+r0 Volatile
+cr1, cr5-7 Volatile
+lr Volatile
+=========== ========
Invocation
----------
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.rst
index 52c023e14f26..09955103acb4 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.rst
@@ -1,3 +1,4 @@
+============================
Transactional Memory support
============================
@@ -17,29 +18,29 @@ instructions are presented to delimit transactions; transactions are
guaranteed to either complete atomically or roll back and undo any partial
changes.
-A simple transaction looks like this:
+A simple transaction looks like this::
-begin_move_money:
- tbegin
- beq abort_handler
+ begin_move_money:
+ tbegin
+ beq abort_handler
- ld r4, SAVINGS_ACCT(r3)
- ld r5, CURRENT_ACCT(r3)
- subi r5, r5, 1
- addi r4, r4, 1
- std r4, SAVINGS_ACCT(r3)
- std r5, CURRENT_ACCT(r3)
+ ld r4, SAVINGS_ACCT(r3)
+ ld r5, CURRENT_ACCT(r3)
+ subi r5, r5, 1
+ addi r4, r4, 1
+ std r4, SAVINGS_ACCT(r3)
+ std r5, CURRENT_ACCT(r3)
- tend
+ tend
- b continue
+ b continue
-abort_handler:
- ... test for odd failures ...
+ abort_handler:
+ ... test for odd failures ...
- /* Retry the transaction if it failed because it conflicted with
- * someone else: */
- b begin_move_money
+ /* Retry the transaction if it failed because it conflicted with
+ * someone else: */
+ b begin_move_money
The 'tbegin' instruction denotes the start point, and 'tend' the end point.
@@ -123,7 +124,7 @@ Transaction-aware signal handlers can read the transactional register state
from the second ucontext. This will be necessary for crash handlers to
determine, for example, the address of the instruction causing the SIGSEGV.
-Example signal handler:
+Example signal handler::
void crash_handler(int sig, siginfo_t *si, void *uc)
{
@@ -133,9 +134,9 @@ Example signal handler:
if (ucp_link) {
u64 msr = ucp->uc_mcontext.regs->msr;
/* May have transactional ucontext! */
-#ifndef __powerpc64__
+ #ifndef __powerpc64__
msr |= ((u64)transactional_ucp->uc_mcontext.regs->msr) << 32;
-#endif
+ #endif
if (MSR_TM_ACTIVE(msr)) {
/* Yes, we crashed during a transaction. Oops. */
fprintf(stderr, "Transaction to be restarted at 0x%llx, but "
@@ -176,6 +177,7 @@ Failure cause codes used by kernel
These are defined in <asm/reg.h>, and distinguish different reasons why the
kernel aborted a transaction:
+ ====================== ================================
TM_CAUSE_RESCHED Thread was rescheduled.
TM_CAUSE_TLBI Software TLB invalid.
TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
@@ -184,6 +186,7 @@ kernel aborted a transaction:
TM_CAUSE_MISC Currently unused.
TM_CAUSE_ALIGNMENT Alignment fault.
TM_CAUSE_EMULATE Emulation that touched memory.
+ ====================== ================================
These can be checked by the user program's abort handler as TEXASR[0:7]. If
bit 7 is set, it indicates that the error is consider persistent. For example
@@ -203,7 +206,7 @@ POWER9
======
TM on POWER9 has issues with storing the complete register state. This
-is described in this commit:
+is described in this commit::
commit 4bb3c7a0208fc13ca70598efd109901a7cd45ae7
Author: Paul Mackerras <paulus@ozlabs.org>
diff --git a/Documentation/process/conf.py b/Documentation/process/conf.py
deleted file mode 100644
index 1b01a80ad9ce..000000000000
--- a/Documentation/process/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = 'Linux Kernel Development Documentation'
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'process.tex', 'Linux Kernel Development Documentation',
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/process/deprecated.rst b/Documentation/process/deprecated.rst
index 49e0f64a3427..053b24a6dd38 100644
--- a/Documentation/process/deprecated.rst
+++ b/Documentation/process/deprecated.rst
@@ -119,3 +119,17 @@ array may exceed the remaining memory in the stack segment. This could
lead to a crash, possible overwriting sensitive contents at the end of the
stack (when built without `CONFIG_THREAD_INFO_IN_TASK=y`), or overwriting
memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`)
+
+Implicit switch case fall-through
+---------------------------------
+The C language allows switch cases to "fall through" when
+a "break" statement is missing at the end of a case. This,
+however, introduces ambiguity in the code, as it's not always
+clear if the missing break is intentional or a bug. As there
+has been a long list of flaws `due to missing "break" statements
+<https://cwe.mitre.org/data/definitions/484.html>`_, we no longer allow
+"implicit fall-through". In order to identify an intentional fall-through
+case, we have adopted the marking used by static analyzers: a comment
+saying `/* Fall through */`. Once the C++17 `[[fallthrough]]` attribute (or
+the equivalent GNU `__attribute__((fallthrough))`) is more widely handled by
+C compilers, static analyzers, and IDEs, we can switch to using that instead.
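
A hedged illustration of the marking just described (the function and its
names are invented for the example)::

    /* Count blanks, but report every separator character the same way;
     * the fall-through from ' ' to the separator cases is intentional. */
    static int classify(int c, int *spaces)
    {
            switch (c) {
            case ' ':
                    (*spaces)++;
                    /* Fall through */
            case ',':
            case ';':
                    return 1;       /* separator */
            default:
                    return 0;       /* anything else */
            }
    }
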
diff --git a/Documentation/s390/vfio-ccw.rst b/Documentation/s390/vfio-ccw.rst
index 1e210c6afa88..fca9c4f5bd9c 100644
--- a/Documentation/s390/vfio-ccw.rst
+++ b/Documentation/s390/vfio-ccw.rst
@@ -180,6 +180,13 @@ The process of how these work together.
add it to an iommu_group and a vfio_group. Then we could pass through
the mdev to a guest.
+
+VFIO-CCW Regions
+----------------
+
+The vfio-ccw driver exposes MMIO regions to accept requests from and return
+results to userspace.
+
vfio-ccw I/O region
-------------------
@@ -205,6 +212,25 @@ irb_area stores the I/O result.
ret_code stores a return code for each access of the region.
+This region is always available.
+
+vfio-ccw cmd region
+-------------------
+
+The vfio-ccw cmd region is used to accept asynchronous instructions
+from userspace::
+
+ #define VFIO_CCW_ASYNC_CMD_HSCH (1 << 0)
+ #define VFIO_CCW_ASYNC_CMD_CSCH (1 << 1)
+ struct ccw_cmd_region {
+ __u32 command;
+ __u32 ret_code;
+ } __packed;
+
+This region is exposed via region type VFIO_REGION_SUBTYPE_CCW_ASYNC_CMD.
+
+Currently, CLEAR SUBCHANNEL and HALT SUBCHANNEL use this region.
+
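
For orientation only, here is a rough userspace sketch of driving the cmd
region; it is not part of the patch. It assumes the vfio device fd is already
open and that the file offset of the async cmd region has been looked up
beforehand (e.g. via VFIO_DEVICE_GET_REGION_INFO); the helper name is
invented::

    #include <errno.h>
    #include <unistd.h>
    #include <linux/vfio_ccw.h>  /* struct ccw_cmd_region, VFIO_CCW_ASYNC_CMD_* */

    /* Hypothetical helper: device_fd is the open vfio mdev fd, region_offset
     * is the previously discovered offset of the async cmd region. */
    static int issue_halt_subchannel(int device_fd, off_t region_offset)
    {
            struct ccw_cmd_region cmd = {
                    .command = VFIO_CCW_ASYNC_CMD_HSCH,
            };

            /* Writing the region asks the driver to perform the instruction. */
            if (pwrite(device_fd, &cmd, sizeof(cmd), region_offset) != sizeof(cmd))
                    return -errno;

            /* Read the region back to pick up ret_code. */
            if (pread(device_fd, &cmd, sizeof(cmd), region_offset) != sizeof(cmd))
                    return -errno;

            return cmd.ret_code;
    }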
vfio-ccw operation details
--------------------------
@@ -306,9 +332,8 @@ Together with the corresponding work in QEMU, we can bring the passed
through DASD/ECKD device online in a guest now and use it as a block
device.
-While the current code allows the guest to start channel programs via
-START SUBCHANNEL, support for HALT SUBCHANNEL or CLEAR SUBCHANNEL is
-not yet implemented.
+The current code allows the guest to start channel programs via
+START SUBCHANNEL, and to issue HALT SUBCHANNEL and CLEAR SUBCHANNEL.
vfio-ccw supports classic (command mode) channel I/O only. Transport
mode (HPF) is not supported.
diff --git a/Documentation/sh/conf.py b/Documentation/sh/conf.py
deleted file mode 100644
index 1eb684a13ac8..000000000000
--- a/Documentation/sh/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "SuperH architecture implementation manual"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'sh.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/sound/conf.py b/Documentation/sound/conf.py
deleted file mode 100644
index 3f1fc5e74e7b..000000000000
--- a/Documentation/sound/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Linux Sound Subsystem Documentation"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'sound.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/sphinx/load_config.py b/Documentation/sphinx/load_config.py
index 301a21aa4f63..eeb394b39e2c 100644
--- a/Documentation/sphinx/load_config.py
+++ b/Documentation/sphinx/load_config.py
@@ -21,6 +21,29 @@ def loadConfig(namespace):
and os.path.normpath(namespace["__file__"]) != os.path.normpath(config_file) ):
config_file = os.path.abspath(config_file)
+ # Let's avoid one conf.py file just due to latex_documents
+ start = config_file.find('Documentation/')
+ if start >= 0:
+ start = config_file.find('/', start + 1)
+
+ end = config_file.rfind('/')
+ if start >= 0 and end > 0:
+ dir = config_file[start + 1:end]
+
+ print("source directory: %s" % dir)
+ new_latex_docs = []
+ latex_documents = namespace['latex_documents']
+
+ for l in latex_documents:
+ if l[0].find(dir + '/') == 0:
+ has = True
+ fn = l[0][len(dir) + 1:]
+ new_latex_docs.append((fn, l[1], l[2], l[3], l[4]))
+ break
+
+ namespace['latex_documents'] = new_latex_docs
+
+ # If there is an extra conf.py file, load it
if os.path.isfile(config_file):
sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
config = namespace.copy()
@@ -29,4 +52,6 @@ def loadConfig(namespace):
del config['__file__']
namespace.update(config)
else:
- sys.stderr.write("WARNING: additional sphinx-config not found: %s\n" % config_file)
+ config = namespace.copy()
+ config['tags'].add("subproject")
+ namespace.update(config)
diff --git a/Documentation/translations/it_IT/doc-guide/sphinx.rst b/Documentation/translations/it_IT/doc-guide/sphinx.rst
index 1739cba8863e..f1ad4504b734 100644
--- a/Documentation/translations/it_IT/doc-guide/sphinx.rst
+++ b/Documentation/translations/it_IT/doc-guide/sphinx.rst
@@ -242,8 +242,9 @@ del kernel:
* Per inserire blocchi di testo con caratteri a dimensione fissa (codici di
esempio, casi d'uso, eccetera): utilizzate ``::`` quando non è necessario
evidenziare la sintassi, specialmente per piccoli frammenti; invece,
- utilizzate ``.. code-block:: <language>`` per blocchi di più lunghi che
- potranno beneficiare dell'avere la sintassi evidenziata.
+ utilizzate ``.. code-block:: <language>`` per blocchi più lunghi che
+ beneficeranno della sintassi evidenziata. Per un breve pezzo di codice da
+ inserire nel testo, usate \`\`.
Il dominio C
@@ -267,12 +268,14 @@ molto comune come ``open`` o ``ioctl``:
Il nome della funzione (per esempio ioctl) rimane nel testo ma il nome del suo
riferimento cambia da ``ioctl`` a ``VIDIOC_LOG_STATUS``. Anche la voce
-nell'indice cambia in ``VIDIOC_LOG_STATUS`` e si potrà quindi fare riferimento
-a questa funzione scrivendo:
-
-.. code-block:: rst
-
- :c:func:`VIDIOC_LOG_STATUS`
+nell'indice cambia in ``VIDIOC_LOG_STATUS``.
+
+Notate che per una funzione non c'è bisogno di usare ``c:func:`` per generarne
+i riferimenti nella documentazione. Grazie a qualche magica estensione a
+Sphinx, il sistema di generazione della documentazione trasformerà
+automaticamente un riferimento ad una ``funzione()`` in un riferimento
+incrociato quando questa ha una voce nell'indice. Se trovate degli usi di
+``c:func:`` nella documentazione del kernel, sentitevi liberi di rimuoverli.
Tabelle a liste
diff --git a/Documentation/translations/it_IT/process/index.rst b/Documentation/translations/it_IT/process/index.rst
index 2eda85d5cd1e..012de0f3154a 100644
--- a/Documentation/translations/it_IT/process/index.rst
+++ b/Documentation/translations/it_IT/process/index.rst
@@ -27,6 +27,7 @@ Di seguito le guide che ogni sviluppatore dovrebbe leggere.
code-of-conduct
development-process
submitting-patches
+ programming-language
coding-style
maintainer-pgp-guide
email-clients
diff --git a/Documentation/translations/it_IT/process/kernel-docs.rst b/Documentation/translations/it_IT/process/kernel-docs.rst
index 7bd70d661737..38e0a955121a 100644
--- a/Documentation/translations/it_IT/process/kernel-docs.rst
+++ b/Documentation/translations/it_IT/process/kernel-docs.rst
@@ -1,6 +1,7 @@
.. include:: ../disclaimer-ita.rst
:Original: :ref:`Documentation/process/kernel-docs.rst <kernel_docs>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
.. _it_kernel_docs:
@@ -8,6 +9,10 @@
Indice di documenti per le persone interessate a capire e/o scrivere per il kernel Linux
========================================================================================
-.. warning::
-
- TODO ancora da tradurre
+.. note::
+ Questo documento contiene riferimenti a documenti in lingua inglese; inoltre
+ utilizza dai campi *ReStructuredText* di supporto alla ricerca e che per
+ questo motivo è meglio non tradurre al fine di garantirne un corretto
+ utilizzo.
+ Per questi motivi il documento non verrà tradotto. Per favore fate
+ riferimento al documento originale in lingua inglese.
diff --git a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
index 276db0e37f43..118fb4153e8f 100644
--- a/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
+++ b/Documentation/translations/it_IT/process/maintainer-pgp-guide.rst
@@ -248,7 +248,10 @@ possano ricevere la vostra nuova sottochiave::
kernel.
Se per qualche ragione preferite rimanere con sottochiavi RSA, nel comando
- precedente, sostituite "ed25519" con "rsa2048".
+ precedente, sostituite "ed25519" con "rsa2048". In aggiunta, se avete
+ intenzione di usare un dispositivo hardware che non supporta le chiavi
+ ED25519 ECC, come la Nitrokey Pro o la Yubikey, allora dovreste usare
+ "nistp256" al posto di "ed25519".
Copia di riserva della chiave primaria per gestire il recupero da disastro
--------------------------------------------------------------------------
@@ -449,23 +452,27 @@ implementi le funzionalità delle smartcard. Sul mercato ci sono diverse
soluzioni disponibili:
- `Nitrokey Start`_: è Open hardware e Free Software, è basata sul progetto
- `GnuK`_ della FSIJ. Ha il supporto per chiavi ECC, ma meno funzionalità di
- sicurezza (come la resistenza alla manomissione o alcuni attacchi ad un
- canale laterale).
+ `GnuK`_ della FSIJ. Questo è uno dei pochi dispositivi a supportare le chiavi
+ ECC ED25519, ma offre meno funzionalità di sicurezza (come la resistenza
+ alla manomissione o alcuni attacchi ad un canale laterale).
- `Nitrokey Pro`_: è simile alla Nitrokey Start, ma è più resistente alla
- manomissione e offre più funzionalità di sicurezza, ma l'ECC.
-- `Yubikey 4`_: l'hardware e il software sono proprietari, ma è più economica
+ manomissione e offre più funzionalità di sicurezza. La Pro 2 supporta la
+ crittografia ECC (NISTP).
+- `Yubikey 5`_: l'hardware e il software sono proprietari, ma è più economica
della Nitrokey Pro ed è venduta anche con porta USB-C il che è utile con i
computer portatili più recenti. In aggiunta, offre altre funzionalità di
- sicurezza come FIDO, U2F, ma non l'ECC
+ sicurezza come FIDO, U2F, e ora supporta anche le chiavi ECC (NISTP)
`Su LWN c'è una buona recensione`_ dei modelli elencati qui sopra e altri.
+La scelta dipenderà dal costo, dalla disponibilità nella vostra area
+geografica e vostre considerazioni sull'hardware aperto/proprietario.
+
Se volete usare chiavi ECC, la vostra migliore scelta sul mercato è la
Nitrokey Start.
.. _`Nitrokey Start`: https://shop.nitrokey.com/shop/product/nitrokey-start-6
-.. _`Nitrokey Pro`: https://shop.nitrokey.com/shop/product/nitrokey-pro-3
-.. _`Yubikey 4`: https://www.yubico.com/product/yubikey-4-series/
+.. _`Nitrokey Pro 2`: https://shop.nitrokey.com/shop/product/nitrokey-pro-2-3
+.. _`Yubikey 5`: https://www.yubico.com/product/yubikey-5-overview/
.. _Gnuk: http://www.fsij.org/doc-gnuk/
.. _`Su LWN c'è una buona recensione`: https://lwn.net/Articles/736231/
diff --git a/Documentation/translations/it_IT/process/programming-language.rst b/Documentation/translations/it_IT/process/programming-language.rst
new file mode 100644
index 000000000000..f4b006395849
--- /dev/null
+++ b/Documentation/translations/it_IT/process/programming-language.rst
@@ -0,0 +1,51 @@
+.. include:: ../disclaimer-ita.rst
+
+:Original: :ref:`Documentation/process/programming-language.rst <programming_language>`
+:Translator: Federico Vaga <federico.vaga@vaga.pv.it>
+
+.. _it_programming_language:
+
+Linguaggio di programmazione
+============================
+
+Il kernel è scritto nel linguaggio di programmazione C [c-language]_.
+Più precisamente, il kernel viene compilato con ``gcc`` [gcc]_ usando
+l'opzione ``-std=gnu89`` [gcc-c-dialect-options]_: il dialetto GNU
+dello standard ISO C90 (con l'aggiunta di alcune funzionalità da C99)
+
+Questo dialetto contiene diverse estensioni al linguaggio [gnu-extensions]_,
+e molte di queste vengono usate sistematicamente dal kernel.
+
+Il kernel offre un certo livello di supporto per la compilazione con ``clang``
+[clang]_ e ``icc`` [icc]_ su diverse architetture, tuttavia in questo momento
+il supporto non è completo e richiede delle patch aggiuntive.
+
+Attributi
+---------
+
+Una delle estensioni più comuni e usate nel kernel sono gli attributi
+[gcc-attribute-syntax]_. Gli attributi permettono di aggiungere una semantica,
+definita dell'implementazione, alle entità del linguaggio (come le variabili,
+le funzioni o i tipi) senza dover fare importanti modifiche sintattiche al
+linguaggio stesso (come l'aggiunta di nuove parole chiave) [n2049]_.
+
+In alcuni casi, gli attributi sono opzionali (ovvero un compilatore che non
+dovesse supportarli dovrebbe produrre comunque codice corretto, anche se
+più lento o che non esegue controlli aggiuntivi durante la compilazione).
+
+Il kernel definisce alcune pseudo parole chiave (per esempio ``__pure``)
+in alternativa alla sintassi GNU per gli attributi (per esempio
+``__attribute__((__pure__))``) allo scopo di mostrare quali funzionalità si
+possono usare e/o per accorciare il codice.
+
+Per maggiori informazioni consultate il file d'intestazione
+``include/linux/compiler_attributes.h``.
+
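
To make the pseudo-keyword idea above concrete, here is a minimal hypothetical
sketch; the wrapper mirrors, roughly, what ``include/linux/compiler_attributes.h``
provides, and the example function is invented::

    /* Roughly what the kernel header provides: */
    #define __pure __attribute__((__pure__))

    /* A side-effect-free helper can then use the short form: */
    static __pure unsigned int count_bits(unsigned int x)
    {
            unsigned int n = 0;

            while (x) {
                    n += x & 1;
                    x >>= 1;
            }
            return n;
    }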
+.. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards
+.. [gcc] https://gcc.gnu.org
+.. [clang] https://clang.llvm.org
+.. [icc] https://software.intel.com/en-us/c-compilers
+.. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html
+.. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html
+.. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html
+.. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf
diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt
index a33c2a536542..2774624ee843 100644
--- a/Documentation/translations/ko_KR/memory-barriers.txt
+++ b/Documentation/translations/ko_KR/memory-barriers.txt
@@ -569,7 +569,7 @@ ACQUIRE 는 해당 오퍼레이션의 로드 부분에만 적용되고 RELEASE 는
[*] 버스 마스터링 DMA 와 일관성에 대해서는 다음을 참고하시기 바랍니다:
- Documentation/PCI/pci.rst
+ Documentation/driver-api/pci/pci.rst
Documentation/DMA-API-HOWTO.txt
Documentation/DMA-API.txt
diff --git a/Documentation/userspace-api/conf.py b/Documentation/userspace-api/conf.py
deleted file mode 100644
index 2eaf59f844e5..000000000000
--- a/Documentation/userspace-api/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "The Linux kernel user-space API guide"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'userspace-api.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/virtual/index.rst b/Documentation/virt/index.rst
index 062ffb527043..062ffb527043 100644
--- a/Documentation/virtual/index.rst
+++ b/Documentation/virt/index.rst
diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virt/kvm/amd-memory-encryption.rst
index d18c97b4e140..d18c97b4e140 100644
--- a/Documentation/virtual/kvm/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/amd-memory-encryption.rst
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virt/kvm/api.txt
index e54a3f51ddc5..2d067767b617 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virt/kvm/api.txt
@@ -3781,7 +3781,7 @@ encrypted VMs.
Currently, this ioctl is used for issuing Secure Encrypted Virtualization
(SEV) commands on AMD Processors. The SEV commands are defined in
-Documentation/virtual/kvm/amd-memory-encryption.rst.
+Documentation/virt/kvm/amd-memory-encryption.rst.
4.111 KVM_MEMORY_ENCRYPT_REG_REGION
diff --git a/Documentation/virtual/kvm/arm/hyp-abi.txt b/Documentation/virt/kvm/arm/hyp-abi.txt
index a20a0bee268d..a20a0bee268d 100644
--- a/Documentation/virtual/kvm/arm/hyp-abi.txt
+++ b/Documentation/virt/kvm/arm/hyp-abi.txt
diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virt/kvm/arm/psci.txt
index 559586fc9d37..559586fc9d37 100644
--- a/Documentation/virtual/kvm/arm/psci.txt
+++ b/Documentation/virt/kvm/arm/psci.txt
diff --git a/Documentation/virtual/kvm/cpuid.rst b/Documentation/virt/kvm/cpuid.rst
index 01b081f6e7ea..01b081f6e7ea 100644
--- a/Documentation/virtual/kvm/cpuid.rst
+++ b/Documentation/virt/kvm/cpuid.rst
diff --git a/Documentation/virtual/kvm/devices/README b/Documentation/virt/kvm/devices/README
index 34a69834124a..34a69834124a 100644
--- a/Documentation/virtual/kvm/devices/README
+++ b/Documentation/virt/kvm/devices/README
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virt/kvm/devices/arm-vgic-its.txt
index eeaa95b893a8..eeaa95b893a8 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic-its.txt
diff --git a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt b/Documentation/virt/kvm/devices/arm-vgic-v3.txt
index ff290b43c8e5..ff290b43c8e5 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic-v3.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic-v3.txt
diff --git a/Documentation/virtual/kvm/devices/arm-vgic.txt b/Documentation/virt/kvm/devices/arm-vgic.txt
index 97b6518148f8..97b6518148f8 100644
--- a/Documentation/virtual/kvm/devices/arm-vgic.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic.txt
diff --git a/Documentation/virtual/kvm/devices/mpic.txt b/Documentation/virt/kvm/devices/mpic.txt
index 8257397adc3c..8257397adc3c 100644
--- a/Documentation/virtual/kvm/devices/mpic.txt
+++ b/Documentation/virt/kvm/devices/mpic.txt
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virt/kvm/devices/s390_flic.txt
index a4e20a090174..a4e20a090174 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virt/kvm/devices/s390_flic.txt
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virt/kvm/devices/vcpu.txt
index 2b5dab16c4f2..2b5dab16c4f2 100644
--- a/Documentation/virtual/kvm/devices/vcpu.txt
+++ b/Documentation/virt/kvm/devices/vcpu.txt
diff --git a/Documentation/virtual/kvm/devices/vfio.txt b/Documentation/virt/kvm/devices/vfio.txt
index 528c77c8022c..528c77c8022c 100644
--- a/Documentation/virtual/kvm/devices/vfio.txt
+++ b/Documentation/virt/kvm/devices/vfio.txt
diff --git a/Documentation/virtual/kvm/devices/vm.txt b/Documentation/virt/kvm/devices/vm.txt
index 4ffb82b02468..4ffb82b02468 100644
--- a/Documentation/virtual/kvm/devices/vm.txt
+++ b/Documentation/virt/kvm/devices/vm.txt
diff --git a/Documentation/virtual/kvm/devices/xics.txt b/Documentation/virt/kvm/devices/xics.txt
index 42864935ac5d..42864935ac5d 100644
--- a/Documentation/virtual/kvm/devices/xics.txt
+++ b/Documentation/virt/kvm/devices/xics.txt
diff --git a/Documentation/virtual/kvm/devices/xive.txt b/Documentation/virt/kvm/devices/xive.txt
index 9a24a4525253..9a24a4525253 100644
--- a/Documentation/virtual/kvm/devices/xive.txt
+++ b/Documentation/virt/kvm/devices/xive.txt
diff --git a/Documentation/virtual/kvm/halt-polling.txt b/Documentation/virt/kvm/halt-polling.txt
index 4f791b128dd2..4f791b128dd2 100644
--- a/Documentation/virtual/kvm/halt-polling.txt
+++ b/Documentation/virt/kvm/halt-polling.txt
diff --git a/Documentation/virtual/kvm/hypercalls.txt b/Documentation/virt/kvm/hypercalls.txt
index da210651f714..5f6d291bd004 100644
--- a/Documentation/virtual/kvm/hypercalls.txt
+++ b/Documentation/virt/kvm/hypercalls.txt
@@ -18,7 +18,7 @@ S390:
number in R1.
For further information on the S390 diagnose call as supported by KVM,
- refer to Documentation/virtual/kvm/s390-diag.txt.
+ refer to Documentation/virt/kvm/s390-diag.txt.
PowerPC:
It uses R3-R10 and hypercall number in R11. R4-R11 are used as output registers.
@@ -26,7 +26,7 @@ S390:
KVM hypercalls uses 4 byte opcode, that are patched with 'hypercall-instructions'
property inside the device tree's /hypervisor node.
- For more information refer to Documentation/virtual/kvm/ppc-pv.txt
+ For more information refer to Documentation/virt/kvm/ppc-pv.txt
MIPS:
KVM hypercalls use the HYPCALL instruction with code 0 and the hypercall
diff --git a/Documentation/virtual/kvm/index.rst b/Documentation/virt/kvm/index.rst
index 0b206a06f5be..ada224a511fe 100644
--- a/Documentation/virtual/kvm/index.rst
+++ b/Documentation/virt/kvm/index.rst
@@ -9,3 +9,4 @@ KVM
amd-memory-encryption
cpuid
+ vcpu-requests
diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virt/kvm/locking.txt
index 635cd6eaf714..635cd6eaf714 100644
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virt/kvm/locking.txt
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virt/kvm/mmu.txt
index 2efe0efc516e..1b9880dfba0a 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virt/kvm/mmu.txt
@@ -298,7 +298,7 @@ Handling a page fault is performed as follows:
vcpu->arch.mmio_gfn, and call the emulator
- If both P bit and R/W bit of error code are set, this could possibly
be handled as a "fast page fault" (fixed without taking the MMU lock). See
- the description in Documentation/virtual/kvm/locking.txt.
+ the description in Documentation/virt/kvm/locking.txt.
- if needed, walk the guest page tables to determine the guest translation
(gva->gpa or ngpa->gpa)
- if permissions are insufficient, reflect the fault back to the guest
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virt/kvm/msr.txt
index df1f4338b3ca..df1f4338b3ca 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virt/kvm/msr.txt
diff --git a/Documentation/virtual/kvm/nested-vmx.txt b/Documentation/virt/kvm/nested-vmx.txt
index 97eb1353e962..97eb1353e962 100644
--- a/Documentation/virtual/kvm/nested-vmx.txt
+++ b/Documentation/virt/kvm/nested-vmx.txt
diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virt/kvm/ppc-pv.txt
index e26115ce4258..e26115ce4258 100644
--- a/Documentation/virtual/kvm/ppc-pv.txt
+++ b/Documentation/virt/kvm/ppc-pv.txt
diff --git a/Documentation/virtual/kvm/review-checklist.txt b/Documentation/virt/kvm/review-checklist.txt
index a83b27635fdd..499af499e296 100644
--- a/Documentation/virtual/kvm/review-checklist.txt
+++ b/Documentation/virt/kvm/review-checklist.txt
@@ -7,7 +7,7 @@ Review checklist for kvm patches
2. Patches should be against kvm.git master branch.
3. If the patch introduces or modifies a new userspace API:
- - the API must be documented in Documentation/virtual/kvm/api.txt
+ - the API must be documented in Documentation/virt/kvm/api.txt
- the API must be discoverable using KVM_CHECK_EXTENSION
4. New state must include support for save/restore.
diff --git a/Documentation/virtual/kvm/s390-diag.txt b/Documentation/virt/kvm/s390-diag.txt
index 7c52e5f8b210..7c52e5f8b210 100644
--- a/Documentation/virtual/kvm/s390-diag.txt
+++ b/Documentation/virt/kvm/s390-diag.txt
diff --git a/Documentation/virtual/kvm/timekeeping.txt b/Documentation/virt/kvm/timekeeping.txt
index 76808a17ad84..76808a17ad84 100644
--- a/Documentation/virtual/kvm/timekeeping.txt
+++ b/Documentation/virt/kvm/timekeeping.txt
diff --git a/Documentation/virtual/kvm/vcpu-requests.rst b/Documentation/virt/kvm/vcpu-requests.rst
index 5feb3706a7ae..5feb3706a7ae 100644
--- a/Documentation/virtual/kvm/vcpu-requests.rst
+++ b/Documentation/virt/kvm/vcpu-requests.rst
diff --git a/Documentation/virtual/paravirt_ops.rst b/Documentation/virt/paravirt_ops.rst
index 6b789d27cead..6b789d27cead 100644
--- a/Documentation/virtual/paravirt_ops.rst
+++ b/Documentation/virt/paravirt_ops.rst
diff --git a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt b/Documentation/virt/uml/UserModeLinux-HOWTO.txt
index 87b80f589e1c..87b80f589e1c 100644
--- a/Documentation/virtual/uml/UserModeLinux-HOWTO.txt
+++ b/Documentation/virt/uml/UserModeLinux-HOWTO.txt
diff --git a/Documentation/vm/conf.py b/Documentation/vm/conf.py
deleted file mode 100644
index 3b0b601af558..000000000000
--- a/Documentation/vm/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "Linux Memory Management Documentation"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'memory-management.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/Documentation/vm/hmm.rst b/Documentation/vm/hmm.rst
index 7d90964abbb0..710ce1c701bf 100644
--- a/Documentation/vm/hmm.rst
+++ b/Documentation/vm/hmm.rst
@@ -237,7 +237,7 @@ The usage pattern is::
ret = hmm_range_snapshot(&range);
if (ret) {
up_read(&mm->mmap_sem);
- if (ret == -EAGAIN) {
+ if (ret == -EBUSY) {
/*
* No need to check hmm_range_wait_until_valid() return value
* on retry we will get proper error with hmm_range_snapshot()
diff --git a/Documentation/watchdog/hpwdt.rst b/Documentation/watchdog/hpwdt.rst
index c165d92cfd12..c824cd7f6e32 100644
--- a/Documentation/watchdog/hpwdt.rst
+++ b/Documentation/watchdog/hpwdt.rst
@@ -63,7 +63,7 @@ Last reviewed: 08/20/2018
and loop forever. This is generally not what a watchdog user wants.
For those wishing to learn more please see:
- Documentation/kdump/kdump.rst
+ Documentation/admin-guide/kdump/kdump.rst
Documentation/admin-guide/kernel-parameters.txt (panic=)
Your Linux Distribution specific documentation.
diff --git a/Documentation/x86/conf.py b/Documentation/x86/conf.py
deleted file mode 100644
index 33c5c3142e20..000000000000
--- a/Documentation/x86/conf.py
+++ /dev/null
@@ -1,10 +0,0 @@
-# -*- coding: utf-8; mode: python -*-
-
-project = "X86 architecture specific documentation"
-
-tags.add("subproject")
-
-latex_documents = [
- ('index', 'x86.tex', project,
- 'The kernel development community', 'manual'),
-]
diff --git a/MAINTAINERS b/MAINTAINERS
index ee663e0e2f2e..e352550a6895 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -899,7 +899,7 @@ L: linux-iio@vger.kernel.org
W: http://ez.analog.com/community/linux-device-drivers
S: Supported
F: drivers/iio/adc/ad7124.c
-F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt
+F: Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
ANALOG DEVICES INC AD7606 DRIVER
M: Stefan Popa <stefan.popa@analog.com>
@@ -1194,7 +1194,7 @@ F: include/uapi/linux/if_arcnet.h
ARM ARCHITECTED TIMER DRIVER
M: Mark Rutland <mark.rutland@arm.com>
-M: Marc Zyngier <marc.zyngier@arm.com>
+M: Marc Zyngier <maz@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/include/asm/arch_timer.h
@@ -2155,10 +2155,12 @@ F: Documentation/devicetree/bindings/arm/realtek.txt
ARM/RENESAS ARM64 ARCHITECTURE
M: Simon Horman <horms@verge.net.au>
+M: Geert Uytterhoeven <geert+renesas@glider.be>
M: Magnus Damm <magnus.damm@gmail.com>
L: linux-renesas-soc@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
S: Supported
F: arch/arm64/boot/dts/renesas/
F: Documentation/devicetree/bindings/arm/renesas.yaml
@@ -2269,10 +2271,12 @@ F: drivers/media/platform/s5p-mfc/
ARM/SHMOBILE ARM ARCHITECTURE
M: Simon Horman <horms@verge.net.au>
+M: Geert Uytterhoeven <geert+renesas@glider.be>
M: Magnus Damm <magnus.damm@gmail.com>
L: linux-renesas-soc@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
S: Supported
F: arch/arm/boot/dts/emev2*
F: arch/arm/boot/dts/gr-peach*
@@ -4195,7 +4199,7 @@ M: Jens Axboe <axboe@kernel.dk>
L: cgroups@vger.kernel.org
L: linux-block@vger.kernel.org
T: git git://git.kernel.dk/linux-block
-F: Documentation/cgroup-v1/blkio-controller.rst
+F: Documentation/admin-guide/cgroup-v1/blkio-controller.rst
F: block/blk-cgroup.c
F: include/linux/blk-cgroup.h
F: block/blk-throttle.c
@@ -4474,7 +4478,7 @@ F: arch/powerpc/platforms/powernv/pci-cxl.c
F: drivers/misc/cxl/
F: include/misc/cxl*
F: include/uapi/misc/cxl.h
-F: Documentation/powerpc/cxl.txt
+F: Documentation/powerpc/cxl.rst
F: Documentation/ABI/testing/sysfs-class-cxl
CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
@@ -4485,7 +4489,7 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/cxlflash/
F: include/uapi/scsi/cxlflash_ioctl.h
-F: Documentation/powerpc/cxlflash.txt
+F: Documentation/powerpc/cxlflash.rst
CYBERPRO FB DRIVER
M: Russell King <linux@armlinux.org.uk>
@@ -6327,7 +6331,8 @@ F: Documentation/devicetree/bindings/counter/ftm-quaddec.txt
F: drivers/counter/ftm-quaddec.c
FLOPPY DRIVER
-S: Orphan
+M: Denis Efremov <efremov@linux.com>
+S: Odd Fixes
L: linux-block@vger.kernel.org
F: drivers/block/floppy.c
@@ -6827,13 +6832,6 @@ F: Documentation/filesystems/gfs2*.txt
F: fs/gfs2/
F: include/uapi/linux/gfs2_ondisk.h
-GIGASET ISDN DRIVERS
-M: Paul Bolle <pebolle@tiscali.nl>
-L: gigaset307x-common@lists.sourceforge.net
-W: http://gigaset307x.sourceforge.net/
-S: Odd Fixes
-F: drivers/staging/isdn/gigaset/
-
GNSS SUBSYSTEM
M: Johan Hovold <johan@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/johan/gnss.git
@@ -6861,7 +6859,7 @@ R: Sagi Shahar <sagis@google.com>
R: Jon Olson <jonolson@google.com>
L: netdev@vger.kernel.org
S: Supported
-F: Documentation/networking/device_drivers/google/gve.txt
+F: Documentation/networking/device_drivers/google/gve.rst
F: drivers/net/ethernet/google
GPD POCKET FAN DRIVER
@@ -8495,7 +8493,7 @@ S: Obsolete
F: include/uapi/linux/ipx.h
IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
-M: Marc Zyngier <marc.zyngier@arm.com>
+M: Marc Zyngier <maz@kernel.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: Documentation/IRQ-domain.txt
@@ -8513,7 +8511,7 @@ F: kernel/irq/
IRQCHIP DRIVERS
M: Thomas Gleixner <tglx@linutronix.de>
M: Jason Cooper <jason@lakedaemon.net>
-M: Marc Zyngier <marc.zyngier@arm.com>
+M: Marc Zyngier <maz@kernel.org>
L: linux-kernel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -8813,7 +8811,7 @@ L: kvm@vger.kernel.org
W: http://www.linux-kvm.org
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
S: Supported
-F: Documentation/virtual/kvm/
+F: Documentation/virt/kvm/
F: include/trace/events/kvm.h
F: include/uapi/asm-generic/kvm*
F: include/uapi/linux/kvm*
@@ -8833,10 +8831,10 @@ F: arch/x86/include/asm/svm.h
F: arch/x86/kvm/svm.c
KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
-M: Marc Zyngier <marc.zyngier@arm.com>
+M: Marc Zyngier <maz@kernel.org>
R: James Morse <james.morse@arm.com>
-R: Julien Thierry <julien.thierry@arm.com>
-R: Suzuki K Pouloze <suzuki.poulose@arm.com>
+R: Julien Thierry <julien.thierry.kdev@gmail.com>
+R: Suzuki K Poulose <suzuki.poulose@arm.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: kvmarm@lists.cs.columbia.edu
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
@@ -11149,6 +11147,7 @@ L: netdev@vger.kernel.org
S: Maintained
W: https://fedorahosted.org/dropwatch/
F: net/core/drop_monitor.c
+F: include/uapi/linux/net_dropmon.h
NETWORKING DRIVERS
M: "David S. Miller" <davem@davemloft.net>
@@ -11287,6 +11286,7 @@ M: Aviad Yehezkel <aviadye@mellanox.com>
M: Dave Watson <davejwatson@fb.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
+M: Jakub Kicinski <jakub.kicinski@netronome.com>
L: netdev@vger.kernel.org
S: Maintained
F: net/tls/*
@@ -12141,7 +12141,7 @@ M: Thomas Hellstrom <thellstrom@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
L: virtualization@lists.linux-foundation.org
S: Supported
-F: Documentation/virtual/paravirt_ops.txt
+F: Documentation/virt/paravirt_ops.rst
F: arch/*/kernel/paravirt*
F: arch/*/include/asm/paravirt*.h
F: include/linux/hypervisor.h
@@ -12398,7 +12398,7 @@ F: Documentation/PCI/pci-error-recovery.rst
F: drivers/pci/pcie/aer.c
F: drivers/pci/pcie/dpc.c
F: drivers/pci/pcie/err.c
-F: Documentation/powerpc/eeh-pci-error-recovery.txt
+F: Documentation/powerpc/eeh-pci-error-recovery.rst
F: arch/powerpc/kernel/eeh*.c
F: arch/powerpc/platforms/*/eeh*.c
F: arch/powerpc/include/*/eeh*.h
@@ -13729,6 +13729,7 @@ F: drivers/mtd/nand/raw/r852.c
F: drivers/mtd/nand/raw/r852.h
RISC-V ARCHITECTURE
+M: Paul Walmsley <paul.walmsley@sifive.com>
M: Palmer Dabbelt <palmer@sifive.com>
M: Albert Ou <aou@eecs.berkeley.edu>
L: linux-riscv@lists.infradead.org
@@ -13951,7 +13952,6 @@ F: drivers/pci/hotplug/s390_pci_hpc.c
S390 VFIO-CCW DRIVER
M: Cornelia Huck <cohuck@redhat.com>
-M: Farhan Ali <alifm@linux.ibm.com>
M: Eric Farman <farman@linux.ibm.com>
R: Halil Pasic <pasic@linux.ibm.com>
L: linux-s390@vger.kernel.org
@@ -14020,6 +14020,12 @@ F: drivers/media/common/saa7146/
F: drivers/media/pci/saa7146/
F: include/media/drv-intf/saa7146*
+SAFESETID SECURITY MODULE
+M: Micah Morton <mortonm@chromium.org>
+S: Supported
+F: security/safesetid/
+F: Documentation/admin-guide/LSM/SafeSetID.rst
+
SAMSUNG AUDIO (ASoC) DRIVERS
M: Krzysztof Kozlowski <krzk@kernel.org>
M: Sangbeom Kim <sbkim73@samsung.com>
@@ -16858,7 +16864,7 @@ W: http://user-mode-linux.sourceforge.net
Q: https://patchwork.ozlabs.org/project/linux-um/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git
S: Maintained
-F: Documentation/virtual/uml/
+F: Documentation/virt/uml/
F: arch/um/
F: arch/x86/um/
F: fs/hostfs/
@@ -17127,7 +17133,7 @@ F: drivers/virtio/virtio_input.c
F: include/uapi/linux/virtio_input.h
VIRTIO IOMMU DRIVER
-M: Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
+M: Jean-Philippe Brucker <jean-philippe@linaro.org>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/iommu/virtio-iommu.c
@@ -17176,7 +17182,6 @@ F: drivers/vme/
F: include/linux/vme*
VMWARE BALLOON DRIVER
-M: Julien Freche <jfreche@vmware.com>
M: Nadav Amit <namit@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
L: linux-kernel@vger.kernel.org
@@ -17559,7 +17564,6 @@ M: Jakub Kicinski <jakub.kicinski@netronome.com>
M: Jesper Dangaard Brouer <hawk@kernel.org>
M: John Fastabend <john.fastabend@gmail.com>
L: netdev@vger.kernel.org
-L: xdp-newbies@vger.kernel.org
L: bpf@vger.kernel.org
S: Supported
F: net/core/xdp.c
diff --git a/Makefile b/Makefile
index 9be5834073f8..23cdf1f41364 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = Bobtail Squid
# *DOCUMENTATION*
@@ -472,6 +472,7 @@ KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
KBUILD_LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
+CLANG_FLAGS :=
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP PAHOLE KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
@@ -519,7 +520,7 @@ endif
ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
ifneq ($(CROSS_COMPILE),)
-CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
@@ -843,6 +844,9 @@ NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
# warn about C99 declaration after statement
KBUILD_CFLAGS += -Wdeclaration-after-statement
+# Warn about unmarked fall-throughs in switch statements.
+KBUILD_CFLAGS += $(call cc-option,-Wimplicit-fallthrough=3,)
+
# Variable Length Arrays (VLAs) should not be used anywhere in the kernel
KBUILD_CFLAGS += -Wvla
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index c929bea9a9ff..85710e078afb 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1535,7 +1535,6 @@ config DEBUG_LL_INCLUDE
DEBUG_IMX7D_UART
default "debug/ks8695.S" if DEBUG_KS8695_UART
default "debug/msm.S" if DEBUG_QCOM_UARTDM
- default "debug/netx.S" if DEBUG_NETX_UART
default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
default "debug/renesas-scif.S" if DEBUG_R7S72100_SCIF2
default "debug/renesas-scif.S" if DEBUG_RCAR_GEN1_SCIF0
@@ -1575,7 +1574,6 @@ config DEBUG_UART_8250
config DEBUG_UART_PHYS
hex "Physical base address of debug UART"
- default 0x00100a00 if DEBUG_NETX_UART
default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0
default 0x01c28000 if DEBUG_SUNXI_UART0
default 0x01c28400 if DEBUG_SUNXI_UART1
@@ -1700,7 +1698,6 @@ config DEBUG_UART_PHYS
DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
DEBUG_LL_UART_EFM32 || \
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
- DEBUG_NETX_UART || \
DEBUG_QCOM_UARTDM || DEBUG_R7S72100_SCIF2 || \
DEBUG_RCAR_GEN1_SCIF0 || DEBUG_RCAR_GEN1_SCIF2 || \
DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF1 || \
@@ -1717,7 +1714,6 @@ config DEBUG_UART_VIRT
default 0xc881f000 if DEBUG_RV1108_UART2
default 0xc8821000 if DEBUG_RV1108_UART1
default 0xc8912000 if DEBUG_RV1108_UART0
- default 0xe0000a00 if DEBUG_NETX_UART
default 0xe0010fe0 if ARCH_RPC
default 0xf0000be0 if ARCH_EBSA110
default 0xf0010000 if DEBUG_ASM9260_UART
@@ -1822,7 +1818,6 @@ config DEBUG_UART_VIRT
default DEBUG_UART_PHYS if !MMU
depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
- DEBUG_NETX_UART || \
DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
DEBUG_S3C64XX_UART || \
DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 18d0ae46e76c..0faae8950375 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -124,6 +124,9 @@
};
mdio-bus-mux {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
/* BIT(9) = 1 => external mdio */
mdio_ext: mdio@200 {
reg = <0x200>;
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
index cbe61b61a212..c2a9dd57e56a 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
@@ -112,7 +112,7 @@
};
&i2c2 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-geam.dts b/arch/arm/boot/dts/imx6ul-geam.dts
index 21ddd359d3ed..9f63706383a7 100644
--- a/arch/arm/boot/dts/imx6ul-geam.dts
+++ b/arch/arm/boot/dts/imx6ul-geam.dts
@@ -156,7 +156,7 @@
};
&i2c2 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi
index b26d4f57c655..cc9adce638f5 100644
--- a/arch/arm/boot/dts/imx6ul-isiot.dtsi
+++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi
@@ -148,7 +148,7 @@
};
&i2c2 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
index 39eeeddac39e..09f7ffa9ad8c 100644
--- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
@@ -43,7 +43,7 @@
};
&i2c2 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx6ul-pico-pi.dts b/arch/arm/boot/dts/imx6ul-pico-pi.dts
index de07357b27fc..6cd7d5877d20 100644
--- a/arch/arm/boot/dts/imx6ul-pico-pi.dts
+++ b/arch/arm/boot/dts/imx6ul-pico-pi.dts
@@ -43,7 +43,7 @@
};
&i2c2 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c2>;
status = "okay";
@@ -58,7 +58,7 @@
};
&i2c3 {
- clock_frequency = <100000>;
+ clock-frequency = <100000>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
status = "okay";
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index 992747a57442..56907bb4b329 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -186,7 +186,7 @@
reg = <0x40330200 0x200>;
};
- usbphy1: usb-phy@0x40350000 {
+ usbphy1: usb-phy@40350000 {
compatible = "fsl,imx7ulp-usbphy", "fsl,imx6ul-usbphy";
reg = <0x40350000 0x1000>;
interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index e6b98b6eb88d..822cddfbf1af 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -4,17 +4,9 @@ CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_U8500=y
-CONFIG_MACH_HREFV60=y
-CONFIG_MACH_SNOWBALL=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
CONFIG_HIGHMEM=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
@@ -25,6 +17,11 @@ CONFIG_CPU_IDLE=y
CONFIG_ARM_U8500_CPUIDLE=y
CONFIG_VFP=y
CONFIG_NEON=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_CMA=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -47,7 +44,6 @@ CONFIG_SMSC911X=y
CONFIG_SMSC_PHY=y
CONFIG_CW1200=y
CONFIG_CW1200_WLAN_SDIO=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_KEYBOARD_ATKBD is not set
CONFIG_KEYBOARD_GPIO=y
@@ -63,7 +59,6 @@ CONFIG_RMI4_CORE=y
CONFIG_RMI4_I2C=y
CONFIG_RMI4_F11=y
# CONFIG_SERIO is not set
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
@@ -72,6 +67,7 @@ CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_STMPE=y
CONFIG_GPIO_TC3589X=y
+CONFIG_SENSORS_IIO_HWMON=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_WATCHDOG=y
@@ -79,6 +75,13 @@ CONFIG_MFD_STMPE=y
CONFIG_MFD_TC3589X=y
CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_DRM=y
+CONFIG_DRM_PANEL_SAMSUNG_S6D16D0=y
+CONFIG_DRM_LIMA=y
+CONFIG_DRM_MCDE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=m
+CONFIG_LOGO=y
CONFIG_SOUND=y
CONFIG_SND=y
CONFIG_SND_SOC=y
@@ -87,6 +90,7 @@ CONFIG_SND_SOC_UX500_MACH_MOP500=y
CONFIG_USB=y
CONFIG_USB_MUSB_HDRC=y
CONFIG_USB_MUSB_UX500=y
+CONFIG_MUSB_PIO_ONLY=y
CONFIG_AB8500_USB=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=m
@@ -103,6 +107,7 @@ CONFIG_RTC_DRV_AB8500=y
CONFIG_RTC_DRV_PL031=y
CONFIG_DMADEVICES=y
CONFIG_STE_DMA40=y
+CONFIG_HWSPINLOCK=y
CONFIG_HSEM_U8500=y
CONFIG_IIO=y
CONFIG_IIO_SW_TRIGGER=y
@@ -126,20 +131,19 @@ CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_CRYPTO_DEV_UX500=y
+CONFIG_CRYPTO_DEV_UX500_CRYP=y
+CONFIG_CRYPTO_DEV_UX500_HASH=y
+CONFIG_CRYPTO_DEV_UX500_DEBUG=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
CONFIG_DEBUG_USER=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_SINK_TPIU=y
CONFIG_CORESIGHT_SINK_ETBV10=y
CONFIG_CORESIGHT_SOURCE_ETM3X=y
-CONFIG_CRYPTO_DEV_UX500=y
-CONFIG_CRYPTO_DEV_UX500_CRYP=y
-CONFIG_CRYPTO_DEV_UX500_HASH=y
-CONFIG_CRYPTO_DEV_UX500_DEBUG=y
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 7e0486ad1318..dba9355e2484 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -18,7 +18,9 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
{
- return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
+ if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
+ return &arm_dma_ops;
+ return NULL;
}
#ifdef __arch_page_to_dma
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index 05d03f09ff54..71262dcdbca3 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -24,6 +24,7 @@
#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
.text
+ .arch armv5te
/*
* Move DaVinci into deep sleep state
*
diff --git a/arch/arm/mach-netx/Kconfig b/arch/arm/mach-netx/Kconfig
deleted file mode 100644
index 1e5d9c870784..000000000000
--- a/arch/arm/mach-netx/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "NetX Implementations"
- depends on ARCH_NETX
-
-config MACH_NXDKN
- bool "Enable Hilscher nxdkn Eval Board support"
- help
- Board support for the Hilscher NetX Eval Board
-
-config MACH_NXDB500
- bool "Enable Hilscher nxdb500 Eval Board support"
- select ARM_AMBA
- help
- Board support for the Hilscher nxdb500 Eval Board
-
-config MACH_NXEB500HMI
- bool "Enable Hilscher nxeb500hmi Eval Board support"
- select ARM_AMBA
- help
- Board support for the Hilscher nxeb500hmi Eval Board
-
-endmenu
diff --git a/arch/arm/mach-netx/Makefile b/arch/arm/mach-netx/Makefile
deleted file mode 100644
index 44ea83f7d9c2..000000000000
--- a/arch/arm/mach-netx/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-
-obj-y += time.o generic.o pfifo.o xc.o
-
-# Specific board support
-obj-$(CONFIG_MACH_NXDKN) += nxdkn.o
-obj-$(CONFIG_MACH_NXDB500) += nxdb500.o fb.o
-obj-$(CONFIG_MACH_NXEB500HMI) += nxeb500hmi.o fb.o
diff --git a/arch/arm/mach-netx/Makefile.boot b/arch/arm/mach-netx/Makefile.boot
deleted file mode 100644
index 2eb23c0cb6b0..000000000000
--- a/arch/arm/mach-netx/Makefile.boot
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
- zreladdr-y += 0x80008000
-
diff --git a/arch/arm/mach-netx/fb.c b/arch/arm/mach-netx/fb.c
deleted file mode 100644
index 2dc80db07390..000000000000
--- a/arch/arm/mach-netx/fb.c
+++ /dev/null
@@ -1,65 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/fb.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-#include <linux/err.h>
-#include <linux/gfp.h>
-
-#include <asm/irq.h>
-
-#include <mach/netx-regs.h>
-#include <mach/hardware.h>
-
-static struct clcd_panel *netx_panel;
-
-void netx_clcd_enable(struct clcd_fb *fb)
-{
-}
-
-int netx_clcd_setup(struct clcd_fb *fb)
-{
- dma_addr_t dma;
-
- fb->panel = netx_panel;
-
- fb->fb.screen_base = dma_alloc_wc(&fb->dev->dev, 1024 * 1024, &dma,
- GFP_KERNEL);
- if (!fb->fb.screen_base) {
- printk(KERN_ERR "CLCD: unable to map framebuffer\n");
- return -ENOMEM;
- }
-
- fb->fb.fix.smem_start = dma;
- fb->fb.fix.smem_len = 1024*1024;
-
- return 0;
-}
-
-int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma)
-{
- return dma_mmap_wc(&fb->dev->dev, vma, fb->fb.screen_base,
- fb->fb.fix.smem_start, fb->fb.fix.smem_len);
-}
-
-void netx_clcd_remove(struct clcd_fb *fb)
-{
- dma_free_wc(&fb->dev->dev, fb->fb.fix.smem_len, fb->fb.screen_base,
- fb->fb.fix.smem_start);
-}
-
-static AMBA_AHB_DEVICE(fb, "fb", 0, 0x00104000, { NETX_IRQ_LCD }, NULL);
-
-int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel)
-{
- netx_panel = panel;
- fb_device.dev.platform_data = board;
- return amba_device_register(&fb_device, &iomem_resource);
-}
diff --git a/arch/arm/mach-netx/fb.h b/arch/arm/mach-netx/fb.h
deleted file mode 100644
index 5cdc01fc3c86..000000000000
--- a/arch/arm/mach-netx/fb.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/fb.h
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-void netx_clcd_enable(struct clcd_fb *fb);
-int netx_clcd_setup(struct clcd_fb *fb);
-int netx_clcd_mmap(struct clcd_fb *fb, struct vm_area_struct *vma);
-void netx_clcd_remove(struct clcd_fb *fb);
-int netx_fb_init(struct clcd_board *board, struct clcd_panel *panel);
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
deleted file mode 100644
index 88881fd45e9f..000000000000
--- a/arch/arm/mach-netx/generic.c
+++ /dev/null
@@ -1,182 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/generic.c
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/irqchip/arm-vic.h>
-#include <linux/reboot.h>
-#include <mach/hardware.h>
-#include <asm/mach/map.h>
-#include <mach/netx-regs.h>
-#include <asm/mach/irq.h>
-
-static struct map_desc netx_io_desc[] __initdata = {
- {
- .virtual = NETX_IO_VIRT,
- .pfn = __phys_to_pfn(NETX_IO_PHYS),
- .length = NETX_IO_SIZE,
- .type = MT_DEVICE
- }
-};
-
-void __init netx_map_io(void)
-{
- iotable_init(netx_io_desc, ARRAY_SIZE(netx_io_desc));
-}
-
-static struct resource netx_rtc_resources[] = {
- [0] = {
- .start = 0x00101200,
- .end = 0x00101220,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device netx_rtc_device = {
- .name = "netx-rtc",
- .id = 0,
- .num_resources = ARRAY_SIZE(netx_rtc_resources),
- .resource = netx_rtc_resources,
-};
-
-static struct platform_device *devices[] __initdata = {
- &netx_rtc_device,
-};
-
-#if 0
-#define DEBUG_IRQ(fmt...) printk(fmt)
-#else
-#define DEBUG_IRQ(fmt...) while (0) {}
-#endif
-
-static void netx_hif_demux_handler(struct irq_desc *desc)
-{
- unsigned int irq = NETX_IRQ_HIF_CHAINED(0);
- unsigned int stat;
-
- stat = ((readl(NETX_DPMAS_INT_EN) &
- readl(NETX_DPMAS_INT_STAT)) >> 24) & 0x1f;
-
- while (stat) {
- if (stat & 1) {
- DEBUG_IRQ("handling irq %d\n", irq);
- generic_handle_irq(irq);
- }
- irq++;
- stat >>= 1;
- }
-}
-
-static int
-netx_hif_irq_type(struct irq_data *d, unsigned int type)
-{
- unsigned int val, irq;
-
- val = readl(NETX_DPMAS_IF_CONF1);
-
- irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
-
- if (type & IRQ_TYPE_EDGE_RISING) {
- DEBUG_IRQ("rising edges\n");
- val |= (1 << 26) << irq;
- }
- if (type & IRQ_TYPE_EDGE_FALLING) {
- DEBUG_IRQ("falling edges\n");
- val &= ~((1 << 26) << irq);
- }
- if (type & IRQ_TYPE_LEVEL_LOW) {
- DEBUG_IRQ("low level\n");
- val &= ~((1 << 26) << irq);
- }
- if (type & IRQ_TYPE_LEVEL_HIGH) {
- DEBUG_IRQ("high level\n");
- val |= (1 << 26) << irq;
- }
-
- writel(val, NETX_DPMAS_IF_CONF1);
-
- return 0;
-}
-
-static void
-netx_hif_ack_irq(struct irq_data *d)
-{
- unsigned int val, irq;
-
- irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
- writel((1 << 24) << irq, NETX_DPMAS_INT_STAT);
-
- val = readl(NETX_DPMAS_INT_EN);
- val &= ~((1 << 24) << irq);
- writel(val, NETX_DPMAS_INT_EN);
-
- DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
-}
-
-static void
-netx_hif_mask_irq(struct irq_data *d)
-{
- unsigned int val, irq;
-
- irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
- val = readl(NETX_DPMAS_INT_EN);
- val &= ~((1 << 24) << irq);
- writel(val, NETX_DPMAS_INT_EN);
- DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
-}
-
-static void
-netx_hif_unmask_irq(struct irq_data *d)
-{
- unsigned int val, irq;
-
- irq = d->irq - NETX_IRQ_HIF_CHAINED(0);
- val = readl(NETX_DPMAS_INT_EN);
- val |= (1 << 24) << irq;
- writel(val, NETX_DPMAS_INT_EN);
- DEBUG_IRQ("%s: irq %d\n", __func__, d->irq);
-}
-
-static struct irq_chip netx_hif_chip = {
- .irq_ack = netx_hif_ack_irq,
- .irq_mask = netx_hif_mask_irq,
- .irq_unmask = netx_hif_unmask_irq,
- .irq_set_type = netx_hif_irq_type,
-};
-
-void __init netx_init_irq(void)
-{
- int irq;
-
- vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
-
- for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
- irq_set_chip_and_handler(irq, &netx_hif_chip,
- handle_level_irq);
- irq_clear_status_flags(irq, IRQ_NOREQUEST);
- }
-
- writel(NETX_DPMAS_INT_EN_GLB_EN, NETX_DPMAS_INT_EN);
- irq_set_chained_handler(NETX_IRQ_HIF, netx_hif_demux_handler);
-}
-
-static int __init netx_init(void)
-{
- return platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-subsys_initcall(netx_init);
-
-void netx_restart(enum reboot_mode mode, const char *cmd)
-{
- writel(NETX_SYSTEM_RES_CR_FIRMW_RES_EN | NETX_SYSTEM_RES_CR_FIRMW_RES,
- NETX_SYSTEM_RES_CR);
-}
diff --git a/arch/arm/mach-netx/generic.h b/arch/arm/mach-netx/generic.h
deleted file mode 100644
index 223e304574a5..000000000000
--- a/arch/arm/mach-netx/generic.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/generic.h
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/reboot.h>
-
-extern void __init netx_map_io(void);
-extern void __init netx_init_irq(void);
-extern void netx_restart(enum reboot_mode, const char *);
-
-extern void netx_timer_init(void);
diff --git a/arch/arm/mach-netx/include/mach/hardware.h b/arch/arm/mach-netx/include/mach/hardware.h
deleted file mode 100644
index 84253993d1e0..000000000000
--- a/arch/arm/mach-netx/include/mach/hardware.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/include/mach/hardware.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-#ifndef __ASM_ARCH_HARDWARE_H
-#define __ASM_ARCH_HARDWARE_H
-
-#define NETX_IO_PHYS 0x00100000
-#define NETX_IO_VIRT 0xe0000000
-#define NETX_IO_SIZE 0x00100000
-
-#define SRAM_INTERNAL_PHYS_0 0x00000
-#define SRAM_INTERNAL_PHYS_1 0x08000
-#define SRAM_INTERNAL_PHYS_2 0x10000
-#define SRAM_INTERNAL_PHYS_3 0x18000
-#define SRAM_INTERNAL_PHYS(no) ((no) * 0x8000)
-
-#define XPEC_MEM_SIZE 0x4000
-#define XMAC_MEM_SIZE 0x1000
-#define SRAM_MEM_SIZE 0x8000
-
-#define io_p2v(x) IOMEM((x) - NETX_IO_PHYS + NETX_IO_VIRT)
-#define io_v2p(x) ((x) - NETX_IO_VIRT + NETX_IO_PHYS)
-
-#endif
diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h
deleted file mode 100644
index 540c92104fe8..000000000000
--- a/arch/arm/mach-netx/include/mach/irqs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/include/mach/irqs.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#define NETX_IRQ_VIC_START 64
-#define NETX_IRQ_SOFTINT (NETX_IRQ_VIC_START + 0)
-#define NETX_IRQ_TIMER0 (NETX_IRQ_VIC_START + 1)
-#define NETX_IRQ_TIMER1 (NETX_IRQ_VIC_START + 2)
-#define NETX_IRQ_TIMER2 (NETX_IRQ_VIC_START + 3)
-#define NETX_IRQ_SYSTIME_NS (NETX_IRQ_VIC_START + 4)
-#define NETX_IRQ_SYSTIME_S (NETX_IRQ_VIC_START + 5)
-#define NETX_IRQ_GPIO_15 (NETX_IRQ_VIC_START + 6)
-#define NETX_IRQ_WATCHDOG (NETX_IRQ_VIC_START + 7)
-#define NETX_IRQ_UART0 (NETX_IRQ_VIC_START + 8)
-#define NETX_IRQ_UART1 (NETX_IRQ_VIC_START + 9)
-#define NETX_IRQ_UART2 (NETX_IRQ_VIC_START + 10)
-#define NETX_IRQ_USB (NETX_IRQ_VIC_START + 11)
-#define NETX_IRQ_SPI (NETX_IRQ_VIC_START + 12)
-#define NETX_IRQ_I2C (NETX_IRQ_VIC_START + 13)
-#define NETX_IRQ_LCD (NETX_IRQ_VIC_START + 14)
-#define NETX_IRQ_HIF (NETX_IRQ_VIC_START + 15)
-#define NETX_IRQ_GPIO_0_14 (NETX_IRQ_VIC_START + 16)
-#define NETX_IRQ_XPEC0 (NETX_IRQ_VIC_START + 17)
-#define NETX_IRQ_XPEC1 (NETX_IRQ_VIC_START + 18)
-#define NETX_IRQ_XPEC2 (NETX_IRQ_VIC_START + 19)
-#define NETX_IRQ_XPEC3 (NETX_IRQ_VIC_START + 20)
-#define NETX_IRQ_XPEC(no) (NETX_IRQ_VIC_START + 17 + (no))
-#define NETX_IRQ_MSYNC0 (NETX_IRQ_VIC_START + 21)
-#define NETX_IRQ_MSYNC1 (NETX_IRQ_VIC_START + 22)
-#define NETX_IRQ_MSYNC2 (NETX_IRQ_VIC_START + 23)
-#define NETX_IRQ_MSYNC3 (NETX_IRQ_VIC_START + 24)
-#define NETX_IRQ_IRQ_PHY (NETX_IRQ_VIC_START + 25)
-#define NETX_IRQ_ISO_AREA (NETX_IRQ_VIC_START + 26)
-/* int 27 is reserved */
-/* int 28 is reserved */
-#define NETX_IRQ_TIMER3 (NETX_IRQ_VIC_START + 29)
-#define NETX_IRQ_TIMER4 (NETX_IRQ_VIC_START + 30)
-/* int 31 is reserved */
-
-#define NETX_IRQS (NETX_IRQ_VIC_START + 32)
-
-/* for multiplexed irqs on gpio 0..14 */
-#define NETX_IRQ_GPIO(x) (NETX_IRQS + (x))
-#define NETX_IRQ_GPIO_LAST NETX_IRQ_GPIO(14)
-
-/* Host interface interrupts */
-#define NETX_IRQ_HIF_CHAINED(x) (NETX_IRQ_GPIO_LAST + 1 + (x))
-#define NETX_IRQ_HIF_PIO35 NETX_IRQ_HIF_CHAINED(0)
-#define NETX_IRQ_HIF_PIO36 NETX_IRQ_HIF_CHAINED(1)
-#define NETX_IRQ_HIF_PIO40 NETX_IRQ_HIF_CHAINED(2)
-#define NETX_IRQ_HIF_PIO47 NETX_IRQ_HIF_CHAINED(3)
-#define NETX_IRQ_HIF_PIO72 NETX_IRQ_HIF_CHAINED(4)
-#define NETX_IRQ_HIF_LAST NETX_IRQ_HIF_CHAINED(4)
-
-#define NR_IRQS (NETX_IRQ_HIF_LAST + 1)
diff --git a/arch/arm/mach-netx/include/mach/netx-regs.h b/arch/arm/mach-netx/include/mach/netx-regs.h
deleted file mode 100644
index 7c356a6ab80b..000000000000
--- a/arch/arm/mach-netx/include/mach/netx-regs.h
+++ /dev/null
@@ -1,420 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/include/mach/netx-regs.h
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#ifndef __ASM_ARCH_NETX_REGS_H
-#define __ASM_ARCH_NETX_REGS_H
-
-/* offsets relative to the beginning of the io space */
-#define NETX_OFS_SYSTEM 0x00000
-#define NETX_OFS_MEMCR 0x00100
-#define NETX_OFS_DPMAS 0x03000
-#define NETX_OFS_GPIO 0x00800
-#define NETX_OFS_PIO 0x00900
-#define NETX_OFS_UART0 0x00a00
-#define NETX_OFS_UART1 0x00a40
-#define NETX_OFS_UART2 0x00a80
-#define NETX_OF_MIIMU 0x00b00
-#define NETX_OFS_SPI 0x00c00
-#define NETX_OFS_I2C 0x00d00
-#define NETX_OFS_SYSTIME 0x01100
-#define NETX_OFS_RTC 0x01200
-#define NETX_OFS_EXTBUS 0x03600
-#define NETX_OFS_LCD 0x04000
-#define NETX_OFS_USB 0x20000
-#define NETX_OFS_XMAC0 0x60000
-#define NETX_OFS_XMAC1 0x61000
-#define NETX_OFS_XMAC2 0x62000
-#define NETX_OFS_XMAC3 0x63000
-#define NETX_OFS_XMAC(no) (0x60000 + (no) * 0x1000)
-#define NETX_OFS_PFIFO 0x64000
-#define NETX_OFS_XPEC0 0x70000
-#define NETX_OFS_XPEC1 0x74000
-#define NETX_OFS_XPEC2 0x78000
-#define NETX_OFS_XPEC3 0x7c000
-#define NETX_OFS_XPEC(no) (0x70000 + (no) * 0x4000)
-#define NETX_OFS_VIC 0xff000
-
-/* physical addresses */
-#define NETX_PA_SYSTEM (NETX_IO_PHYS + NETX_OFS_SYSTEM)
-#define NETX_PA_MEMCR (NETX_IO_PHYS + NETX_OFS_MEMCR)
-#define NETX_PA_DPMAS (NETX_IO_PHYS + NETX_OFS_DPMAS)
-#define NETX_PA_GPIO (NETX_IO_PHYS + NETX_OFS_GPIO)
-#define NETX_PA_PIO (NETX_IO_PHYS + NETX_OFS_PIO)
-#define NETX_PA_UART0 (NETX_IO_PHYS + NETX_OFS_UART0)
-#define NETX_PA_UART1 (NETX_IO_PHYS + NETX_OFS_UART1)
-#define NETX_PA_UART2 (NETX_IO_PHYS + NETX_OFS_UART2)
-#define NETX_PA_MIIMU (NETX_IO_PHYS + NETX_OF_MIIMU)
-#define NETX_PA_SPI (NETX_IO_PHYS + NETX_OFS_SPI)
-#define NETX_PA_I2C (NETX_IO_PHYS + NETX_OFS_I2C)
-#define NETX_PA_SYSTIME (NETX_IO_PHYS + NETX_OFS_SYSTIME)
-#define NETX_PA_RTC (NETX_IO_PHYS + NETX_OFS_RTC)
-#define NETX_PA_EXTBUS (NETX_IO_PHYS + NETX_OFS_EXTBUS)
-#define NETX_PA_LCD (NETX_IO_PHYS + NETX_OFS_LCD)
-#define NETX_PA_USB (NETX_IO_PHYS + NETX_OFS_USB)
-#define NETX_PA_XMAC0 (NETX_IO_PHYS + NETX_OFS_XMAC0)
-#define NETX_PA_XMAC1 (NETX_IO_PHYS + NETX_OFS_XMAC1)
-#define NETX_PA_XMAC2 (NETX_IO_PHYS + NETX_OFS_XMAC2)
-#define NETX_PA_XMAC3 (NETX_IO_PHYS + NETX_OFS_XMAC3)
-#define NETX_PA_XMAC(no) (NETX_IO_PHYS + NETX_OFS_XMAC(no))
-#define NETX_PA_PFIFO (NETX_IO_PHYS + NETX_OFS_PFIFO)
-#define NETX_PA_XPEC0 (NETX_IO_PHYS + NETX_OFS_XPEC0)
-#define NETX_PA_XPEC1 (NETX_IO_PHYS + NETX_OFS_XPEC1)
-#define NETX_PA_XPEC2 (NETX_IO_PHYS + NETX_OFS_XPEC2)
-#define NETX_PA_XPEC3 (NETX_IO_PHYS + NETX_OFS_XPEC3)
-#define NETX_PA_XPEC(no) (NETX_IO_PHYS + NETX_OFS_XPEC(no))
-#define NETX_PA_VIC (NETX_IO_PHYS + NETX_OFS_VIC)
-
-/* virtual addresses */
-#define NETX_VA_SYSTEM (NETX_IO_VIRT + NETX_OFS_SYSTEM)
-#define NETX_VA_MEMCR (NETX_IO_VIRT + NETX_OFS_MEMCR)
-#define NETX_VA_DPMAS (NETX_IO_VIRT + NETX_OFS_DPMAS)
-#define NETX_VA_GPIO (NETX_IO_VIRT + NETX_OFS_GPIO)
-#define NETX_VA_PIO (NETX_IO_VIRT + NETX_OFS_PIO)
-#define NETX_VA_UART0 (NETX_IO_VIRT + NETX_OFS_UART0)
-#define NETX_VA_UART1 (NETX_IO_VIRT + NETX_OFS_UART1)
-#define NETX_VA_UART2 (NETX_IO_VIRT + NETX_OFS_UART2)
-#define NETX_VA_MIIMU (NETX_IO_VIRT + NETX_OF_MIIMU)
-#define NETX_VA_SPI (NETX_IO_VIRT + NETX_OFS_SPI)
-#define NETX_VA_I2C (NETX_IO_VIRT + NETX_OFS_I2C)
-#define NETX_VA_SYSTIME (NETX_IO_VIRT + NETX_OFS_SYSTIME)
-#define NETX_VA_RTC (NETX_IO_VIRT + NETX_OFS_RTC)
-#define NETX_VA_EXTBUS (NETX_IO_VIRT + NETX_OFS_EXTBUS)
-#define NETX_VA_LCD (NETX_IO_VIRT + NETX_OFS_LCD)
-#define NETX_VA_USB (NETX_IO_VIRT + NETX_OFS_USB)
-#define NETX_VA_XMAC0 (NETX_IO_VIRT + NETX_OFS_XMAC0)
-#define NETX_VA_XMAC1 (NETX_IO_VIRT + NETX_OFS_XMAC1)
-#define NETX_VA_XMAC2 (NETX_IO_VIRT + NETX_OFS_XMAC2)
-#define NETX_VA_XMAC3 (NETX_IO_VIRT + NETX_OFS_XMAC3)
-#define NETX_VA_XMAC(no) (NETX_IO_VIRT + NETX_OFS_XMAC(no))
-#define NETX_VA_PFIFO (NETX_IO_VIRT + NETX_OFS_PFIFO)
-#define NETX_VA_XPEC0 (NETX_IO_VIRT + NETX_OFS_XPEC0)
-#define NETX_VA_XPEC1 (NETX_IO_VIRT + NETX_OFS_XPEC1)
-#define NETX_VA_XPEC2 (NETX_IO_VIRT + NETX_OFS_XPEC2)
-#define NETX_VA_XPEC3 (NETX_IO_VIRT + NETX_OFS_XPEC3)
-#define NETX_VA_XPEC(no) (NETX_IO_VIRT + NETX_OFS_XPEC(no))
-#define NETX_VA_VIC (NETX_IO_VIRT + NETX_OFS_VIC)
-
-/*********************************
- * System functions *
- *********************************/
-
-/* Registers */
-#define NETX_SYSTEM_REG(ofs) IOMEM(NETX_VA_SYSTEM + (ofs))
-#define NETX_SYSTEM_BOO_SR NETX_SYSTEM_REG(0x00)
-#define NETX_SYSTEM_IOC_CR NETX_SYSTEM_REG(0x04)
-#define NETX_SYSTEM_IOC_MR NETX_SYSTEM_REG(0x08)
-
-/* FIXME: Docs are not consistent */
-/* #define NETX_SYSTEM_RES_CR NETX_SYSTEM_REG(0x08) */
-#define NETX_SYSTEM_RES_CR NETX_SYSTEM_REG(0x0c)
-
-#define NETX_SYSTEM_PHY_CONTROL NETX_SYSTEM_REG(0x10)
-#define NETX_SYSTEM_REV NETX_SYSTEM_REG(0x34)
-#define NETX_SYSTEM_IOC_ACCESS_KEY NETX_SYSTEM_REG(0x70)
-#define NETX_SYSTEM_WDG_TR NETX_SYSTEM_REG(0x200)
-#define NETX_SYSTEM_WDG_CTR NETX_SYSTEM_REG(0x204)
-#define NETX_SYSTEM_WDG_IRQ_TIMEOUT NETX_SYSTEM_REG(0x208)
-#define NETX_SYSTEM_WDG_RES_TIMEOUT NETX_SYSTEM_REG(0x20c)
-
-/* Bits */
-#define NETX_SYSTEM_RES_CR_RSTIN (1<<0)
-#define NETX_SYSTEM_RES_CR_WDG_RES (1<<1)
-#define NETX_SYSTEM_RES_CR_HOST_RES (1<<2)
-#define NETX_SYSTEM_RES_CR_FIRMW_RES (1<<3)
-#define NETX_SYSTEM_RES_CR_XPEC0_RES (1<<4)
-#define NETX_SYSTEM_RES_CR_XPEC1_RES (1<<5)
-#define NETX_SYSTEM_RES_CR_XPEC2_RES (1<<6)
-#define NETX_SYSTEM_RES_CR_XPEC3_RES (1<<7)
-#define NETX_SYSTEM_RES_CR_DIS_XPEC0_RES (1<<16)
-#define NETX_SYSTEM_RES_CR_DIS_XPEC1_RES (1<<17)
-#define NETX_SYSTEM_RES_CR_DIS_XPEC2_RES (1<<18)
-#define NETX_SYSTEM_RES_CR_DIS_XPEC3_RES (1<<19)
-#define NETX_SYSTEM_RES_CR_FIRMW_FLG0 (1<<20)
-#define NETX_SYSTEM_RES_CR_FIRMW_FLG1 (1<<21)
-#define NETX_SYSTEM_RES_CR_FIRMW_FLG2 (1<<22)
-#define NETX_SYSTEM_RES_CR_FIRMW_FLG3 (1<<23)
-#define NETX_SYSTEM_RES_CR_FIRMW_RES_EN (1<<24)
-#define NETX_SYSTEM_RES_CR_RSTOUT (1<<25)
-#define NETX_SYSTEM_RES_CR_EN_RSTOUT (1<<26)
-
-#define PHY_CONTROL_RESET (1<<31)
-#define PHY_CONTROL_SIM_BYP (1<<30)
-#define PHY_CONTROL_CLK_XLATIN (1<<29)
-#define PHY_CONTROL_PHY1_EN (1<<21)
-#define PHY_CONTROL_PHY1_NP_MSG_CODE
-#define PHY_CONTROL_PHY1_AUTOMDIX (1<<17)
-#define PHY_CONTROL_PHY1_FIXMODE (1<<16)
-#define PHY_CONTROL_PHY1_MODE(mode) (((mode) & 0x7) << 13)
-#define PHY_CONTROL_PHY0_EN (1<<12)
-#define PHY_CONTROL_PHY0_NP_MSG_CODE
-#define PHY_CONTROL_PHY0_AUTOMDIX (1<<8)
-#define PHY_CONTROL_PHY0_FIXMODE (1<<7)
-#define PHY_CONTROL_PHY0_MODE(mode) (((mode) & 0x7) << 4)
-#define PHY_CONTROL_PHY_ADDRESS(adr) ((adr) & 0xf)
-
-#define PHY_MODE_10BASE_T_HALF 0
-#define PHY_MODE_10BASE_T_FULL 1
-#define PHY_MODE_100BASE_TX_FX_FULL 2
-#define PHY_MODE_100BASE_TX_FX_HALF 3
-#define PHY_MODE_100BASE_TX_HALF 4
-#define PHY_MODE_REPEATER 5
-#define PHY_MODE_POWER_DOWN 6
-#define PHY_MODE_ALL 7
-
-/* Bits */
-#define VECT_CNTL_ENABLE (1 << 5)
-
-/*******************************
- * GPIO and timer module *
- *******************************/
-
-/* Registers */
-#define NETX_GPIO_REG(ofs) IOMEM(NETX_VA_GPIO + (ofs))
-#define NETX_GPIO_CFG(gpio) NETX_GPIO_REG(0x0 + ((gpio)<<2))
-#define NETX_GPIO_THRESHOLD_CAPTURE(gpio) NETX_GPIO_REG(0x40 + ((gpio)<<2))
-#define NETX_GPIO_COUNTER_CTRL(counter) NETX_GPIO_REG(0x80 + ((counter)<<2))
-#define NETX_GPIO_COUNTER_MAX(counter) NETX_GPIO_REG(0x94 + ((counter)<<2))
-#define NETX_GPIO_COUNTER_CURRENT(counter) NETX_GPIO_REG(0xa8 + ((counter)<<2))
-#define NETX_GPIO_IRQ_ENABLE NETX_GPIO_REG(0xbc)
-#define NETX_GPIO_IRQ_DISABLE NETX_GPIO_REG(0xc0)
-#define NETX_GPIO_SYSTIME_NS_CMP NETX_GPIO_REG(0xc4)
-#define NETX_GPIO_LINE NETX_GPIO_REG(0xc8)
-#define NETX_GPIO_IRQ NETX_GPIO_REG(0xd0)
-
-/* Bits */
-#define NETX_GPIO_CFG_IOCFG_GP_INPUT (0x0)
-#define NETX_GPIO_CFG_IOCFG_GP_OUTPUT (0x1)
-#define NETX_GPIO_CFG_IOCFG_GP_UART (0x2)
-#define NETX_GPIO_CFG_INV (1<<2)
-#define NETX_GPIO_CFG_MODE_INPUT_READ (0<<3)
-#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_CONT_RISING (1<<3)
-#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_ONCE_RISING (2<<3)
-#define NETX_GPIO_CFG_MODE_INPUT_CAPTURE_HIGH_LEVEL (3<<3)
-#define NETX_GPIO_CFG_COUNT_REF_COUNTER0 (0<<5)
-#define NETX_GPIO_CFG_COUNT_REF_COUNTER1 (1<<5)
-#define NETX_GPIO_CFG_COUNT_REF_COUNTER2 (2<<5)
-#define NETX_GPIO_CFG_COUNT_REF_COUNTER3 (3<<5)
-#define NETX_GPIO_CFG_COUNT_REF_COUNTER4 (4<<5)
-#define NETX_GPIO_CFG_COUNT_REF_SYSTIME (7<<5)
-
-#define NETX_GPIO_COUNTER_CTRL_RUN (1<<0)
-#define NETX_GPIO_COUNTER_CTRL_SYM (1<<1)
-#define NETX_GPIO_COUNTER_CTRL_ONCE (1<<2)
-#define NETX_GPIO_COUNTER_CTRL_IRQ_EN (1<<3)
-#define NETX_GPIO_COUNTER_CTRL_CNT_EVENT (1<<4)
-#define NETX_GPIO_COUNTER_CTRL_RST_EN (1<<5)
-#define NETX_GPIO_COUNTER_CTRL_SEL_EVENT (1<<6)
-#define NETX_GPIO_COUNTER_CTRL_GPIO_REF /* FIXME */
-
-#define GPIO_BIT(gpio) (1<<(gpio))
-#define COUNTER_BIT(counter) ((1<<16)<<(counter))
-
-/*******************************
- * PIO *
- *******************************/
-
-/* Registers */
-#define NETX_PIO_REG(ofs) IOMEM(NETX_VA_PIO + (ofs))
-#define NETX_PIO_INPIO NETX_PIO_REG(0x0)
-#define NETX_PIO_OUTPIO NETX_PIO_REG(0x4)
-#define NETX_PIO_OEPIO NETX_PIO_REG(0x8)
-
-/*******************************
- * MII Unit *
- *******************************/
-
-/* Registers */
-#define NETX_MIIMU IOMEM(NETX_VA_MIIMU)
-
-/* Bits */
-#define MIIMU_SNRDY (1<<0)
-#define MIIMU_PREAMBLE (1<<1)
-#define MIIMU_OPMODE_WRITE (1<<2)
-#define MIIMU_MDC_PERIOD (1<<3)
-#define MIIMU_PHY_NRES (1<<4)
-#define MIIMU_RTA (1<<5)
-#define MIIMU_REGADDR(adr) (((adr) & 0x1f) << 6)
-#define MIIMU_PHYADDR(adr) (((adr) & 0x1f) << 11)
-#define MIIMU_DATA(data) (((data) & 0xffff) << 16)
-
-/*******************************
- * xmac / xpec *
- *******************************/
-
-/* XPEC register offsets relative to NETX_VA_XPEC(no) */
-#define NETX_XPEC_R0_OFS 0x00
-#define NETX_XPEC_R1_OFS 0x04
-#define NETX_XPEC_R2_OFS 0x08
-#define NETX_XPEC_R3_OFS 0x0c
-#define NETX_XPEC_R4_OFS 0x10
-#define NETX_XPEC_R5_OFS 0x14
-#define NETX_XPEC_R6_OFS 0x18
-#define NETX_XPEC_R7_OFS 0x1c
-#define NETX_XPEC_RANGE01_OFS 0x20
-#define NETX_XPEC_RANGE23_OFS 0x24
-#define NETX_XPEC_RANGE45_OFS 0x28
-#define NETX_XPEC_RANGE67_OFS 0x2c
-#define NETX_XPEC_PC_OFS 0x48
-#define NETX_XPEC_TIMER_OFS(timer) (0x30 + ((timer)<<2))
-#define NETX_XPEC_IRQ_OFS 0x8c
-#define NETX_XPEC_SYSTIME_NS_OFS 0x90
-#define NETX_XPEC_FIFO_DATA_OFS 0x94
-#define NETX_XPEC_SYSTIME_S_OFS 0x98
-#define NETX_XPEC_ADC_OFS 0x9c
-#define NETX_XPEC_URX_COUNT_OFS 0x40
-#define NETX_XPEC_UTX_COUNT_OFS 0x44
-#define NETX_XPEC_PC_OFS 0x48
-#define NETX_XPEC_ZERO_OFS 0x4c
-#define NETX_XPEC_STATCFG_OFS 0x50
-#define NETX_XPEC_EC_MASKA_OFS 0x54
-#define NETX_XPEC_EC_MASKB_OFS 0x58
-#define NETX_XPEC_EC_MASK0_OFS 0x5c
-#define NETX_XPEC_EC_MASK8_OFS 0x7c
-#define NETX_XPEC_EC_MASK9_OFS 0x80
-#define NETX_XPEC_XPU_HOLD_PC_OFS 0x100
-#define NETX_XPEC_RAM_START_OFS 0x2000
-
-/* Bits */
-#define XPU_HOLD_PC (1<<0)
-
-/* XMAC register offsets relative to NETX_VA_XMAC(no) */
-#define NETX_XMAC_RPU_PROGRAM_START_OFS 0x000
-#define NETX_XMAC_RPU_PROGRAM_END_OFS 0x3ff
-#define NETX_XMAC_TPU_PROGRAM_START_OFS 0x400
-#define NETX_XMAC_TPU_PROGRAM_END_OFS 0x7ff
-#define NETX_XMAC_RPU_HOLD_PC_OFS 0xa00
-#define NETX_XMAC_TPU_HOLD_PC_OFS 0xa04
-#define NETX_XMAC_STATUS_SHARED0_OFS 0x840
-#define NETX_XMAC_CONFIG_SHARED0_OFS 0x844
-#define NETX_XMAC_STATUS_SHARED1_OFS 0x848
-#define NETX_XMAC_CONFIG_SHARED1_OFS 0x84c
-#define NETX_XMAC_STATUS_SHARED2_OFS 0x850
-#define NETX_XMAC_CONFIG_SHARED2_OFS 0x854
-#define NETX_XMAC_STATUS_SHARED3_OFS 0x858
-#define NETX_XMAC_CONFIG_SHARED3_OFS 0x85c
-
-#define RPU_HOLD_PC (1<<15)
-#define TPU_HOLD_PC (1<<15)
-
-/*******************************
- * Pointer FIFO *
- *******************************/
-
-/* Registers */
-#define NETX_PFIFO_REG(ofs) IOMEM(NETX_VA_PFIFO + (ofs))
-#define NETX_PFIFO_BASE(pfifo) NETX_PFIFO_REG(0x00 + ((pfifo)<<2))
-#define NETX_PFIFO_BORDER_BASE(pfifo) NETX_PFIFO_REG(0x80 + ((pfifo)<<2))
-#define NETX_PFIFO_RESET NETX_PFIFO_REG(0x100)
-#define NETX_PFIFO_FULL NETX_PFIFO_REG(0x104)
-#define NETX_PFIFO_EMPTY NETX_PFIFO_REG(0x108)
-#define NETX_PFIFO_OVEFLOW NETX_PFIFO_REG(0x10c)
-#define NETX_PFIFO_UNDERRUN NETX_PFIFO_REG(0x110)
-#define NETX_PFIFO_FILL_LEVEL(pfifo) NETX_PFIFO_REG(0x180 + ((pfifo)<<2))
-#define NETX_PFIFO_XPEC_ISR(xpec) NETX_PFIFO_REG(0x400 + ((xpec) << 2))
-
-
-/*******************************
- * Memory Controller *
- *******************************/
-
-/* Registers */
-#define NETX_MEMCR_REG(ofs) IOMEM(NETX_VA_MEMCR + (ofs))
-#define NETX_MEMCR_SRAM_CTRL(cs) NETX_MEMCR_REG(0x0 + 4 * (cs)) /* SRAM for CS 0..2 */
-#define NETX_MEMCR_SDRAM_CFG_CTRL NETX_MEMCR_REG(0x40)
-#define NETX_MEMCR_SDRAM_TIMING_CTRL NETX_MEMCR_REG(0x44)
-#define NETX_MEMCR_SDRAM_MODE NETX_MEMCR_REG(0x48)
-#define NETX_MEMCR_SDRAM_EXT_MODE NETX_MEMCR_REG(0x4c)
-#define NETX_MEMCR_PRIO_TIMESLOT_CTRL NETX_MEMCR_REG(0x80)
-#define NETX_MEMCR_PRIO_ACCESS_CTRL NETX_MEMCR_REG(0x84)
-
-/* Bits */
-#define NETX_MEMCR_SRAM_CTRL_WIDTHEXTMEM(x) (((x) & 0x3) << 24)
-#define NETX_MEMCR_SRAM_CTRL_WSPOSTPAUSEEXTMEM(x) (((x) & 0x3) << 16)
-#define NETX_MEMCR_SRAM_CTRL_WSPREPASEEXTMEM(x) (((x) & 0x3) << 8)
-#define NETX_MEMCR_SRAM_CTRL_WSEXTMEM(x) (((x) & 0x1f) << 0)
-
-
-/*******************************
- * Dual Port Memory *
- *******************************/
-
-/* Registers */
-#define NETX_DPMAS_REG(ofs) IOMEM(NETX_VA_DPMAS + (ofs))
-#define NETX_DPMAS_SYS_STAT NETX_DPMAS_REG(0x4d8)
-#define NETX_DPMAS_INT_STAT NETX_DPMAS_REG(0x4e0)
-#define NETX_DPMAS_INT_EN NETX_DPMAS_REG(0x4f0)
-#define NETX_DPMAS_IF_CONF0 NETX_DPMAS_REG(0x608)
-#define NETX_DPMAS_IF_CONF1 NETX_DPMAS_REG(0x60c)
-#define NETX_DPMAS_EXT_CONFIG(cs) NETX_DPMAS_REG(0x610 + 4 * (cs))
-#define NETX_DPMAS_IO_MODE0 NETX_DPMAS_REG(0x620) /* I/O 32..63 */
-#define NETX_DPMAS_DRV_EN0 NETX_DPMAS_REG(0x624)
-#define NETX_DPMAS_DATA0 NETX_DPMAS_REG(0x628)
-#define NETX_DPMAS_IO_MODE1 NETX_DPMAS_REG(0x630) /* I/O 64..84 */
-#define NETX_DPMAS_DRV_EN1 NETX_DPMAS_REG(0x634)
-#define NETX_DPMAS_DATA1 NETX_DPMAS_REG(0x638)
-
-/* Bits */
-#define NETX_DPMAS_INT_EN_GLB_EN (1<<31)
-#define NETX_DPMAS_INT_EN_MEM_LCK (1<<30)
-#define NETX_DPMAS_INT_EN_WDG (1<<29)
-#define NETX_DPMAS_INT_EN_PIO72 (1<<28)
-#define NETX_DPMAS_INT_EN_PIO47 (1<<27)
-#define NETX_DPMAS_INT_EN_PIO40 (1<<26)
-#define NETX_DPMAS_INT_EN_PIO36 (1<<25)
-#define NETX_DPMAS_INT_EN_PIO35 (1<<24)
-
-#define NETX_DPMAS_IF_CONF0_HIF_DISABLED (0<<28)
-#define NETX_DPMAS_IF_CONF0_HIF_EXT_BUS (1<<28)
-#define NETX_DPMAS_IF_CONF0_HIF_UP_8BIT (2<<28)
-#define NETX_DPMAS_IF_CONF0_HIF_UP_16BIT (3<<28)
-#define NETX_DPMAS_IF_CONF0_HIF_IO (4<<28)
-#define NETX_DPMAS_IF_CONF0_WAIT_DRV_PP (1<<14)
-#define NETX_DPMAS_IF_CONF0_WAIT_DRV_OD (2<<14)
-#define NETX_DPMAS_IF_CONF0_WAIT_DRV_TRI (3<<14)
-
-#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO35 (1<<26)
-#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO36 (1<<27)
-#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO40 (1<<28)
-#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO47 (1<<29)
-#define NETX_DPMAS_IF_CONF1_IRQ_POL_PIO72 (1<<30)
-
-#define NETX_EXT_CONFIG_TALEWIDTH(x) (((x) & 0x7) << 29)
-#define NETX_EXT_CONFIG_TADRHOLD(x) (((x) & 0x7) << 26)
-#define NETX_EXT_CONFIG_TCSON(x) (((x) & 0x7) << 23)
-#define NETX_EXT_CONFIG_TRDON(x) (((x) & 0x7) << 20)
-#define NETX_EXT_CONFIG_TWRON(x) (((x) & 0x7) << 17)
-#define NETX_EXT_CONFIG_TWROFF(x) (((x) & 0x1f) << 12)
-#define NETX_EXT_CONFIG_TRDWRCYC(x) (((x) & 0x1f) << 7)
-#define NETX_EXT_CONFIG_WAIT_POL (1<<6)
-#define NETX_EXT_CONFIG_WAIT_EN (1<<5)
-#define NETX_EXT_CONFIG_NRD_MODE (1<<4)
-#define NETX_EXT_CONFIG_DS_MODE (1<<3)
-#define NETX_EXT_CONFIG_NWR_MODE (1<<2)
-#define NETX_EXT_CONFIG_16BIT (1<<1)
-#define NETX_EXT_CONFIG_CS_ENABLE (1<<0)
-
-#define NETX_DPMAS_IO_MODE0_WRL (1<<13)
-#define NETX_DPMAS_IO_MODE0_WAIT (1<<14)
-#define NETX_DPMAS_IO_MODE0_READY (1<<15)
-#define NETX_DPMAS_IO_MODE0_CS0 (1<<19)
-#define NETX_DPMAS_IO_MODE0_EXTRD (1<<20)
-
-#define NETX_DPMAS_IO_MODE1_CS2 (1<<15)
-#define NETX_DPMAS_IO_MODE1_CS1 (1<<16)
-#define NETX_DPMAS_IO_MODE1_SAMPLE_NPOR (0<<30)
-#define NETX_DPMAS_IO_MODE1_SAMPLE_100MHZ (1<<30)
-#define NETX_DPMAS_IO_MODE1_SAMPLE_NPIO36 (2<<30)
-#define NETX_DPMAS_IO_MODE1_SAMPLE_PIO36 (3<<30)
-
-/*******************************
- * I2C *
- *******************************/
-#define NETX_I2C_REG(ofs) IOMEM(NETX_VA_I2C, (ofs))
-#define NETX_I2C_CTRL NETX_I2C_REG(0x0)
-#define NETX_I2C_DATA NETX_I2C_REG(0x4)
-
-#endif /* __ASM_ARCH_NETX_REGS_H */
diff --git a/arch/arm/mach-netx/include/mach/pfifo.h b/arch/arm/mach-netx/include/mach/pfifo.h
deleted file mode 100644
index de23180bc937..000000000000
--- a/arch/arm/mach-netx/include/mach/pfifo.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/include/mach/pfifo.h
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-
-#ifndef ASM_ARCH_PFIFO_H
-#define ASM_ARCH_PFIFO_H
-
-static inline int pfifo_push(int no, unsigned int pointer)
-{
- writel(pointer, NETX_PFIFO_BASE(no));
- return 0;
-}
-
-static inline unsigned int pfifo_pop(int no)
-{
- return readl(NETX_PFIFO_BASE(no));
-}
-
-static inline int pfifo_fill_level(int no)
-{
-
- return readl(NETX_PFIFO_FILL_LEVEL(no));
-}
-
-static inline int pfifo_full(int no)
-{
- return readl(NETX_PFIFO_FULL) & (1<<no) ? 1 : 0;
-}
-
-static inline int pfifo_empty(int no)
-{
- return readl(NETX_PFIFO_EMPTY) & (1<<no) ? 1 : 0;
-}
-
-int pfifo_request(unsigned int pfifo_mask);
-void pfifo_free(unsigned int pfifo_mask);
-
-#endif /* ASM_ARCH_PFIFO_H */
diff --git a/arch/arm/mach-netx/include/mach/uncompress.h b/arch/arm/mach-netx/include/mach/uncompress.h
deleted file mode 100644
index edc1ac997eab..000000000000
--- a/arch/arm/mach-netx/include/mach/uncompress.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/include/mach/uncompress.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-/*
- * The following code assumes the serial port has already been
- * initialized by the bootloader. We search for the first enabled
- * port in the most probable order. If you didn't setup a port in
- * your bootloader then nothing will appear (which might be desired).
- *
- * This does not append a newline
- */
-
-#define REG(x) (*(volatile unsigned long *)(x))
-
-#define UART1_BASE 0x100a00
-#define UART2_BASE 0x100a80
-
-#define UART_DR 0x0
-
-#define UART_CR 0x14
-#define CR_UART_EN (1<<0)
-
-#define UART_FR 0x18
-#define FR_BUSY (1<<3)
-#define FR_TXFF (1<<5)
-
-static inline void putc(char c)
-{
- unsigned long base;
-
- if (REG(UART1_BASE + UART_CR) & CR_UART_EN)
- base = UART1_BASE;
- else if (REG(UART2_BASE + UART_CR) & CR_UART_EN)
- base = UART2_BASE;
- else
- return;
-
- while (REG(base + UART_FR) & FR_TXFF);
- REG(base + UART_DR) = c;
-}
-
-static inline void flush(void)
-{
- unsigned long base;
-
- if (REG(UART1_BASE + UART_CR) & CR_UART_EN)
- base = UART1_BASE;
- else if (REG(UART2_BASE + UART_CR) & CR_UART_EN)
- base = UART2_BASE;
- else
- return;
-
- while (REG(base + UART_FR) & FR_BUSY);
-}
-
-/*
- * nothing to do
- */
-#define arch_decomp_setup()
diff --git a/arch/arm/mach-netx/include/mach/xc.h b/arch/arm/mach-netx/include/mach/xc.h
deleted file mode 100644
index 465d5e250ab8..000000000000
--- a/arch/arm/mach-netx/include/mach/xc.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * arch/arm/mach-netx/include/mach/xc.h
- *
- * Copyright (C) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#ifndef __ASM_ARCH_XC_H
-#define __ASM_ARCH_XC_H
-
-struct xc {
- int no;
- unsigned int type;
- unsigned int version;
- void __iomem *xpec_base;
- void __iomem *xmac_base;
- void __iomem *sram_base;
- int irq;
- struct device *dev;
-};
-
-int xc_reset(struct xc *x);
-int xc_stop(struct xc* x);
-int xc_start(struct xc *x);
-int xc_running(struct xc *x);
-int xc_request_firmware(struct xc* x);
-struct xc* request_xc(int xcno, struct device *dev);
-void free_xc(struct xc *x);
-
-#endif /* __ASM_ARCH_XC_H */
diff --git a/arch/arm/mach-netx/nxdb500.c b/arch/arm/mach-netx/nxdb500.c
deleted file mode 100644
index ad5e6747b834..000000000000
--- a/arch/arm/mach-netx/nxdb500.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/nxdb500.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/plat-ram.h>
-#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <mach/netx-regs.h>
-#include <linux/platform_data/eth-netx.h>
-
-#include "generic.h"
-#include "fb.h"
-
-static struct clcd_panel qvga = {
- .mode = {
- .name = "QVGA",
- .refresh = 60,
- .xres = 240,
- .yres = 320,
- .pixclock = 187617,
- .left_margin = 6,
- .right_margin = 26,
- .upper_margin = 0,
- .lower_margin = 6,
- .hsync_len = 6,
- .vsync_len = 1,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = 16,
- .cntl = CNTL_LCDTFT | CNTL_BGR,
- .bpp = 16,
- .grayscale = 0,
-};
-
-static inline int nxdb500_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- var->green.length = 5;
- var->green.msb_right = 0;
-
- return clcdfb_check(fb, var);
-}
-
-static int nxdb500_clcd_setup(struct clcd_fb *fb)
-{
- unsigned int val;
-
- fb->fb.var.green.length = 5;
- fb->fb.var.green.msb_right = 0;
-
- /* enable asic control */
- val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
- writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
-
- writel(3, NETX_SYSTEM_IOC_CR);
-
- val = readl(NETX_PIO_OUTPIO);
- writel(val | 1, NETX_PIO_OUTPIO);
-
- val = readl(NETX_PIO_OEPIO);
- writel(val | 1, NETX_PIO_OEPIO);
- return netx_clcd_setup(fb);
-}
-
-static struct clcd_board clcd_data = {
- .name = "netX",
- .check = nxdb500_check,
- .decode = clcdfb_decode,
- .enable = netx_clcd_enable,
- .setup = nxdb500_clcd_setup,
- .mmap = netx_clcd_mmap,
- .remove = netx_clcd_remove,
-};
-
-static struct netxeth_platform_data eth0_platform_data = {
- .xcno = 0,
-};
-
-static struct platform_device netx_eth0_device = {
- .name = "netx-eth",
- .id = 0,
- .num_resources = 0,
- .resource = NULL,
- .dev = {
- .platform_data = &eth0_platform_data,
- }
-};
-
-static struct netxeth_platform_data eth1_platform_data = {
- .xcno = 1,
-};
-
-static struct platform_device netx_eth1_device = {
- .name = "netx-eth",
- .id = 1,
- .num_resources = 0,
- .resource = NULL,
- .dev = {
- .platform_data = &eth1_platform_data,
- }
-};
-
-static struct resource netx_uart0_resources[] = {
- [0] = {
- .start = 0x00100A00,
- .end = 0x00100A3F,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = (NETX_IRQ_UART0),
- .end = (NETX_IRQ_UART0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device netx_uart0_device = {
- .name = "netx-uart",
- .id = 0,
- .num_resources = ARRAY_SIZE(netx_uart0_resources),
- .resource = netx_uart0_resources,
-};
-
-static struct resource netx_uart1_resources[] = {
- [0] = {
- .start = 0x00100A40,
- .end = 0x00100A7F,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = (NETX_IRQ_UART1),
- .end = (NETX_IRQ_UART1),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device netx_uart1_device = {
- .name = "netx-uart",
- .id = 1,
- .num_resources = ARRAY_SIZE(netx_uart1_resources),
- .resource = netx_uart1_resources,
-};
-
-static struct resource netx_uart2_resources[] = {
- [0] = {
- .start = 0x00100A80,
- .end = 0x00100ABF,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = (NETX_IRQ_UART2),
- .end = (NETX_IRQ_UART2),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device netx_uart2_device = {
- .name = "netx-uart",
- .id = 2,
- .num_resources = ARRAY_SIZE(netx_uart2_resources),
- .resource = netx_uart2_resources,
-};
-
-static struct platform_device *devices[] __initdata = {
- &netx_eth0_device,
- &netx_eth1_device,
- &netx_uart0_device,
- &netx_uart1_device,
- &netx_uart2_device,
-};
-
-static void __init nxdb500_init(void)
-{
- netx_fb_init(&clcd_data, &qvga);
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-MACHINE_START(NXDB500, "Hilscher nxdb500")
- .atag_offset = 0x100,
- .map_io = netx_map_io,
- .init_irq = netx_init_irq,
- .init_time = netx_timer_init,
- .init_machine = nxdb500_init,
- .restart = netx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-netx/nxdkn.c b/arch/arm/mach-netx/nxdkn.c
deleted file mode 100644
index 917381559628..000000000000
--- a/arch/arm/mach-netx/nxdkn.c
+++ /dev/null
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/nxdkn.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/plat-ram.h>
-#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <mach/netx-regs.h>
-#include <linux/platform_data/eth-netx.h>
-
-#include "generic.h"
-
-static struct netxeth_platform_data eth0_platform_data = {
- .xcno = 0,
-};
-
-static struct platform_device nxdkn_eth0_device = {
- .name = "netx-eth",
- .id = 0,
- .num_resources = 0,
- .resource = NULL,
- .dev = {
- .platform_data = &eth0_platform_data,
- }
-};
-
-static struct netxeth_platform_data eth1_platform_data = {
- .xcno = 1,
-};
-
-static struct platform_device nxdkn_eth1_device = {
- .name = "netx-eth",
- .id = 1,
- .num_resources = 0,
- .resource = NULL,
- .dev = {
- .platform_data = &eth1_platform_data,
- }
-};
-
-static struct resource netx_uart0_resources[] = {
- [0] = {
- .start = 0x00100A00,
- .end = 0x00100A3F,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = (NETX_IRQ_UART0),
- .end = (NETX_IRQ_UART0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device netx_uart0_device = {
- .name = "netx-uart",
- .id = 0,
- .num_resources = ARRAY_SIZE(netx_uart0_resources),
- .resource = netx_uart0_resources,
-};
-
-static struct platform_device *devices[] __initdata = {
- &nxdkn_eth0_device,
- &nxdkn_eth1_device,
- &netx_uart0_device,
-};
-
-static void __init nxdkn_init(void)
-{
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-MACHINE_START(NXDKN, "Hilscher nxdkn")
- .atag_offset = 0x100,
- .map_io = netx_map_io,
- .init_irq = netx_init_irq,
- .init_time = netx_timer_init,
- .init_machine = nxdkn_init,
- .restart = netx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-netx/nxeb500hmi.c b/arch/arm/mach-netx/nxeb500hmi.c
deleted file mode 100644
index aa0d5b2ca712..000000000000
--- a/arch/arm/mach-netx/nxeb500hmi.c
+++ /dev/null
@@ -1,174 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/nxeb500hmi.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/plat-ram.h>
-#include <linux/platform_device.h>
-#include <linux/amba/bus.h>
-#include <linux/amba/clcd.h>
-
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/mach/arch.h>
-#include <mach/netx-regs.h>
-#include <linux/platform_data/eth-netx.h>
-
-#include "generic.h"
-#include "fb.h"
-
-static struct clcd_panel qvga = {
- .mode = {
- .name = "QVGA",
- .refresh = 60,
- .xres = 240,
- .yres = 320,
- .pixclock = 187617,
- .left_margin = 6,
- .right_margin = 26,
- .upper_margin = 0,
- .lower_margin = 6,
- .hsync_len = 6,
- .vsync_len = 1,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED,
- },
- .width = -1,
- .height = -1,
- .tim2 = 16,
- .cntl = CNTL_LCDTFT | CNTL_BGR,
- .bpp = 16,
- .grayscale = 0,
-};
-
-static inline int nxeb500hmi_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
-{
- var->green.length = 5;
- var->green.msb_right = 0;
-
- return clcdfb_check(fb, var);
-}
-
-static int nxeb500hmi_clcd_setup(struct clcd_fb *fb)
-{
- unsigned int val;
-
- fb->fb.var.green.length = 5;
- fb->fb.var.green.msb_right = 0;
-
- /* enable asic control */
- val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
- writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
-
- writel(3, NETX_SYSTEM_IOC_CR);
-
- /* GPIO 14 is used for display enable on newer boards */
- writel(9, NETX_GPIO_CFG(14));
-
- val = readl(NETX_PIO_OUTPIO);
- writel(val | 1, NETX_PIO_OUTPIO);
-
- val = readl(NETX_PIO_OEPIO);
- writel(val | 1, NETX_PIO_OEPIO);
- return netx_clcd_setup(fb);
-}
-
-static struct clcd_board clcd_data = {
- .name = "netX",
- .check = nxeb500hmi_check,
- .decode = clcdfb_decode,
- .enable = netx_clcd_enable,
- .setup = nxeb500hmi_clcd_setup,
- .mmap = netx_clcd_mmap,
- .remove = netx_clcd_remove,
-};
-
-static struct netxeth_platform_data eth0_platform_data = {
- .xcno = 0,
-};
-
-static struct platform_device netx_eth0_device = {
- .name = "netx-eth",
- .id = 0,
- .num_resources = 0,
- .resource = NULL,
- .dev = {
- .platform_data = &eth0_platform_data,
- }
-};
-
-static struct netxeth_platform_data eth1_platform_data = {
- .xcno = 1,
-};
-
-static struct platform_device netx_eth1_device = {
- .name = "netx-eth",
- .id = 1,
- .num_resources = 0,
- .resource = NULL,
- .dev = {
- .platform_data = &eth1_platform_data,
- }
-};
-
-static struct resource netx_cf_resources[] = {
- [0] = {
- .start = 0x20000000,
- .end = 0x25ffffff,
- .flags = IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
- },
-};
-
-static struct platform_device netx_cf_device = {
- .name = "netx-cf",
- .id = 0,
- .resource = netx_cf_resources,
- .num_resources = ARRAY_SIZE(netx_cf_resources),
-};
-
-static struct resource netx_uart0_resources[] = {
- [0] = {
- .start = 0x00100A00,
- .end = 0x00100A3F,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = (NETX_IRQ_UART0),
- .end = (NETX_IRQ_UART0),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device netx_uart0_device = {
- .name = "netx-uart",
- .id = 0,
- .num_resources = ARRAY_SIZE(netx_uart0_resources),
- .resource = netx_uart0_resources,
-};
-
-static struct platform_device *devices[] __initdata = {
- &netx_eth0_device,
- &netx_eth1_device,
- &netx_cf_device,
- &netx_uart0_device,
-};
-
-static void __init nxeb500hmi_init(void)
-{
- netx_fb_init(&clcd_data, &qvga);
- platform_add_devices(devices, ARRAY_SIZE(devices));
-}
-
-MACHINE_START(NXEB500HMI, "Hilscher nxeb500hmi")
- .atag_offset = 0x100,
- .map_io = netx_map_io,
- .init_irq = netx_init_irq,
- .init_time = netx_timer_init,
- .init_machine = nxeb500hmi_init,
- .restart = netx_restart,
-MACHINE_END
diff --git a/arch/arm/mach-netx/pfifo.c b/arch/arm/mach-netx/pfifo.c
deleted file mode 100644
index 2e5cc777329f..000000000000
--- a/arch/arm/mach-netx/pfifo.c
+++ /dev/null
@@ -1,56 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/pfifo.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <mach/netx-regs.h>
-#include <mach/pfifo.h>
-
-static DEFINE_MUTEX(pfifo_lock);
-
-static unsigned int pfifo_used = 0;
-
-int pfifo_request(unsigned int pfifo_mask)
-{
- int err = 0;
- unsigned int val;
-
- mutex_lock(&pfifo_lock);
-
- if (pfifo_mask & pfifo_used) {
- err = -EBUSY;
- goto out;
- }
-
- pfifo_used |= pfifo_mask;
-
- val = readl(NETX_PFIFO_RESET);
- writel(val | pfifo_mask, NETX_PFIFO_RESET);
- writel(val, NETX_PFIFO_RESET);
-
-out:
- mutex_unlock(&pfifo_lock);
- return err;
-}
-
-void pfifo_free(unsigned int pfifo_mask)
-{
- mutex_lock(&pfifo_lock);
- pfifo_used &= ~pfifo_mask;
- mutex_unlock(&pfifo_lock);
-}
-
-EXPORT_SYMBOL(pfifo_push);
-EXPORT_SYMBOL(pfifo_pop);
-EXPORT_SYMBOL(pfifo_fill_level);
-EXPORT_SYMBOL(pfifo_empty);
-EXPORT_SYMBOL(pfifo_request);
-EXPORT_SYMBOL(pfifo_free);
diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c
deleted file mode 100644
index d9defa1ab605..000000000000
--- a/arch/arm/mach-netx/time.c
+++ /dev/null
@@ -1,141 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/time.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/clocksource.h>
-#include <linux/clockchips.h>
-#include <linux/io.h>
-
-#include <mach/hardware.h>
-#include <asm/mach/time.h>
-#include <mach/netx-regs.h>
-
-#define NETX_CLOCK_FREQ 100000000
-#define NETX_LATCH DIV_ROUND_CLOSEST(NETX_CLOCK_FREQ, HZ)
-
-#define TIMER_CLOCKEVENT 0
-#define TIMER_CLOCKSOURCE 1
-
-static inline void timer_shutdown(struct clock_event_device *evt)
-{
- /* disable timer */
- writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
-}
-
-static int netx_shutdown(struct clock_event_device *evt)
-{
- timer_shutdown(evt);
-
- return 0;
-}
-
-static int netx_set_oneshot(struct clock_event_device *evt)
-{
- u32 tmode = NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN;
-
- timer_shutdown(evt);
- writel(0, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
- writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
-
- return 0;
-}
-
-static int netx_set_periodic(struct clock_event_device *evt)
-{
- u32 tmode = NETX_GPIO_COUNTER_CTRL_RST_EN |
- NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN;
-
- timer_shutdown(evt);
- writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKEVENT));
- writel(tmode, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKEVENT));
-
- return 0;
-}
-
-static int netx_set_next_event(unsigned long evt,
- struct clock_event_device *clk)
-{
- writel(0 - evt, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKEVENT));
- return 0;
-}
-
-static struct clock_event_device netx_clockevent = {
- .name = "netx-timer" __stringify(TIMER_CLOCKEVENT),
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = netx_set_next_event,
- .set_state_shutdown = netx_shutdown,
- .set_state_periodic = netx_set_periodic,
- .set_state_oneshot = netx_set_oneshot,
- .tick_resume = netx_shutdown,
-};
-
-/*
- * IRQ handler for the timer
- */
-static irqreturn_t
-netx_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = &netx_clockevent;
-
- /* acknowledge interrupt */
- writel(COUNTER_BIT(0), NETX_GPIO_IRQ);
-
- evt->event_handler(evt);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction netx_timer_irq = {
- .name = "NetX Timer Tick",
- .flags = IRQF_TIMER | IRQF_IRQPOLL,
- .handler = netx_timer_interrupt,
-};
-
-/*
- * Set up timer interrupt
- */
-void __init netx_timer_init(void)
-{
- /* disable timer initially */
- writel(0, NETX_GPIO_COUNTER_CTRL(0));
-
- /* Reset the timer value to zero */
- writel(0, NETX_GPIO_COUNTER_CURRENT(0));
-
- writel(NETX_LATCH, NETX_GPIO_COUNTER_MAX(0));
-
- /* acknowledge interrupt */
- writel(COUNTER_BIT(0), NETX_GPIO_IRQ);
-
- /* Enable the interrupt in the specific timer
- * register and start timer
- */
- writel(COUNTER_BIT(0), NETX_GPIO_IRQ_ENABLE);
- writel(NETX_GPIO_COUNTER_CTRL_IRQ_EN | NETX_GPIO_COUNTER_CTRL_RUN,
- NETX_GPIO_COUNTER_CTRL(0));
-
- setup_irq(NETX_IRQ_TIMER0, &netx_timer_irq);
-
- /* Setup timer one for clocksource */
- writel(0, NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));
- writel(0, NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE));
- writel(0xffffffff, NETX_GPIO_COUNTER_MAX(TIMER_CLOCKSOURCE));
-
- writel(NETX_GPIO_COUNTER_CTRL_RUN,
- NETX_GPIO_COUNTER_CTRL(TIMER_CLOCKSOURCE));
-
- clocksource_mmio_init(NETX_GPIO_COUNTER_CURRENT(TIMER_CLOCKSOURCE),
- "netx_timer", NETX_CLOCK_FREQ, 200, 32, clocksource_mmio_readl_up);
-
- /* with max_delta_ns >= delta2ns(0x800) the system currently runs fine.
- * Adding some safety ... */
- netx_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&netx_clockevent, NETX_CLOCK_FREQ,
- 0xa00, 0xfffffffe);
-}
diff --git a/arch/arm/mach-netx/xc.c b/arch/arm/mach-netx/xc.c
deleted file mode 100644
index 885a618b2651..000000000000
--- a/arch/arm/mach-netx/xc.c
+++ /dev/null
@@ -1,246 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * arch/arm/mach-netx/xc.c
- *
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/firmware.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/export.h>
-
-#include <mach/hardware.h>
-#include <mach/irqs.h>
-#include <mach/netx-regs.h>
-
-#include <mach/xc.h>
-
-static DEFINE_MUTEX(xc_lock);
-
-static int xc_in_use = 0;
-
-struct fw_desc {
- unsigned int ofs;
- unsigned int size;
- unsigned int patch_ofs;
- unsigned int patch_entries;
-};
-
-struct fw_header {
- unsigned int magic;
- unsigned int type;
- unsigned int version;
- unsigned int reserved[5];
- struct fw_desc fw_desc[3];
-} __attribute__ ((packed));
-
-int xc_stop(struct xc *x)
-{
- writel(RPU_HOLD_PC, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS);
- writel(TPU_HOLD_PC, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS);
- writel(XPU_HOLD_PC, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS);
- return 0;
-}
-
-int xc_start(struct xc *x)
-{
- writel(0, x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS);
- writel(0, x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS);
- writel(0, x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS);
- return 0;
-}
-
-int xc_running(struct xc *x)
-{
- return (readl(x->xmac_base + NETX_XMAC_RPU_HOLD_PC_OFS) & RPU_HOLD_PC)
- || (readl(x->xmac_base + NETX_XMAC_TPU_HOLD_PC_OFS) & TPU_HOLD_PC)
- || (readl(x->xpec_base + NETX_XPEC_XPU_HOLD_PC_OFS) & XPU_HOLD_PC) ?
- 0 : 1;
-}
-
-int xc_reset(struct xc *x)
-{
- writel(0, x->xpec_base + NETX_XPEC_PC_OFS);
- return 0;
-}
-
-static int xc_check_ptr(struct xc *x, unsigned long adr, unsigned int size)
-{
- if (adr >= NETX_PA_XMAC(x->no) &&
- adr + size < NETX_PA_XMAC(x->no) + XMAC_MEM_SIZE)
- return 0;
-
- if (adr >= NETX_PA_XPEC(x->no) &&
- adr + size < NETX_PA_XPEC(x->no) + XPEC_MEM_SIZE)
- return 0;
-
- dev_err(x->dev, "Illegal pointer in firmware found. aborting\n");
-
- return -1;
-}
-
-static int xc_patch(struct xc *x, const void *patch, int count)
-{
- unsigned int val, adr;
- const unsigned int *data = patch;
-
- int i;
- for (i = 0; i < count; i++) {
- adr = *data++;
- val = *data++;
- if (xc_check_ptr(x, adr, 4) < 0)
- return -EINVAL;
-
- writel(val, (void __iomem *)io_p2v(adr));
- }
- return 0;
-}
-
-int xc_request_firmware(struct xc *x)
-{
- int ret;
- char name[16];
- const struct firmware *fw;
- struct fw_header *head;
- unsigned int size;
- int i;
- const void *src;
- unsigned long dst;
-
- sprintf(name, "xc%d.bin", x->no);
-
- ret = request_firmware(&fw, name, x->dev);
-
- if (ret < 0) {
- dev_err(x->dev, "request_firmware failed\n");
- return ret;
- }
-
- head = (struct fw_header *)fw->data;
- if (head->magic != 0x4e657458) {
- if (head->magic == 0x5874654e) {
- dev_err(x->dev,
- "firmware magic is 'XteN'. Endianness problems?\n");
- ret = -ENODEV;
- goto exit_release_firmware;
- }
- dev_err(x->dev, "unrecognized firmware magic 0x%08x\n",
- head->magic);
- ret = -ENODEV;
- goto exit_release_firmware;
- }
-
- x->type = head->type;
- x->version = head->version;
-
- ret = -EINVAL;
-
- for (i = 0; i < 3; i++) {
- src = fw->data + head->fw_desc[i].ofs;
- dst = *(unsigned int *)src;
- src += sizeof (unsigned int);
- size = head->fw_desc[i].size - sizeof (unsigned int);
-
- if (xc_check_ptr(x, dst, size))
- goto exit_release_firmware;
-
- memcpy((void *)io_p2v(dst), src, size);
-
- src = fw->data + head->fw_desc[i].patch_ofs;
- size = head->fw_desc[i].patch_entries;
- ret = xc_patch(x, src, size);
- if (ret < 0)
- goto exit_release_firmware;
- }
-
- ret = 0;
-
- exit_release_firmware:
- release_firmware(fw);
-
- return ret;
-}
-
-struct xc *request_xc(int xcno, struct device *dev)
-{
- struct xc *x = NULL;
-
- mutex_lock(&xc_lock);
-
- if (xcno > 3)
- goto exit;
- if (xc_in_use & (1 << xcno))
- goto exit;
-
- x = kmalloc(sizeof (struct xc), GFP_KERNEL);
- if (!x)
- goto exit;
-
- if (!request_mem_region
- (NETX_PA_XPEC(xcno), XPEC_MEM_SIZE, kobject_name(&dev->kobj)))
- goto exit_free;
-
- if (!request_mem_region
- (NETX_PA_XMAC(xcno), XMAC_MEM_SIZE, kobject_name(&dev->kobj)))
- goto exit_release_1;
-
- if (!request_mem_region
- (SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE, kobject_name(&dev->kobj)))
- goto exit_release_2;
-
- x->xpec_base = (void * __iomem)io_p2v(NETX_PA_XPEC(xcno));
- x->xmac_base = (void * __iomem)io_p2v(NETX_PA_XMAC(xcno));
- x->sram_base = ioremap(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
- if (!x->sram_base)
- goto exit_release_3;
-
- x->irq = NETX_IRQ_XPEC(xcno);
-
- x->no = xcno;
- x->dev = dev;
-
- xc_in_use |= (1 << xcno);
-
- goto exit;
-
- exit_release_3:
- release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
- exit_release_2:
- release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE);
- exit_release_1:
- release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE);
- exit_free:
- kfree(x);
- x = NULL;
- exit:
- mutex_unlock(&xc_lock);
- return x;
-}
-
-void free_xc(struct xc *x)
-{
- int xcno = x->no;
-
- mutex_lock(&xc_lock);
-
- iounmap(x->sram_base);
- release_mem_region(SRAM_INTERNAL_PHYS(xcno), SRAM_MEM_SIZE);
- release_mem_region(NETX_PA_XMAC(xcno), XMAC_MEM_SIZE);
- release_mem_region(NETX_PA_XPEC(xcno), XPEC_MEM_SIZE);
- xc_in_use &= ~(1 << x->no);
- kfree(x);
-
- mutex_unlock(&xc_lock);
-}
-
-EXPORT_SYMBOL(free_xc);
-EXPORT_SYMBOL(request_xc);
-EXPORT_SYMBOL(xc_request_firmware);
-EXPORT_SYMBOL(xc_reset);
-EXPORT_SYMBOL(xc_running);
-EXPORT_SYMBOL(xc_start);
-EXPORT_SYMBOL(xc_stop);
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 820b60a50125..c54cd7ed90ba 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -663,6 +663,11 @@ config ARM_LPAE
depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \
!CPU_32v4 && !CPU_32v3
select PHYS_ADDR_T_64BIT
+ select SWIOTLB
+ select ARCH_HAS_DMA_COHERENT_TO_PFN
+ select ARCH_HAS_DMA_MMAP_PGPROT
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
help
Say Y if you have an ARMv7 processor supporting the LPAE page
table format and you would like to access memory beyond the
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4789c60a86e3..6774b03aa405 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
@@ -1125,6 +1126,19 @@ int arm_dma_supported(struct device *dev, u64 mask)
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
+ /*
+ * When CONFIG_ARM_LPAE is set, physical addresses can extend above
+ * 32 bits, which then can't be addressed by devices that only support
+ * 32-bit DMA.
+ * Use the generic dma-direct / swiotlb ops code in that case, as that
+ * handles bounce buffering for us.
+ *
+ * Note: this checks CONFIG_ARM_LPAE instead of CONFIG_SWIOTLB as the
+ * latter is also selected by the Xen code, but that code for now relies
+ * on non-NULL dev_dma_ops. To be cleaned up later.
+ */
+ if (IS_ENABLED(CONFIG_ARM_LPAE))
+ return NULL;
return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
@@ -2329,6 +2343,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct dma_map_ops *dma_ops;
dev->archdata.dma_coherent = coherent;
+#ifdef CONFIG_SWIOTLB
+ dev->dma_coherent = coherent;
+#endif
/*
* Don't override the dma_ops if they have already been set. Ideally
@@ -2363,3 +2380,47 @@ void arch_teardown_dma_ops(struct device *dev)
/* Let arch_setup_dma_ops() start again from scratch upon re-probe */
set_dma_ops(dev, NULL);
}
+
+#ifdef CONFIG_SWIOTLB
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+ size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
+{
+ __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
+ size, dir);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ return dma_to_pfn(dev, dma_addr);
+}
+
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ if (!dev_is_dma_coherent(dev))
+ return __get_dma_pgprot(attrs, prot);
+ return prot;
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ return __dma_alloc(dev, size, dma_handle, gfp,
+ __get_dma_pgprot(attrs, PAGE_KERNEL), false,
+ attrs, __builtin_return_address(0));
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
+}
+#endif /* CONFIG_SWIOTLB */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 4920a206dce9..16d373d587c4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -21,6 +21,7 @@
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
+#include <linux/swiotlb.h>
#include <asm/cp15.h>
#include <asm/mach-types.h>
@@ -463,6 +464,10 @@ static void __init free_highpages(void)
*/
void __init mem_init(void)
{
+#ifdef CONFIG_ARM_LPAE
+ swiotlb_init(1);
+#endif
+
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
/* this will put all unused low memory onto the freelists */
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index bb1f1dbb34e8..61de992bbea3 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -52,7 +52,7 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
ifeq ($(CONFIG_CC_IS_CLANG), y)
$(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
- else ifeq ($(CROSS_COMPILE_COMPAT),)
+ else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
$(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
$(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
index e25f7fcd7997..cffa8991880d 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
@@ -462,7 +462,7 @@
#define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0
#define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK 0x1D0 0x438 0x000 0x1 0x0
#define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2
#define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0
@@ -472,7 +472,7 @@
#define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0
#define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0
-#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0
+#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2 0x1D8 0x440 0x000 0x1 0x0
#define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2
#define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0
#define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index d09b808eff87..52aae341d0da 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -715,8 +715,7 @@
sai2: sai@308b0000 {
#sound-dai-cells = <0>;
- compatible = "fsl,imx8mq-sai",
- "fsl,imx6sx-sai";
+ compatible = "fsl,imx8mq-sai";
reg = <0x308b0000 0x10000>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MQ_CLK_SAI2_IPG>,
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 79155a8cfe7c..89e4c8b79349 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -155,6 +155,12 @@ static inline void gic_pmr_mask_irqs(void)
BUILD_BUG_ON(GICD_INT_DEF_PRI < (GIC_PRIO_IRQOFF |
GIC_PRIO_PSR_I_SET));
BUILD_BUG_ON(GICD_INT_DEF_PRI >= GIC_PRIO_IRQON);
+ /*
+ * Need to make sure IRQON allows IRQs when SCR_EL3.FIQ is cleared
+ * and non-secure PMR accesses are not subject to the shifts that
+ * are applied to IRQ priorities
+ */
+ BUILD_BUG_ON((0x80 | (GICD_INT_DEF_PRI >> 1)) >= GIC_PRIO_IRQON);
gic_write_pmr(GIC_PRIO_IRQOFF);
}
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 407e2bf23676..c96ffa4722d3 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -35,9 +35,10 @@
*/
enum ftr_type {
- FTR_EXACT, /* Use a predefined safe value */
- FTR_LOWER_SAFE, /* Smaller value is safe */
- FTR_HIGHER_SAFE,/* Bigger value is safe */
+ FTR_EXACT, /* Use a predefined safe value */
+ FTR_LOWER_SAFE, /* Smaller value is safe */
+ FTR_HIGHER_SAFE, /* Bigger value is safe */
+ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
};
#define FTR_STRICT true /* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/daifflags.h b/arch/arm64/include/asm/daifflags.h
index 987926ed535e..063c964af705 100644
--- a/arch/arm64/include/asm/daifflags.h
+++ b/arch/arm64/include/asm/daifflags.h
@@ -13,6 +13,8 @@
#define DAIF_PROCCTX 0
#define DAIF_PROCCTX_NOIRQ PSR_I_BIT
#define DAIF_ERRCTX (PSR_I_BIT | PSR_A_BIT)
+#define DAIF_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 8e79ce9c3f5c..76a144702586 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -105,7 +105,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
#define alloc_screen_info(x...) &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+ struct screen_info *si)
+{
+}
/* redeclare as 'hidden' so the compiler will generate relative references */
extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 3c7037c6ba9b..b618017205a3 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -202,7 +202,7 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
({ \
set_thread_flag(TIF_32BIT); \
})
-#ifdef CONFIG_GENERIC_COMPAT_VDSO
+#ifdef CONFIG_COMPAT_VDSO
#define COMPAT_ARCH_DLINFO \
do { \
/* \
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b7ba75809751..fb04f10a78ab 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -210,7 +210,11 @@ extern u64 vabits_user;
#define __tag_reset(addr) untagged_addr(addr)
#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
#else
-#define __tag_set(addr, tag) (addr)
+static inline const void *__tag_set(const void *addr, u8 tag)
+{
+ return addr;
+}
+
#define __tag_reset(addr) (addr)
#define __tag_get(addr) 0
#endif
@@ -301,8 +305,8 @@ static inline void *phys_to_virt(phys_addr_t x)
#define page_to_virt(page) ({ \
unsigned long __addr = \
((__page_to_voff(page)) | PAGE_OFFSET); \
- unsigned long __addr_tag = \
- __tag_set(__addr, page_kasan_tag(page)); \
+ const void *__addr_tag = \
+ __tag_set((void *)__addr, page_kasan_tag(page)); \
((void *)__addr_tag); \
})
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 87a4b2ddc1a1..5fdcfe237338 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -301,7 +301,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
/*
* Huge pte definitions.
*/
-#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
/*
@@ -448,8 +447,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_SECT)
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud) (0)
-#define pud_table(pud) (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
regs->pmr_save = GIC_PRIO_IRQON;
}
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+ regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
static inline void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = PSR_MODE_EL0t;
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= PSR_SSBS_BIT;
+ set_ssbs_bit(regs);
regs->sp = sp;
}
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
#endif
if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
- regs->pstate |= PSR_AA32_SSBS_BIT;
+ set_compat_ssbs_bit(regs);
regs->compat_sp = sp;
}
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b1dd039023ef..1dcf63a9ac1f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -30,7 +30,7 @@
* in the priority mask, it indicates that PSR.I should be set and
* interrupt disabling temporarily does not rely on IRQ priorities.
*/
-#define GIC_PRIO_IRQON 0xc0
+#define GIC_PRIO_IRQON 0xe0
#define GIC_PRIO_IRQOFF (GIC_PRIO_IRQON & ~0x80)
#define GIC_PRIO_PSR_I_SET (1 << 4)
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index df45af931459..4d9b1f48dc39 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -8,19 +8,12 @@
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <linux/types.h>
#include <asm/memory.h>
#include <asm/ptrace.h>
#include <asm/sdei.h>
-struct stackframe {
- unsigned long fp;
- unsigned long pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- int graph;
-#endif
-};
-
enum stack_type {
STACK_TYPE_UNKNOWN,
STACK_TYPE_TASK,
@@ -28,6 +21,7 @@ enum stack_type {
STACK_TYPE_OVERFLOW,
STACK_TYPE_SDEI_NORMAL,
STACK_TYPE_SDEI_CRITICAL,
+ __NR_STACK_TYPES
};
struct stack_info {
@@ -36,6 +30,37 @@ struct stack_info {
enum stack_type type;
};
+/*
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
+ *
+ * @fp: The fp value in the frame record (or the real fp)
+ * @pc: The lr value in the frame record (or the real lr)
+ *
+ * @stacks_done: Stacks which have been entirely unwound, and to which it is
+ * no longer valid to unwind.
+ *
+ * @prev_fp: The fp that pointed to this frame record, or a synthetic value
+ * of 0. This is used to ensure that within a stack, each
+ * subsequent frame record is at an increasing address.
+ * @prev_type: The type of stack this frame record was on, or a synthetic
+ * value of STACK_TYPE_UNKNOWN. This is used to detect a
+ * transition from one stack to another.
+ *
+ * @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
+ * replacement lr value in the ftrace graph stack.
+ */
+struct stackframe {
+ unsigned long fp;
+ unsigned long pc;
+ DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+ unsigned long prev_fp;
+ enum stack_type prev_type;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ int graph;
+#endif
+};
+
extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data);
@@ -64,8 +89,9 @@ static inline bool on_irq_stack(unsigned long sp,
return true;
}
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
- struct stack_info *info)
+static inline bool on_task_stack(const struct task_struct *tsk,
+ unsigned long sp,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
@@ -112,10 +138,13 @@ static inline bool on_overflow_stack(unsigned long sp,
* We can only safely access per-cpu stacks from current in a non-preemptible
* context.
*/
-static inline bool on_accessible_stack(struct task_struct *tsk,
- unsigned long sp,
- struct stack_info *info)
+static inline bool on_accessible_stack(const struct task_struct *tsk,
+ unsigned long sp,
+ struct stack_info *info)
{
+ if (info)
+ info->type = STACK_TYPE_UNKNOWN;
+
if (on_task_stack(tsk, sp, info))
return true;
if (tsk != current || preemptible())
@@ -130,4 +159,27 @@ static inline bool on_accessible_stack(struct task_struct *tsk,
return false;
}
+static inline void start_backtrace(struct stackframe *frame,
+ unsigned long fp, unsigned long pc)
+{
+ frame->fp = fp;
+ frame->pc = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ frame->graph = 0;
+#endif
+
+ /*
+ * Prime the first unwind.
+ *
+ * In unwind_frame() we'll check that the FP points to a valid stack,
+ * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+ * treated as a transition to whichever stack that happens to be. The
+ * prev_fp value won't be used, but we set it to 0 such that it is
+ * definitely not an accessible stack address.
+ */
+ bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
+ frame->prev_fp = 0;
+ frame->prev_type = STACK_TYPE_UNKNOWN;
+}
+
#endif /* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/include/asm/vdso/compat_gettimeofday.h b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
index f4812777f5c5..c50ee1b7d5cd 100644
--- a/arch/arm64/include/asm/vdso/compat_gettimeofday.h
+++ b/arch/arm64/include/asm/vdso/compat_gettimeofday.h
@@ -16,6 +16,8 @@
#define VDSO_HAS_CLOCK_GETRES 1
+#define VDSO_HAS_32BIT_FALLBACK 1
+
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
@@ -52,6 +54,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
}
static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_compat_clock_gettime;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
register struct __kernel_timespec *ts asm("r1") = _ts;
@@ -72,6 +91,27 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return ret;
}
+static __always_inline
+int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ register struct old_timespec32 *ts asm("r1") = _ts;
+ register clockid_t clkid asm("r0") = _clkid;
+ register long ret asm ("r0");
+ register long nr asm("r7") = __NR_compat_clock_getres;
+
+ /* The checks below are required for ABI consistency with arm */
+ if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
+ return -EINVAL;
+
+ asm volatile(
+ " swi #0\n"
+ : "=r" (ret)
+ : "r" (clkid), "r" (ts), "r" (nr)
+ : "memory");
+
+ return ret;
+}
+
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
u64 res;
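
The VDSO_HAS_32BIT_FALLBACK define above tells the generic vDSO code that a dedicated 32-bit fallback exists, so the compat path can keep the ABI of the native compat syscall instead of routing through the 64-bit syscall and narrowing the result. A hypothetical sketch of that dispatch, not the actual lib/vdso code, assuming only the types and fallbacks declared in this header:

    static __always_inline
    long do_clock_gettime32(clockid_t clk, struct old_timespec32 *ts32)
    {
    #ifdef VDSO_HAS_32BIT_FALLBACK
        /* Same ABI (error codes, ranges) as a plain compat syscall. */
        return clock_gettime32_fallback(clk, ts32);
    #else
        /* Otherwise: 64-bit fallback, then narrow the result. */
        struct __kernel_timespec ts;
        long ret = clock_gettime_fallback(clk, &ts);

        if (ret == 0) {
            ts32->tv_sec = (s32)ts.tv_sec;   /* may truncate after y2038 */
            ts32->tv_nsec = ts.tv_nsec;
        }
        return ret;
    #endif
    }
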
diff --git a/arch/arm64/include/uapi/asm/bpf_perf_event.h b/arch/arm64/include/uapi/asm/bpf_perf_event.h
index b551b741653d..5e1e648aeec4 100644
--- a/arch/arm64/include/uapi/asm/bpf_perf_event.h
+++ b/arch/arm64/include/uapi/asm/bpf_perf_event.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
#define _UAPI__ASM_BPF_PERF_EVENT_H__
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index f29f36a65175..d19d14ba9ae4 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -225,8 +225,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
@@ -468,6 +468,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_LOWER_SAFE:
ret = new < cur ? new : cur;
break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ /* Fallthrough */
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
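
For the FTR_HIGHER_OR_ZERO_SAFE change above: CWG/ERG use 0 to mean "not reported", so combining values across CPUs has to collapse to 0 as soon as any CPU reports 0, and only otherwise keep the higher value. A standalone restatement of that policy, with the function name made up for the example:

    #include <assert.h>
    #include <stdint.h>

    /* 0 means "unknown", so an unknown on either side makes the result unknown. */
    static int64_t higher_or_zero_safe(int64_t cur, int64_t new)
    {
        if (!cur || !new)
            return 0;
        return new > cur ? new : cur;
    }

    int main(void)
    {
        assert(higher_or_zero_safe(4, 5) == 5);  /* plain HIGHER_SAFE behaviour */
        assert(higher_or_zero_safe(0, 5) == 0);  /* one CPU didn't report it */
        assert(higher_or_zero_safe(4, 0) == 0);
        return 0;
    }
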
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f8719bd30850..48222a4760c2 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -207,16 +207,16 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
list = user_mode(regs) ? &user_step_hook : &kernel_step_hook;
- rcu_read_lock();
-
+ /*
+ * Since the single-step exception disables interrupts, this function is
+ * not preemptible, so it is safe to walk the RCU-protected list here.
+ */
list_for_each_entry_rcu(hook, list, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
- rcu_read_unlock();
-
return retval;
}
NOKPROBE_SYMBOL(call_step_hook);
@@ -305,14 +305,16 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
list = user_mode(regs) ? &user_break_hook : &kernel_break_hook;
- rcu_read_lock();
+ /*
+ * Since the brk exception disables interrupts, this function is
+ * not preemptible, so it is safe to walk the RCU-protected list here.
+ */
list_for_each_entry_rcu(hook, list, node) {
unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
if ((comment & ~hook->mask) == hook->imm)
fn = hook->fn;
}
- rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9cdc4592da3e..320a30dbe35e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -586,10 +586,8 @@ el1_sync:
b.eq el1_ia
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
b.eq el1_undef
- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
- b.eq el1_sp_pc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el1_sp_pc
+ b.eq el1_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
b.eq el1_undef
cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
@@ -611,9 +609,11 @@ el1_da:
bl do_mem_abort
kernel_exit 1
-el1_sp_pc:
+el1_pc:
/*
- * Stack or PC alignment exception handling
+ * PC alignment exception handling. We don't handle SP alignment faults,
+ * since we will have hit a recursive exception when trying to push the
+ * initial pt_regs.
*/
mrs x0, far_el1
inherit_daif pstate=x23, tmp=x2
@@ -732,9 +732,9 @@ el0_sync:
ccmp x24, #ESR_ELx_EC_WFx, #4, ne
b.eq el0_sys
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
- b.eq el0_sp_pc
+ b.eq el0_sp
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_sp_pc
+ b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
@@ -758,7 +758,7 @@ el0_sync_compat:
cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
b.eq el0_fpsimd_exc
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
- b.eq el0_sp_pc
+ b.eq el0_pc
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
b.eq el0_undef
cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
@@ -858,11 +858,15 @@ el0_fpsimd_exc:
mov x1, sp
bl do_fpsimd_exc
b ret_to_user
+el0_sp:
+ ldr x26, [sp, #S_SP]
+ b el0_sp_pc
+el0_pc:
+ mrs x26, far_el1
el0_sp_pc:
/*
* Stack or PC alignment exception handling
*/
- mrs x26, far_el1
gic_prio_kentry_setup tmp=x0
enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index eec4776ae5f0..37d3912cfe06 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -406,6 +406,18 @@ static __uint128_t arm64_cpu_to_le128(__uint128_t x)
#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+ unsigned int vq)
+{
+ unsigned int i;
+ __uint128_t *p;
+
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+ p = (__uint128_t *)ZREG(sst, vq, i);
+ *p = arm64_cpu_to_le128(fst->vregs[i]);
+ }
+}
+
/*
* Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state.
@@ -423,17 +435,12 @@ static void fpsimd_to_sve(struct task_struct *task)
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
- unsigned int i;
- __uint128_t *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i) {
- p = (__uint128_t *)ZREG(sst, vq, i);
- *p = arm64_cpu_to_le128(fst->vregs[i]);
- }
+ __fpsimd_to_sve(sst, fst, vq);
}
/*
@@ -459,7 +466,7 @@ static void sve_to_fpsimd(struct task_struct *task)
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
- for (i = 0; i < 32; ++i) {
+ for (i = 0; i < SVE_NUM_ZREGS; ++i) {
p = (__uint128_t const *)ZREG(sst, vq, i);
fst->vregs[i] = arm64_le128_to_cpu(*p);
}
@@ -550,8 +557,6 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
unsigned int vq;
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
- unsigned int i;
- __uint128_t *p;
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
@@ -559,11 +564,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
vq = sve_vq_from_vl(task->thread.sve_vl);
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
- for (i = 0; i < 32; ++i) {
- p = (__uint128_t *)ZREG(sst, vq, i);
- *p = arm64_cpu_to_le128(fst->vregs[i]);
- }
+ __fpsimd_to_sve(sst, fst, vq);
}
int sve_set_vector_length(struct task_struct *task,
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index dceb84520948..38ee1514cd9c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -536,13 +536,18 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+
+ /* Fallthrough */
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
+
+ /* Fallthrough */
default:
return -EINVAL;
}
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 46e643e30708..03ff15bffbb6 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -314,18 +314,21 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
/* MOVW instruction relocations. */
case R_AARCH64_MOVW_UABS_G0_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
AARCH64_INSN_IMM_MOVKZ);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
AARCH64_INSN_IMM_MOVKZ);
@@ -393,6 +396,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
+ /* Fall through */
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_adrp(me, sechdrs, loc, val);
if (ovf && ovf != -ERANGE)
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 9d63514b9836..b0e03e052dd1 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -154,12 +154,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
return;
}
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
-
+ start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, callchain_trace, entry);
}
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index bd5dfffca272..c4452827419b 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -21,6 +21,7 @@
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
+#include <asm/daifflags.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <linux/uaccess.h>
@@ -168,33 +169,6 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
}
/*
- * When PSTATE.D is set (masked), then software step exceptions can not be
- * generated.
- * SPSR's D bit shows the value of PSTATE.D immediately before the
- * exception was taken. PSTATE.D is set while entering into any exception
- * mode, however software clears it for any normal (none-debug-exception)
- * mode in the exception entry. Therefore, when we are entering into kprobe
- * breakpoint handler from any normal mode then SPSR.D bit is already
- * cleared, however it is set when we are entering from any debug exception
- * mode.
- * Since we always need to generate single step exception after a kprobe
- * breakpoint exception therefore we need to clear it unconditionally, when
- * we become sure that the current breakpoint exception is for kprobe.
- */
-static void __kprobes
-spsr_set_debug_flag(struct pt_regs *regs, int mask)
-{
- unsigned long spsr = regs->pstate;
-
- if (mask)
- spsr |= PSR_D_BIT;
- else
- spsr &= ~PSR_D_BIT;
-
- regs->pstate = spsr;
-}
-
-/*
* Interrupts need to be disabled before single-step mode is set, and not
* reenabled until after single-step mode ends.
* Without disabling interrupt on local CPU, there is a chance of
@@ -205,17 +179,17 @@ spsr_set_debug_flag(struct pt_regs *regs, int mask)
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- kcb->saved_irqflag = regs->pstate;
+ kcb->saved_irqflag = regs->pstate & DAIF_MASK;
regs->pstate |= PSR_I_BIT;
+ /* Unmask PSTATE.D for enabling software step exceptions. */
+ regs->pstate &= ~PSR_D_BIT;
}
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
struct pt_regs *regs)
{
- if (kcb->saved_irqflag & PSR_I_BIT)
- regs->pstate |= PSR_I_BIT;
- else
- regs->pstate &= ~PSR_I_BIT;
+ regs->pstate &= ~DAIF_MASK;
+ regs->pstate |= kcb->saved_irqflag;
}
static void __kprobes
@@ -252,8 +226,6 @@ static void __kprobes setup_singlestep(struct kprobe *p,
set_ss_context(kcb, slot); /* mark pending ss */
- spsr_set_debug_flag(regs, 0);
-
/* IRQs and single stepping do not mix well. */
kprobes_save_local_irqflag(kcb, regs);
kernel_enable_single_step(regs);
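
The kprobes change above narrows the saved state to the DAIF bits only, so that clearing PSTATE.D for single-stepping is neither undone on restore nor allowed to clobber unrelated pstate bits. A quick self-contained check of that masking pattern; the bit values are repeated here purely for illustration:

    #include <assert.h>

    #define PSR_F_BIT  (1UL << 6)
    #define PSR_I_BIT  (1UL << 7)
    #define PSR_A_BIT  (1UL << 8)
    #define PSR_D_BIT  (1UL << 9)
    #define DAIF_MASK  (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)

    int main(void)
    {
        unsigned long pstate = 0x3c5;              /* EL1h with D, A, I, F set */
        unsigned long saved  = pstate & DAIF_MASK; /* save only the DAIF bits */

        /* While single-stepping: mask interrupts, unmask debug exceptions. */
        pstate |= PSR_I_BIT;
        pstate &= ~PSR_D_BIT;

        /* Restore: put the original DAIF bits back, leave the rest alone. */
        pstate = (pstate & ~DAIF_MASK) | saved;

        assert((pstate & DAIF_MASK) == saved);
        return 0;
    }
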
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6a869d9f304f..f674f28df663 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
childregs->pstate |= PSR_UAO_BIT;
if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
- childregs->pstate |= PSR_SSBS_BIT;
+ set_ssbs_bit(childregs);
if (system_uses_irq_prio_masking())
childregs->pmr_save = GIC_PRIO_IRQON;
@@ -443,6 +443,32 @@ void uao_thread_switch(struct task_struct *next)
}
/*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+ struct pt_regs *regs = task_pt_regs(next);
+
+ /*
+ * Nothing to do for kernel threads, but 'regs' may be junk
+ * (e.g. idle task) so check the flags and bail early.
+ */
+ if (unlikely(next->flags & PF_KTHREAD))
+ return;
+
+ /* If the mitigation is enabled, then we leave SSBS clear. */
+ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+ test_tsk_thread_flag(next, TIF_SSBD))
+ return;
+
+ if (compat_user_mode(regs))
+ set_compat_ssbs_bit(regs);
+ else if (user_mode(regs))
+ set_ssbs_bit(regs);
+}
+
+/*
* We store our current task in sp_el0, which is clobbered by userspace. Keep a
* shadow copy so that we can restore this upon entry from userspace.
*
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
entry_task_switch(next);
uao_thread_switch(next);
ptrauth_thread_switch(next);
+ ssbs_thread_switch(next);
/*
* Complete any pending TLB or cache maintenance on this CPU in case
@@ -498,11 +525,8 @@ unsigned long get_wchan(struct task_struct *p)
if (!stack_page)
return 0;
- frame.fp = thread_saved_fp(p);
- frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
+ start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
+
do {
if (unwind_frame(p, &frame))
goto out;
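
ssbs_thread_switch() above relies on set_ssbs_bit()/set_compat_ssbs_bit(), which are not part of this hunk. A plausible shape for them, stated here as an assumption rather than as this series' definition, is simply setting the SSBS bit in the saved pstate for the appropriate register layout:

    /* Assumed helpers; the real definitions live in asm/processor.h. */
    static inline void set_ssbs_bit(struct pt_regs *regs)
    {
        regs->pstate |= PSR_SSBS_BIT;        /* native AArch64 SPSR layout */
    }

    static inline void set_compat_ssbs_bit(struct pt_regs *regs)
    {
        regs->pstate |= PSR_AA32_SSBS_BIT;   /* AArch32 SPSR layout */
    }
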
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index b21cba90f82d..a5e8b3b9d798 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -29,6 +30,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
return 0;
}
}
+NOKPROBE_SYMBOL(save_return_addr);
void *return_address(unsigned int level)
{
@@ -38,12 +40,9 @@ void *return_address(unsigned int level)
data.level = level + 2;
data.addr = NULL;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)return_address; /* dummy */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
-
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)return_address);
walk_stackframe(current, &frame, save_return_addr, &data);
if (!data.level)
@@ -52,3 +51,4 @@ void *return_address(unsigned int level)
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ea90d3bd9253..018a33e01b0e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -152,8 +152,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
pr_crit("CPU%u: died during early boot\n", cpu);
break;
}
- /* Fall through */
pr_crit("CPU%u: may not have shut down cleanly\n", cpu);
+ /* Fall through */
case CPU_STUCK_IN_KERNEL:
pr_crit("CPU%u: is stuck in kernel\n", cpu);
if (status & CPU_STUCK_REASON_52_BIT_VA)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 62d395151abe..a336cb124320 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -29,9 +30,18 @@
* ldp x29, x30, [sp]
* add sp, sp, #0x10
*/
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
{
unsigned long fp = frame->fp;
+ struct stack_info info;
if (fp & 0xf)
return -EINVAL;
@@ -39,11 +49,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp, NULL))
+ if (!on_accessible_stack(tsk, fp, &info))
+ return -EINVAL;
+
+ if (test_bit(info.type, frame->stacks_done))
return -EINVAL;
+ /*
+ * As stacks grow downward, any valid record on the same stack must be
+ * at a strictly higher address than the prior record.
+ *
+ * Stacks can nest in several valid orders, e.g.
+ *
+ * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+ * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+ *
+ * ... but the nesting itself is strict. Once we transition from one
+ * stack to another, it's never valid to unwind back to that first
+ * stack.
+ */
+ if (info.type == frame->prev_type) {
+ if (fp <= frame->prev_fp)
+ return -EINVAL;
+ } else {
+ set_bit(frame->prev_type, frame->stacks_done);
+ }
+
+ /*
+ * Record this frame record's values and location. The prev_fp and
+ * prev_type are only meaningful to the next unwind_frame() invocation.
+ */
frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+ frame->prev_fp = fp;
+ frame->prev_type = info.type;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk->ret_stack &&
@@ -73,6 +112,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
return 0;
}
+NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
@@ -87,6 +127,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
break;
}
}
+NOKPROBE_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -122,12 +163,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
data.skip = trace->skip;
data.no_sched_functions = 0;
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
-
+ start_backtrace(&frame, regs->regs[29], regs->pc);
walk_stackframe(current, &frame, save_trace, &data);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -146,17 +182,15 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
data.no_sched_functions = nosched;
if (tsk != current) {
- frame.fp = thread_saved_fp(tsk);
- frame.pc = thread_saved_pc(tsk);
+ start_backtrace(&frame, thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
} else {
/* We don't want this function nor the caller */
data.skip += 2;
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)__save_stack_trace;
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)__save_stack_trace);
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
walk_stackframe(tsk, &frame, save_trace, &data);
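
With the callers converted to start_backtrace(), the walk_stackframe() contract is unchanged: the callback returns non-zero to stop the walk. A small hypothetical user of that contract (the frame-counting callback is made up for the example):

    struct count_data {
        int frames;
        int max;
    };

    static int count_frame(struct stackframe *frame, void *d)
    {
        struct count_data *data = d;

        data->frames++;
        return data->frames >= data->max;   /* non-zero return ends the walk */
    }

    static int count_stack_depth(int max)
    {
        struct stackframe frame;
        struct count_data data = { .frames = 0, .max = max };

        start_backtrace(&frame,
                        (unsigned long)__builtin_frame_address(0),
                        (unsigned long)count_stack_depth);
        walk_stackframe(current, &frame, count_frame, &data);
        return data.frames;
    }
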
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 9f25aedeac9d..0b2946414dc9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -38,11 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
if (!in_lock_functions(regs->pc))
return regs->pc;
- frame.fp = regs->regs[29];
- frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
+ start_backtrace(&frame, regs->regs[29], regs->pc);
+
do {
int ret = unwind_frame(NULL, &frame);
if (ret < 0)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8c03456dade6..d3313797cca9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -100,18 +100,17 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
return;
if (tsk == current) {
- frame.fp = (unsigned long)__builtin_frame_address(0);
- frame.pc = (unsigned long)dump_backtrace;
+ start_backtrace(&frame,
+ (unsigned long)__builtin_frame_address(0),
+ (unsigned long)dump_backtrace);
} else {
/*
* task blocked in __switch_to
*/
- frame.fp = thread_saved_fp(tsk);
- frame.pc = thread_saved_pc(tsk);
+ start_backtrace(&frame,
+ thread_saved_fp(tsk),
+ thread_saved_pc(tsk));
}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- frame.graph = 0;
-#endif
printk("Call trace:\n");
do {
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 4ab863045188..dd2514bb1511 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,10 +32,10 @@ UBSAN_SANITIZE := n
OBJECT_FILES_NON_STANDARD := y
KCOV_INSTRUMENT := n
-ifeq ($(c-gettimeofday-y),)
CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
-else
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -include $(c-gettimeofday-y)
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
endif
# Clang versions less than 8 do not support -mcmodel=tiny
@@ -57,8 +57,7 @@ $(obj)/vdso.o : $(obj)/vdso.so
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,ld)
- $(call if_changed,vdso_check)
+ $(call if_changed,vdsold_and_vdso_check)
# Strip rule for the .so file
$(obj)/%.so: OBJCOPYFLAGS := -S
@@ -74,8 +73,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
$(call if_changed,vdsosym)
# Actual build commands
-quiet_cmd_vdsocc = VDSOCC $@
- cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $<
+quiet_cmd_vdsold_and_vdso_check = LD $@
+ cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
# Install commands for the unstripped file
quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 60a4c6239712..1fba0776ed40 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -144,8 +144,7 @@ $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
# Link rule for the .so file, .lds has to be first
$(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
- $(call if_changed,vdsold)
- $(call if_changed,vdso_check)
+ $(call if_changed,vdsold_and_vdso_check)
# Compilation rules for the vDSO sources
$(c-obj-vdso): %.o: %.c FORCE
@@ -156,14 +155,17 @@ $(asm-obj-vdso): %.o: %.S FORCE
$(call if_changed_dep,vdsoas)
# Actual build commands
-quiet_cmd_vdsold = VDSOL $@
+quiet_cmd_vdsold_and_vdso_check = LD32 $@
+ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+
+quiet_cmd_vdsold = LD32 $@
cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
-Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-quiet_cmd_vdsocc = VDSOC $@
+quiet_cmd_vdsocc = CC32 $@
cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
-quiet_cmd_vdsocc_gettimeofday = VDSOC_GTD $@
+quiet_cmd_vdsocc_gettimeofday = CC32 $@
cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsoas = AS32 $@
cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
quiet_cmd_vdsomunge = MUNGE $@
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9568c116ac7f..cfd65b63f36f 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -777,6 +777,53 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
+/*
+ * In debug exception context, we explicitly disable preemption despite
+ * having interrupts disabled.
+ * This serves two purposes: it makes it much less likely that we would
+ * accidentally schedule in exception context and it will force a warning
+ * if we somehow manage to schedule by accident.
+ */
+static void debug_exception_enter(struct pt_regs *regs)
+{
+ /*
+ * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
+ * already disabled to preserve the last enabled/disabled addresses.
+ */
+ if (interrupts_enabled(regs))
+ trace_hardirqs_off();
+
+ if (user_mode(regs)) {
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+ } else {
+ /*
+ * We might have interrupted pretty much anything. In
+ * fact, if we're a debug exception, we can even interrupt
+ * NMI processing. We don't want in_nmi() to return true here,
+ * but we do need to notify RCU.
+ */
+ rcu_nmi_enter();
+ }
+
+ preempt_disable();
+
+ /* This code is a bit fragile. Test it. */
+ RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
+}
+NOKPROBE_SYMBOL(debug_exception_enter);
+
+static void debug_exception_exit(struct pt_regs *regs)
+{
+ preempt_enable_no_resched();
+
+ if (!user_mode(regs))
+ rcu_nmi_exit();
+
+ if (interrupts_enabled(regs))
+ trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(debug_exception_exit);
+
#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -817,12 +864,7 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
if (cortex_a76_erratum_1463225_debug_handler(regs))
return;
- /*
- * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
- * already disabled to preserve the last enabled/disabled addresses.
- */
- if (interrupts_enabled(regs))
- trace_hardirqs_off();
+ debug_exception_enter(regs);
if (user_mode(regs) && !is_ttbr0_addr(pc))
arm64_apply_bp_hardening();
@@ -832,7 +874,6 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
inf->sig, inf->code, (void __user *)pc, esr);
}
- if (interrupts_enabled(regs))
- trace_hardirqs_on();
+ debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);
diff --git a/arch/csky/include/uapi/asm/byteorder.h b/arch/csky/include/uapi/asm/byteorder.h
index b079ec715cdf..d150cd664873 100644
--- a/arch/csky/include/uapi/asm/byteorder.h
+++ b/arch/csky/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_BYTEORDER_H
diff --git a/arch/csky/include/uapi/asm/cachectl.h b/arch/csky/include/uapi/asm/cachectl.h
index ddf2f39aa925..ed7fad1ea20d 100644
--- a/arch/csky/include/uapi/asm/cachectl.h
+++ b/arch/csky/include/uapi/asm/cachectl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_CSKY_CACHECTL_H
#define __ASM_CSKY_CACHECTL_H
diff --git a/arch/csky/include/uapi/asm/perf_regs.h b/arch/csky/include/uapi/asm/perf_regs.h
index ee323d818592..49d4e147a559 100644
--- a/arch/csky/include/uapi/asm/perf_regs.h
+++ b/arch/csky/include/uapi/asm/perf_regs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef _ASM_CSKY_PERF_REGS_H
diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h
index 4e248d5b86ef..66b2268e324e 100644
--- a/arch/csky/include/uapi/asm/ptrace.h
+++ b/arch/csky/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef _CSKY_PTRACE_H
diff --git a/arch/csky/include/uapi/asm/sigcontext.h b/arch/csky/include/uapi/asm/sigcontext.h
index e81e7ff11e36..670c020f2cb8 100644
--- a/arch/csky/include/uapi/asm/sigcontext.h
+++ b/arch/csky/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_SIGCONTEXT_H
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index ec60e49cea66..211c983c7282 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 1f730ded5224..cc88a08bc1f7 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -398,6 +398,7 @@ static int dwc3_octeon_clocks_start(struct device *dev, u64 base)
default:
dev_err(dev, "Invalid ref_clk %u, using 100000000 instead\n",
clock_rate);
+ /* fall through */
case 100000000:
mpll_mul = 0x19;
if (ref_clk_sel < 2)
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index e0dd66881da6..f777e44653d5 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -69,6 +69,8 @@ static int __populate_cache_leaves(unsigned int cpu)
if (c->tcache.waysize)
populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+ this_cpu_ci->cpu_map_populated = true;
+
return 0;
}
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 5f209f111e59..df7ddd246eaa 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
static int __init init_pit_clocksource(void)
{
- if (num_possible_cpus() > 1) /* PIT does not scale! */
+ if (num_possible_cpus() > 1 || /* PIT does not scale! */
+ !clockevent_state_periodic(&i8253_clockevent))
return 0;
return clocksource_i8253_init();
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index e5de6bac8197..754094b40a75 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -140,6 +140,7 @@ static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
/* These are unconditional and in j_format. */
case jal_op:
arch->gprs[31] = instpc + 8;
+ /* fall through */
case j_op:
epc += 4;
epc >>= 28;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 7c04b17f4a48..96c13a0ab078 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -172,12 +172,15 @@ static void mipsxx_cpu_setup(void *args)
case 4:
w_c0_perfctrl3(0);
w_c0_perfcntr3(reg.counter[3]);
+ /* fall through */
case 3:
w_c0_perfctrl2(0);
w_c0_perfcntr2(reg.counter[2]);
+ /* fall through */
case 2:
w_c0_perfctrl1(0);
w_c0_perfcntr1(reg.counter[1]);
+ /* fall through */
case 1:
w_c0_perfctrl0(0);
w_c0_perfcntr0(reg.counter[0]);
@@ -195,10 +198,13 @@ static void mipsxx_cpu_start(void *args)
switch (counters) {
case 4:
w_c0_perfctrl3(WHAT | reg.control[3]);
+ /* fall through */
case 3:
w_c0_perfctrl2(WHAT | reg.control[2]);
+ /* fall through */
case 2:
w_c0_perfctrl1(WHAT | reg.control[1]);
+ /* fall through */
case 1:
w_c0_perfctrl0(WHAT | reg.control[0]);
}
@@ -215,10 +221,13 @@ static void mipsxx_cpu_stop(void *args)
switch (counters) {
case 4:
w_c0_perfctrl3(0);
+ /* fall through */
case 3:
w_c0_perfctrl2(0);
+ /* fall through */
case 2:
w_c0_perfctrl1(0);
+ /* fall through */
case 1:
w_c0_perfctrl0(0);
}
@@ -236,6 +245,7 @@ static int mipsxx_perfcount_handler(void)
switch (counters) {
#define HANDLE_COUNTER(n) \
+ /* fall through */ \
case n + 1: \
control = r_c0_perfctrl ## n(); \
counter = r_c0_perfcntr ## n(); \
@@ -297,12 +307,15 @@ static void reset_counters(void *arg)
case 4:
w_c0_perfctrl3(0);
w_c0_perfcntr3(0);
+ /* fall through */
case 3:
w_c0_perfctrl2(0);
w_c0_perfcntr2(0);
+ /* fall through */
case 2:
w_c0_perfctrl1(0);
w_c0_perfcntr1(0);
+ /* fall through */
case 1:
w_c0_perfctrl0(0);
w_c0_perfcntr0(0);
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index d02eb9d16b55..925c72348fb6 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -474,6 +474,7 @@ static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn)
if (PCI_SLOT(devfn) == 0)
return bcm_pcie_readl(PCIE_DLSTATUS_REG)
& DLSTATUS_PHYLINKUP;
+ /* else, fall through */
default:
return false;
}
diff --git a/arch/mips/vdso/vdso.h b/arch/mips/vdso/vdso.h
index 14b1931be69c..b65b169778e3 100644
--- a/arch/mips/vdso/vdso.h
+++ b/arch/mips/vdso/vdso.h
@@ -9,6 +9,7 @@
#if _MIPS_SIM != _MIPS_SIM_ABI64 && defined(CONFIG_64BIT)
/* Building 32-bit VDSO for the 64-bit kernel. Fake a 32-bit Kconfig. */
+#define BUILD_VDSO32_64
#undef CONFIG_64BIT
#define CONFIG_32BIT 1
#ifndef __ASSEMBLY__
diff --git a/arch/nds32/include/uapi/asm/auxvec.h b/arch/nds32/include/uapi/asm/auxvec.h
index b5d58ea8decb..bc0b92ab8c15 100644
--- a/arch/nds32/include/uapi/asm/auxvec.h
+++ b/arch/nds32/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_AUXVEC_H
diff --git a/arch/nds32/include/uapi/asm/byteorder.h b/arch/nds32/include/uapi/asm/byteorder.h
index 511e653c709d..c264ef12c49c 100644
--- a/arch/nds32/include/uapi/asm/byteorder.h
+++ b/arch/nds32/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __NDS32_BYTEORDER_H__
diff --git a/arch/nds32/include/uapi/asm/cachectl.h b/arch/nds32/include/uapi/asm/cachectl.h
index 73793662815c..31b9b439d819 100644
--- a/arch/nds32/include/uapi/asm/cachectl.h
+++ b/arch/nds32/include/uapi/asm/cachectl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 1994, 1995, 1996 by Ralf Baechle
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASM_CACHECTL
diff --git a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
index d54a5d6c6538..f17396db16ec 100644
--- a/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
+++ b/arch/nds32/include/uapi/asm/fp_udfiex_crtl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (C) 2005-2019 Andes Technology Corporation */
#ifndef _FP_UDF_IEX_CRTL_H
#define _FP_UDF_IEX_CRTL_H
diff --git a/arch/nds32/include/uapi/asm/param.h b/arch/nds32/include/uapi/asm/param.h
index 2977534a6bd3..48d00328d328 100644
--- a/arch/nds32/include/uapi/asm/param.h
+++ b/arch/nds32/include/uapi/asm/param.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PARAM_H
diff --git a/arch/nds32/include/uapi/asm/ptrace.h b/arch/nds32/include/uapi/asm/ptrace.h
index 1a6e01c00e6f..d76217c7c010 100644
--- a/arch/nds32/include/uapi/asm/ptrace.h
+++ b/arch/nds32/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __UAPI_ASM_NDS32_PTRACE_H
diff --git a/arch/nds32/include/uapi/asm/sigcontext.h b/arch/nds32/include/uapi/asm/sigcontext.h
index dc89af7ddcc3..6c1e6648878f 100644
--- a/arch/nds32/include/uapi/asm/sigcontext.h
+++ b/arch/nds32/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef _ASMNDS32_SIGCONTEXT_H
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index a0b2f7b9c0f2..410795e280fe 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2005-2017 Andes Technology Corporation
#define __ARCH_WANT_STAT64
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 8acb8fa1f8d6..3b77d729057f 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -19,8 +19,6 @@
KBUILD_IMAGE := vmlinuz
-KBUILD_DEFCONFIG := default_defconfig
-
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1
LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
@@ -182,5 +180,8 @@ define archhelp
@echo ' zinstall - Install compressed vmlinuz kernel'
endef
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
archheaders:
$(Q)$(MAKE) $(build)=arch/parisc/kernel/syscalls all
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index 2da8624e5cf6..1e5879c6a752 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -12,6 +12,7 @@ UBSAN_SANITIZE := n
targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
targets += misc.o piggy.o sizes.h head.o real2.o firmware.o
+targets += real2.S firmware.c
KBUILD_CFLAGS := -D__KERNEL__ -O2 -DBOOTLOADER
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -55,7 +56,8 @@ $(obj)/misc.o: $(obj)/sizes.h
CPPFLAGS_vmlinux.lds += -I$(objtree)/$(obj) -DBOOTLOADER
$(obj)/vmlinux.lds: $(obj)/sizes.h
-$(obj)/vmlinux.bin: vmlinux
+OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S
+$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
vmlinux.bin.all-y := $(obj)/vmlinux.bin
diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
index bfd7872739a3..2ac3a643f2eb 100644
--- a/arch/parisc/boot/compressed/vmlinux.lds.S
+++ b/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -48,8 +48,8 @@ SECTIONS
*(.rodata.compressed)
}
- /* bootloader code and data starts behind area of extracted kernel */
- . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
+ /* bootloader code and data start at least behind the area of the extracted kernel */
+ . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
/* align on next page boundary */
. = ALIGN(4096);
diff --git a/arch/parisc/configs/default_defconfig b/arch/parisc/configs/defconfig
index 5b877ca34ebf..5b877ca34ebf 100644
--- a/arch/parisc/configs/default_defconfig
+++ b/arch/parisc/configs/defconfig
diff --git a/arch/parisc/include/asm/kprobes.h b/arch/parisc/include/asm/kprobes.h
index e09cf2deeafe..904034da4974 100644
--- a/arch/parisc/include/asm/kprobes.h
+++ b/arch/parisc/include/asm/kprobes.h
@@ -50,6 +50,10 @@ struct kprobe_ctlblk {
int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs);
int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs);
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ return 0;
+}
#endif /* CONFIG_KPROBES */
#endif /* _PARISC_KPROBES_H */
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c
index d784ccdd8fef..b6fb30f2e4bf 100644
--- a/arch/parisc/kernel/ftrace.c
+++ b/arch/parisc/kernel/ftrace.c
@@ -181,8 +181,9 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
for (i = 0; i < ARRAY_SIZE(insn); i++)
insn[i] = INSN_NOP;
+ __patch_text((void *)rec->ip, INSN_NOP);
__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
- insn, sizeof(insn));
+ insn, sizeof(insn)-4);
return 0;
}
#endif
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index ba67893a1d72..df46b0e5a915 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -63,7 +63,7 @@ ENTRY_CFI(flush_tlb_all_local)
/* Flush Instruction Tlb */
- LDREG ITLB_SID_BASE(%r1), %r20
+88: LDREG ITLB_SID_BASE(%r1), %r20
LDREG ITLB_SID_STRIDE(%r1), %r21
LDREG ITLB_SID_COUNT(%r1), %r22
LDREG ITLB_OFF_BASE(%r1), %arg0
@@ -103,6 +103,7 @@ fitonemiddle: /* Loop if LOOP = 1 */
add %r21, %r20, %r20 /* increment space */
fitdone:
+ ALTERNATIVE(88b, fitdone, ALT_COND_NO_SPLIT_TLB, INSN_NOP)
/* Flush Data Tlb */
diff --git a/arch/parisc/math-emu/Makefile b/arch/parisc/math-emu/Makefile
index b6c4b254901a..55c1396580a4 100644
--- a/arch/parisc/math-emu/Makefile
+++ b/arch/parisc/math-emu/Makefile
@@ -18,3 +18,4 @@ obj-y := frnd.o driver.o decode_exc.o fpudispatch.o denormal.o \
# other very old or stripped-down PA-RISC CPUs -- not currently supported
obj-$(CONFIG_MATH_EMULATION) += unimplemented-math-emulation.o
+CFLAGS_REMOVE_fpudispatch.o = -Wimplicit-fallthrough=3
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 6dd4669ce7a5..adbd5e2144a3 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -66,6 +66,7 @@ parisc_acctyp(unsigned long code, unsigned int inst)
case 0x30000000: /* coproc2 */
if (bit22set(inst))
return VM_WRITE;
+ /* fall through */
case 0x0: /* indexed/memory management */
if (bit22set(inst)) {
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index d8dcd8820369..77f6ebf97113 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -121,6 +121,7 @@ config PPC
select ARCH_32BIT_OFF_T if PPC32
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 463c63a9fcf1..11112023e327 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -302,9 +302,14 @@
#define H_SCM_UNBIND_MEM 0x3F0
#define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4
#define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8
-#define H_SCM_MEM_QUERY 0x3FC
-#define H_SCM_BLOCK_CLEAR 0x400
-#define MAX_HCALL_OPCODE H_SCM_BLOCK_CLEAR
+#define H_SCM_UNBIND_ALL 0x3FC
+#define H_SCM_HEALTH 0x400
+#define H_SCM_PERFORMANCE_STATS 0x418
+#define MAX_HCALL_OPCODE H_SCM_PERFORMANCE_STATS
+
+/* Scope args for H_SCM_UNBIND_ALL */
+#define H_UNBIND_SCOPE_ALL (0x1)
+#define H_UNBIND_SCOPE_DRC (0x2)
/* H_VIOCTL functions */
#define H_GET_VIOA_DUMP_SIZE 0x01
diff --git a/arch/powerpc/include/asm/pmc.h b/arch/powerpc/include/asm/pmc.h
index dc9a1ca70edf..c6bbe9778d3c 100644
--- a/arch/powerpc/include/asm/pmc.h
+++ b/arch/powerpc/include/asm/pmc.h
@@ -27,11 +27,10 @@ static inline void ppc_set_pmu_inuse(int inuse)
#ifdef CONFIG_PPC_PSERIES
get_lppaca()->pmcregs_in_use = inuse;
#endif
- } else {
+ }
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- get_paca()->pmcregs_in_use = inuse;
+ get_paca()->pmcregs_in_use = inuse;
#endif
- }
#endif
}
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 68473c3c471c..b0720c7c3fcf 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -49,6 +49,7 @@
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/include/uapi/asm/bpf_perf_event.h b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
index b551b741653d..5e1e648aeec4 100644
--- a/arch/powerpc/include/uapi/asm/bpf_perf_event.h
+++ b/arch/powerpc/include/uapi/asm/bpf_perf_event.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
#define _UAPI__ASM_BPF_PERF_EVENT_H__
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index 01555c6ae0f5..be48c2215fa2 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -31,7 +31,7 @@
* Struct fields are always 32 or 64 bit aligned, depending on them being 32
* or 64 bit wide respectively.
*
- * See Documentation/virtual/kvm/ppc-pv.txt
+ * See Documentation/virt/kvm/ppc-pv.txt
*/
struct kvm_vcpu_arch_shared {
__u64 scratch1;
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 56dfa7a2a6f2..ea0c69236789 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -49,7 +49,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o misc_$(BITS).o \
- of_platform.o prom_parse.o
+ of_platform.o prom_parse.o \
+ dma-common.o
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 7107ad86de65..92045ed64976 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -176,9 +176,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
ret |= __get_user_inatomic(temp.v[1], p++);
ret |= __get_user_inatomic(temp.v[2], p++);
ret |= __get_user_inatomic(temp.v[3], p++);
+ /* fall through */
case 4:
ret |= __get_user_inatomic(temp.v[4], p++);
ret |= __get_user_inatomic(temp.v[5], p++);
+ /* fall through */
case 2:
ret |= __get_user_inatomic(temp.v[6], p++);
ret |= __get_user_inatomic(temp.v[7], p++);
@@ -259,9 +261,11 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg,
ret |= __put_user_inatomic(data.v[1], p++);
ret |= __put_user_inatomic(data.v[2], p++);
ret |= __put_user_inatomic(data.v[3], p++);
+ /* fall through */
case 4:
ret |= __put_user_inatomic(data.v[4], p++);
ret |= __put_user_inatomic(data.v[5], p++);
+ /* fall through */
case 2:
ret |= __put_user_inatomic(data.v[6], p++);
ret |= __put_user_inatomic(data.v[7], p++);
diff --git a/arch/powerpc/kernel/dma-common.c b/arch/powerpc/kernel/dma-common.c
new file mode 100644
index 000000000000..dc7ef6b17b69
--- /dev/null
+++ b/arch/powerpc/kernel/dma-common.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Contains common dma routines for all powerpc platforms.
+ *
+ * Copyright (C) 2019 Shawn Anastasio.
+ */
+
+#include <linux/mm.h>
+#include <linux/dma-noncoherent.h>
+
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
+{
+ if (!dev_is_dma_coherent(dev))
+ return pgprot_noncached(prot);
+ return prot;
+}
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 85fdb6d879f1..54fab22c9a43 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -597,6 +597,14 @@ ppc_clone:
stw r0,_TRAP(r1) /* register set saved */
b sys_clone
+ .globl ppc_clone3
+ppc_clone3:
+ SAVE_NVGPRS(r1)
+ lwz r0,_TRAP(r1)
+ rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
+ stw r0,_TRAP(r1) /* register set saved */
+ b sys_clone3
+
.globl ppc_swapcontext
ppc_swapcontext:
SAVE_NVGPRS(r1)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d9105fcf4021..0a0b5310f54a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -487,6 +487,11 @@ _GLOBAL(ppc_clone)
bl sys_clone
b .Lsyscall_exit
+_GLOBAL(ppc_clone3)
+ bl save_nvgprs
+ bl sys_clone3
+ b .Lsyscall_exit
+
_GLOBAL(ppc32_swapcontext)
bl save_nvgprs
bl compat_sys_swapcontext
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index eee5bef736c8..6ba3cc2ef8ab 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1531,7 +1531,7 @@ EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
*
* Call convention:
*
- * syscall register convention is in Documentation/powerpc/syscall64-abi.txt
+ * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
*
* For hypercalls, the register convention is as follows:
* r0 volatile
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index f50b708d6d77..98600b276f76 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1198,6 +1198,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto bad;
if (MSR_TM_ACTIVE(msr_hi<<32)) {
+ /* Trying to start TM on a non-TM system */
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto bad;
/* We only recheckpoint on return if we're
* in a transaction.
*/
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2f80e270c7b0..117515564ec7 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -771,6 +771,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
+
+ /* Trying to start TM on a non-TM system */
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto badframe;
+
if (__get_user(uc_transact, &uc->uc_link))
goto badframe;
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 3331749aab20..43f736ed47f2 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -516,4 +516,4 @@
432 common fsmount sys_fsmount
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
-# 435 reserved for clone3
+435 nospu clone3 ppc_clone3
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 653936177857..18f244aad7aa 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -239,6 +239,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
case 2:
case 6:
pte->may_write = true;
+ /* fall through */
case 3:
case 5:
case 7:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index ec1804f822af..cde3f5a4b3e4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3569,9 +3569,18 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
if (kvmhv_on_pseries()) {
+ /*
+ * We need to save and restore the guest visible part of the
+ * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
+ * doesn't do this for us. Note this is only required on pseries,
+ * since otherwise it is done in kvmhv_load_hv_regs_and_go() below.
+ */
+ unsigned long host_psscr;
/* call our hypervisor to load up HV regs and go */
struct hv_guest_state hvregs;
+ host_psscr = mfspr(SPRN_PSSCR_PR);
+ mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
kvmhv_save_hv_regs(vcpu, &hvregs);
hvregs.lpcr = lpcr;
vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
@@ -3590,6 +3599,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
+ mtspr(SPRN_PSSCR_PR, host_psscr);
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
@@ -3654,6 +3665,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
vcpu->arch.vpa.dirty = 1;
save_pmu = lp->pmcregs_in_use;
}
+ /* Must save pmu if this guest is capable of running nested guests */
+ save_pmu |= nesting_enabled(vcpu->kvm);
kvmhv_save_guest_pmu(vcpu, save_pmu);
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 6ca0d7376a9f..e3ba67095895 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -1986,10 +1986,8 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
xive->single_escalation = xive_native_has_single_escalation();
- if (ret) {
- kfree(xive);
+ if (ret)
return ret;
- }
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 5596c8ec221a..a998823f68a3 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -1090,9 +1090,9 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
xive->ops = &kvmppc_xive_native_ops;
if (ret)
- kfree(xive);
+ return ret;
- return ret;
+ return 0;
}
/*
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 9a5963e07a82..b8ad14bb1170 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1899,11 +1899,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
*
* For guests on platforms before POWER9, we clamp the limit to 1G
* to avoid some funky things such as RTAS bugs etc...
+ *
+ * On POWER9 we limit to 1TB in case the host erroneously told us that
+ * the RMA was >1TB. Effective address bits 0:23 are treated as zero
+ * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
+ * for virtual real mode addressing and so it doesn't make sense to
+ * have an area larger than 1TB as it can't be addressed.
*/
if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
ppc64_rma_size = first_memblock_size;
if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
+ else
+ ppc64_rma_size = min_t(u64, ppc64_rma_size,
+ 1UL << SID_SHIFT_1T);
/* Finally limit subsequent allocations */
memblock_set_current_limit(ppc64_rma_size);
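
A quick illustration of the aliasing described in the comment above: with effective-address bits 0:23 treated as zero, only a 40-bit (1TB) offset remains, so anything past 1TB wraps back into the first 1TB.

    #include <assert.h>

    /* In virtual real mode only EA bits 24:63 are used, i.e. addr modulo 1TB. */
    #define VRMA_ALIAS(addr)  ((addr) & ((1UL << 40) - 1))

    int main(void)
    {
        /* An access just past 1TB aliases back to the bottom of the region, */
        /* so an RMA advertised as larger than 1TB cannot actually be used.  */
        assert(VRMA_ALIAS((1UL << 40) + 0x1000) == 0x1000);
        return 0;
    }
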
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 0d62be3cba47..74f4555a62ba 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -21,7 +21,7 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}
-static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
pmd_t *pmd;
unsigned long k_cur, k_next;
@@ -35,7 +35,10 @@ static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_
if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
continue;
- new = pte_alloc_one_kernel(&init_mm);
+ if (slab_is_available())
+ new = pte_alloc_one_kernel(&init_mm);
+ else
+ new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
if (!new)
return -ENOMEM;
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 9259337d7374..9191a66b3bc5 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -239,7 +239,7 @@ void __init paging_init(void)
#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
- ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
+ 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
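
The max_zone_pfns change above fixes an off-by-one: the old expression computes the last addressable pfn rather than the (exclusive) pfn limit, losing the final page of the DMA zone. A quick check with illustrative constants (a 1 GiB zone, 4 KiB pages):

    #include <assert.h>

    #define PAGE_SHIFT          12
    #define ARCH_ZONE_DMA_BITS  30   /* illustrative: a 1 GiB DMA zone */

    int main(void)
    {
        unsigned long old_pfns = ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT;
        unsigned long new_pfns = 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT);

        assert(old_pfns == 262143);  /* one page short of 1 GiB */
        assert(new_pfns == 262144);  /* the full 1 GiB worth of pages */
        return 0;
    }
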
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
index c8ec670ee924..a5ac371a3f06 100644
--- a/arch/powerpc/platforms/pseries/papr_scm.c
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/libnvdimm.h>
#include <linux/platform_device.h>
+#include <linux/delay.h>
#include <asm/plpar_wrappers.h>
@@ -43,8 +44,9 @@ struct papr_scm_priv {
static int drc_pmem_bind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
- uint64_t rc, token;
uint64_t saved = 0;
+ uint64_t token;
+ int64_t rc;
/*
* When the hypervisor cannot map all the requested memory in a single
@@ -64,6 +66,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
} while (rc == H_BUSY);
if (rc) {
+ /* H_OVERLAP needs a separate error path */
+ if (rc == H_OVERLAP)
+ return -EBUSY;
+
dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
return -ENXIO;
}
@@ -78,22 +84,36 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
static int drc_pmem_unbind(struct papr_scm_priv *p)
{
unsigned long ret[PLPAR_HCALL_BUFSIZE];
- uint64_t rc, token;
+ uint64_t token = 0;
+ int64_t rc;
- token = 0;
+ dev_dbg(&p->pdev->dev, "unbind drc %x\n", p->drc_index);
- /* NB: unbind has the same retry requirements mentioned above */
+ /* NB: unbind has the same retry requirements as drc_pmem_bind() */
do {
- rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
- p->bound_addr, p->blocks, token);
+
+ /* Unbind of all SCM resources associated with drcIndex */
+ rc = plpar_hcall(H_SCM_UNBIND_ALL, ret, H_UNBIND_SCOPE_DRC,
+ p->drc_index, token);
token = ret[0];
- cond_resched();
+
+ /* Check if we are stalled for some time */
+ if (H_IS_LONG_BUSY(rc)) {
+ msleep(get_longbusy_msecs(rc));
+ rc = H_BUSY;
+ } else if (rc == H_BUSY) {
+ cond_resched();
+ }
+
} while (rc == H_BUSY);
if (rc)
dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
+ else
+ dev_dbg(&p->pdev->dev, "unbind drc %x complete\n",
+ p->drc_index);
- return !!rc;
+ return rc == H_SUCCESS ? 0 : -ENXIO;
}
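The reworked unbind loop distinguishes a long-busy return (sleep for the hinted interval, then retry) from a plain H_BUSY (just reschedule). A hedged standalone sketch of that retry shape; the H_* constants, H_IS_LONG_BUSY() and get_longbusy_msecs() below are stand-ins for the real hvcall.h definitions, and fake_hcall() scripts a hypervisor that is busy twice before succeeding:

    #include <stdio.h>
    #include <unistd.h>

    /* stand-ins for the hvcall.h return codes and helpers used above */
    #define H_SUCCESS		0
    #define H_BUSY			1
    #define H_LONG_BUSY_10MSEC	2
    #define H_IS_LONG_BUSY(rc)	((rc) == H_LONG_BUSY_10MSEC)

    static int get_longbusy_msecs(long rc)	{ (void)rc; return 10; }

    static long fake_hcall(void)		/* scripted hypervisor: busy twice, then done */
    {
        static int calls;
        const long script[] = { H_LONG_BUSY_10MSEC, H_BUSY, H_SUCCESS };

        return script[calls < 2 ? calls++ : 2];
    }

    int main(void)
    {
        long rc;

        do {
            rc = fake_hcall();
            if (H_IS_LONG_BUSY(rc)) {
                usleep(get_longbusy_msecs(rc) * 1000);	/* msleep() in the kernel */
                rc = H_BUSY;
            }
            /* a plain H_BUSY would cond_resched() in the kernel before retrying */
        } while (rc == H_BUSY);

        printf("final rc=%ld\n", rc);
        return rc == H_SUCCESS ? 0 : 1;
    }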
static int papr_scm_meta_get(struct papr_scm_priv *p,
@@ -255,12 +275,32 @@ static const struct attribute_group *papr_scm_dimm_groups[] = {
NULL,
};
+static inline int papr_scm_node(int node)
+{
+ int min_dist = INT_MAX, dist;
+ int nid, min_node;
+
+ if ((node == NUMA_NO_NODE) || node_online(node))
+ return node;
+
+ min_node = first_online_node;
+ for_each_online_node(nid) {
+ dist = node_distance(node, nid);
+ if (dist < min_dist) {
+ min_dist = dist;
+ min_node = nid;
+ }
+ }
+ return min_node;
+}
+
static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
{
struct device *dev = &p->pdev->dev;
struct nd_mapping_desc mapping;
struct nd_region_desc ndr_desc;
unsigned long dimm_flags;
+ int target_nid, online_nid;
p->bus_desc.ndctl = papr_scm_ndctl;
p->bus_desc.module = THIS_MODULE;
@@ -299,8 +339,10 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.attr_groups = region_attr_groups;
- ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
- ndr_desc.target_node = ndr_desc.numa_node;
+ target_nid = dev_to_node(&p->pdev->dev);
+ online_nid = papr_scm_node(target_nid);
+ ndr_desc.numa_node = online_nid;
+ ndr_desc.target_node = target_nid;
ndr_desc.res = &p->res;
ndr_desc.of_node = p->dn;
ndr_desc.provider_data = p;
@@ -318,6 +360,9 @@ static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
ndr_desc.res, p->dn);
goto err;
}
+ if (target_nid != online_nid)
+ dev_info(dev, "Region registered with target node %d and online node %d",
+ target_nid, online_nid);
return 0;
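The split between target_node and numa_node above relies on papr_scm_node() picking the closest online node by node_distance() when the device's own node is offline. A worked, hedged example with a made-up four-node topology (the distance table and online map below are invented for illustration):

    #include <stdio.h>
    #include <limits.h>

    #define NR_NODES 4

    /* invented topology: nodes 0 and 1 are online, the device's node 2 is not */
    static const int online[NR_NODES] = { 1, 1, 0, 0 };
    static const int distance[NR_NODES][NR_NODES] = {
        { 10, 20, 40, 80 },
        { 20, 10, 20, 80 },
        { 40, 20, 10, 80 },
        { 80, 80, 80, 10 },
    };

    static int nearest_online(int node)	/* mirrors papr_scm_node() */
    {
        int nid, min_node = 0, min_dist = INT_MAX;

        if (node < 0 || online[node])	/* NUMA_NO_NODE or already online */
            return node;
        for (nid = 0; nid < NR_NODES; nid++) {
            if (!online[nid])
                continue;
            if (distance[node][nid] < min_dist) {
                min_dist = distance[node][nid];
                min_node = nid;
            }
        }
        return min_node;
    }

    int main(void)
    {
        int target_nid = 2;
        int online_nid = nearest_online(target_nid);

        /* as above: ndr_desc.numa_node = online_nid, ndr_desc.target_node = target_nid */
        printf("target node %d -> online node %d\n", target_nid, online_nid);
        return 0;
    }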
@@ -389,6 +434,14 @@ static int papr_scm_probe(struct platform_device *pdev)
/* request the hypervisor to bind this region to somewhere in memory */
rc = drc_pmem_bind(p);
+
+ /* If phyp says the drc memory is still bound then force an unbind and retry */
+ if (rc == -EBUSY) {
+ dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
+ drc_pmem_unbind(p);
+ rc = drc_pmem_bind(p);
+ }
+
if (rc)
goto err;
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 082c7e1c20f0..1cdb39575eae 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -479,7 +479,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
* Now go through the entire mask until we find a valid
* target.
*/
- for (;;) {
+ do {
/*
* We re-check online as the fallback case passes us
* an untested affinity mask
@@ -487,12 +487,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
if (cpu_online(cpu) && xive_try_pick_target(cpu))
return cpu;
cpu = cpumask_next(cpu, mask);
- if (cpu == first)
- break;
/* Wrap around */
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(mask);
- }
+ } while (cpu != first);
+
return -1;
}
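Moving the exit test into a do/while matters because, with the old for(;;) form, the cpu == first check ran before the wrap-around: when first was also the first CPU in the mask and no CPU was usable, the break could never trigger and the loop spun forever. A standalone sketch of the fixed wrap-around search over a small bitmask (next_set()/first_set()/usable() are simplified stand-ins for the cpumask helpers and the online/pick checks):

    #include <stdio.h>

    #define NBITS 8		/* plays the role of nr_cpu_ids */

    static int next_set(unsigned int mask, int cpu)	/* ~ cpumask_next() */
    {
        for (cpu++; cpu < NBITS; cpu++)
            if (mask & (1u << cpu))
                return cpu;
        return NBITS;
    }

    static int first_set(unsigned int mask)		/* ~ cpumask_first() */
    {
        return next_set(mask, -1);
    }

    static int usable(int cpu)	/* stand-in for cpu_online() && xive_try_pick_target() */
    {
        (void)cpu;
        return 0;		/* worst case: nothing is usable */
    }

    static int find_target(unsigned int mask, int first)
    {
        int cpu = first;

        do {
            if (usable(cpu))
                return cpu;
            cpu = next_set(mask, cpu);
            if (cpu >= NBITS)		/* wrap around first ... */
                cpu = first_set(mask);
        } while (cpu != first);		/* ... then test for a full circle */

        return -1;
    }

    int main(void)
    {
        /* first is also the first bit of the mask: must terminate with -1 */
        printf("%d\n", find_target(0x16 /* cpus 1,2,4 */, 1));
        return 0;
    }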
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 40983491b95f..42b5ec223100 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -21,7 +21,6 @@
cpus {
#address-cells = <1>;
#size-cells = <0>;
- timebase-frequency = <1000000>;
cpu0: cpu@0 {
compatible = "sifive,e51", "sifive,rocket0", "riscv";
device_type = "cpu";
@@ -217,5 +216,20 @@
#size-cells = <0>;
status = "disabled";
};
+ eth0: ethernet@10090000 {
+ compatible = "sifive,fu540-c000-gem";
+ interrupt-parent = <&plic0>;
+ interrupts = <53>;
+ reg = <0x0 0x10090000 0x0 0x2000
+ 0x0 0x100a0000 0x0 0x1000>;
+ local-mac-address = [00 00 00 00 00 00];
+ clock-names = "pclk", "hclk";
+ clocks = <&prci PRCI_CLK_GEMGXLPLL>,
+ <&prci PRCI_CLK_GEMGXLPLL>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
};
};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 0b55c53c08c7..93d68cbd64fe 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -76,3 +76,12 @@
disable-wp;
};
};
+
+&eth0 {
+ status = "okay";
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+};
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index b7b749b18853..93205c0bf71d 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -34,6 +34,7 @@ CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
@@ -53,6 +54,8 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
@@ -66,8 +69,9 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
CONFIG_VIRTIO_MMIO=y
-CONFIG_SPI_SIFIVE=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@@ -83,8 +87,4 @@ CONFIG_ROOT_NFS=y
CONFIG_CRYPTO_USER_API_HASH=y
CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
-CONFIG_SPI=y
-CONFIG_MMC_SPI=y
-CONFIG_MMC=y
-CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_RCU_TRACE is not set
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 1efaeddf1e4b..16970f246860 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -22,6 +22,7 @@ generic-y += kvm_para.h
generic-y += local.h
generic-y += local64.h
generic-y += mm-arch-hooks.h
+generic-y += msi.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += sections.h
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
index 62716653554b..d86cb17bbabe 100644
--- a/arch/riscv/include/uapi/asm/auxvec.h
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2015 Regents of the University of California
diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h
index 0b9b58b57ff6..7d0b32e3b701 100644
--- a/arch/riscv/include/uapi/asm/bitsperlong.h
+++ b/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2015 Regents of the University of California
diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h
index 1920debc09c0..f671e16bf6af 100644
--- a/arch/riscv/include/uapi/asm/byteorder.h
+++ b/arch/riscv/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2015 Regents of the University of California
diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h
index 7d786145183b..4e7646077056 100644
--- a/arch/riscv/include/uapi/asm/hwcap.h
+++ b/arch/riscv/include/uapi/asm/hwcap.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copied from arch/arm64/include/asm/hwcap.h
*
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
index 92d8f7cd8f84..882547f6bd5c 100644
--- a/arch/riscv/include/uapi/asm/ptrace.h
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (C) 2012 Regents of the University of California
*/
diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h
index 053f809e52ce..84f2dfcfdbce 100644
--- a/arch/riscv/include/uapi/asm/sigcontext.h
+++ b/arch/riscv/include/uapi/asm/sigcontext.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (C) 2012 Regents of the University of California
*/
diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h
index b58e00cee2ec..411dd7b52ed6 100644
--- a/arch/riscv/include/uapi/asm/ucontext.h
+++ b/arch/riscv/include/uapi/asm/ucontext.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Copyright (C) 2012 ARM Ltd.
* Copyright (C) 2017 SiFive, Inc.
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
index 0e2eeeb1fd27..13ce76cc5aff 100644
--- a/arch/riscv/include/uapi/asm/unistd.h
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -18,6 +18,7 @@
#ifdef __LP64__
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
+#define __ARCH_WANT_SYS_CLONE3
#endif /* __LP64__ */
#include <asm-generic/unistd.h>
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index f1d6ffe43e42..49a5852fd07d 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -37,7 +37,7 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
# these symbols in the kernel code rather than hand-coded addresses.
SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
- -Wl,--hash-style=both
+ -Wl,--build-id -Wl,--hash-style=both
$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
$(call if_changed,vdsold)
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 7cba96e7587b..4cf0bddb7d92 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -36,7 +36,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
-obj-y += ctype.o text_dma.o
+obj-y += version.o ctype.o text_dma.o
obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index ad57c2205a71..1c3b2b257637 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -8,10 +8,12 @@ void store_ipl_parmblock(void);
void setup_boot_command_line(void);
void parse_boot_command_line(void);
void setup_memory_end(void);
+void verify_facilities(void);
void print_missing_facilities(void);
unsigned long get_random_base(unsigned long safe_addr);
extern int kaslr_enabled;
+extern const char kernel_version[];
unsigned long read_ipl_report(unsigned long safe_offset);
diff --git a/arch/s390/boot/head.S b/arch/s390/boot/head.S
index 028aab03a9e7..2087bed6e60f 100644
--- a/arch/s390/boot/head.S
+++ b/arch/s390/boot/head.S
@@ -361,6 +361,7 @@ ENTRY(startup_kdump)
.quad 0 # INITRD_SIZE
.quad 0 # OLDMEM_BASE
.quad 0 # OLDMEM_SIZE
+ .quad kernel_version # points to kernel version string
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 3bdd8132e56b..c34a6387ce38 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -7,6 +7,7 @@
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
+#include "boot.h"
#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
diff --git a/arch/s390/boot/version.c b/arch/s390/boot/version.c
new file mode 100644
index 000000000000..d32e58bdda6a
--- /dev/null
+++ b/arch/s390/boot/version.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <generated/utsrelease.h>
+#include <generated/compile.h>
+#include "boot.h"
+
+const char kernel_version[] = UTS_RELEASE
+ " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") " UTS_VERSION;
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index e26d4413d34c..74e78ec5beb6 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -3,6 +3,7 @@ CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@@ -18,55 +19,71 @@ CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NR_CPUS=512
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
+CONFIG_CHSC_SCH=y
+CONFIG_VFIO_CCW=m
+CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
+CONFIG_REFCOUNT_FULL=y
+CONFIG_LOCK_EVENT_COUNTS=y
CONFIG_MODULES=y
CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=512
-CONFIG_NUMA=y
-CONFIG_PREEMPT=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_VERIFY_SIG=y
-CONFIG_EXPOLINE=y
-CONFIG_EXPOLINE_AUTO=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -82,17 +99,8 @@ CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_PCI=y
-CONFIG_PCI_DEBUG=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_S390=y
-CONFIG_CHSC_SCH=y
-CONFIG_VFIO_AP=m
-CONFIG_VFIO_CCW=m
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
+CONFIG_PERCPU_STATS=y
+CONFIG_GUP_BENCHMARK=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -121,9 +129,6 @@ CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
@@ -139,10 +144,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_GRE=m
@@ -264,11 +265,8 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=y
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -287,10 +285,7 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -309,7 +304,7 @@ CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@@ -375,9 +370,11 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
+CONFIG_PCI=y
+CONFIG_PCI_DEBUG=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
@@ -395,7 +392,6 @@ CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
@@ -415,17 +411,19 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=m
+CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -445,23 +443,78 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+# CONFIG_MLXFW is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
@@ -473,10 +526,13 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_NULL_TTY=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+CONFIG_PPS=m
+# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
@@ -498,8 +554,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
-CONFIG_S390_AP_IOMMU=y
CONFIG_S390_CCW_IOMMU=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -519,6 +575,7 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
+CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
@@ -552,8 +609,10 @@ CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
@@ -564,7 +623,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
@@ -580,19 +638,112 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
+CONFIG_UNICODE=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
+CONFIG_ENCRYPTED_KEYS=m
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_IMA=y
+CONFIG_IMA_DEFAULT_HASH_SHA256=y
+CONFIG_IMA_WRITE_POLICY=y
+CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECRDSA=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
+CONFIG_CRYPTO_CFB=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_XXHASH=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_RMD128=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_RMD256=m
+CONFIG_CRYPTO_RMD320=m
+CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
+CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
+CONFIG_CRYPTO_SHA1_S390=m
+CONFIG_CRYPTO_SHA256_S390=m
+CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_DES_S390=m
+CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_GHASH_S390=m
+CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CORDIC=m
+CONFIG_CRC32_SELFTEST=y
+CONFIG_CRC4=m
+CONFIG_CRC7=m
+CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_DMA_API_DEBUG=y
+CONFIG_STRING_SELFTEST=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
CONFIG_FRAME_WARN=1024
-CONFIG_READABLE_ASM=y
CONFIG_UNUSED_SYMBOLS=y
CONFIG_HEADERS_INSTALL=y
CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_PAGE_OWNER=y
CONFIG_DEBUG_RODATA_TEST=y
CONFIG_DEBUG_OBJECTS=y
CONFIG_DEBUG_OBJECTS_SELFTEST=y
@@ -645,7 +796,6 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
-CONFIG_DMA_API_DEBUG=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_SORT=y
@@ -657,85 +807,3 @@ CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
-CONFIG_PERSISTENT_KEYRINGS=y
-CONFIG_BIG_KEYS=y
-CONFIG_ENCRYPTED_KEYS=m
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_FORTIFY_SOURCE=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
-CONFIG_SECURITY_SELINUX_DISABLE=y
-CONFIG_INTEGRITY_SIGNATURE=y
-CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
-CONFIG_IMA=y
-CONFIG_IMA_DEFAULT_HASH_SHA256=y
-CONFIG_IMA_WRITE_POLICY=y
-CONFIG_IMA_APPRAISE=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
-CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
-CONFIG_CRYPTO_PCRYPT=m
-CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_CHACHA20POLY1305=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_KEYWRAP=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_VMAC=m
-CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_RMD128=m
-CONFIG_CRYPTO_RMD160=m
-CONFIG_CRYPTO_RMD256=m
-CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_SHA3=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_AES_TI=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SALSA20=m
-CONFIG_CRYPTO_SEED=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_842=m
-CONFIG_CRYPTO_LZ4=m
-CONFIG_CRYPTO_LZ4HC=m
-CONFIG_CRYPTO_ANSI_CPRNG=m
-CONFIG_CRYPTO_USER_API_HASH=m
-CONFIG_CRYPTO_USER_API_SKCIPHER=m
-CONFIG_CRYPTO_USER_API_RNG=m
-CONFIG_CRYPTO_USER_API_AEAD=m
-CONFIG_ZCRYPT=m
-CONFIG_PKEY=m
-CONFIG_CRYPTO_PAES_S390=m
-CONFIG_CRYPTO_SHA1_S390=m
-CONFIG_CRYPTO_SHA256_S390=m
-CONFIG_CRYPTO_SHA512_S390=m
-CONFIG_CRYPTO_DES_S390=m
-CONFIG_CRYPTO_AES_S390=m
-CONFIG_CRYPTO_GHASH_S390=m
-CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_PKCS7_MESSAGE_PARSER=y
-CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_CRC7=m
-CONFIG_CRC8=m
-CONFIG_RANDOM32_SELFTEST=y
-CONFIG_CORDIC=m
-CONFIG_CMM=m
-CONFIG_APPLDATA_BASE=y
-CONFIG_KVM=m
-CONFIG_KVM_S390_UCONTROL=y
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index e4bc40073003..68d3ca83302b 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -12,30 +12,51 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
-# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
CONFIG_CFS_BANDWIDTH=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_HUGETLB=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
CONFIG_CGROUP_PERF=y
+CONFIG_CGROUP_BPF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_TUNE_ZEC12=y
+CONFIG_NR_CPUS=512
+CONFIG_NUMA=y
+# CONFIG_NUMA_EMU is not set
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
+CONFIG_CHSC_SCH=y
+CONFIG_VFIO_CCW=m
+CONFIG_VFIO_AP=m
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
+CONFIG_APPLDATA_BASE=y
+CONFIG_KVM=m
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_OPROFILE=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
@@ -47,27 +68,18 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG=y
CONFIG_MODULE_SIG_SHA256=y
-CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
-CONFIG_BLK_WBT_SQ=y
+CONFIG_BLK_CGROUP_IOLATENCY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_BSD_DISKLABEL=y
CONFIG_MINIX_SUBPARTITION=y
CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_CFQ_GROUP_IOSCHED=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=512
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
-CONFIG_KEXEC_VERIFY_SIG=y
-CONFIG_EXPOLINE=y
-CONFIG_EXPOLINE_AUTO=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+CONFIG_BINFMT_MISC=m
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
@@ -81,16 +93,8 @@ CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI=y
-CONFIG_HOTPLUG_PCI_S390=y
-CONFIG_CHSC_SCH=y
-CONFIG_VFIO_AP=m
-CONFIG_VFIO_CCW=m
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
-CONFIG_PM_DEBUG=y
+CONFIG_PERCPU_STATS=y
+CONFIG_GUP_BENCHMARK=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -119,9 +123,6 @@ CONFIG_NET_IPVTI=m
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_INET_DIAG=m
CONFIG_INET_UDP_DIAG=m
CONFIG_TCP_CONG_ADVANCED=y
@@ -137,10 +138,6 @@ CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_TRANSPORT=m
-CONFIG_INET6_XFRM_MODE_TUNNEL=m
-CONFIG_INET6_XFRM_MODE_BEET=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
CONFIG_IPV6_VTI=m
CONFIG_IPV6_SIT=m
CONFIG_IPV6_GRE=m
@@ -262,11 +259,8 @@ CONFIG_IP_VS_SED=m
CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
-CONFIG_NF_CONNTRACK_IPV4=m
CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=y
-CONFIG_NFT_CHAIN_NAT_IPV4=m
CONFIG_IP_NF_IPTABLES=m
CONFIG_IP_NF_MATCH_AH=m
CONFIG_IP_NF_MATCH_ECN=m
@@ -285,10 +279,7 @@ CONFIG_IP_NF_SECURITY=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_NF_CONNTRACK_IPV6=m
CONFIG_NF_TABLES_IPV6=y
-CONFIG_NFT_CHAIN_ROUTE_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV6=m
CONFIG_IP6_NF_IPTABLES=m
CONFIG_IP6_NF_MATCH_AH=m
CONFIG_IP6_NF_MATCH_EUI64=m
@@ -307,7 +298,7 @@ CONFIG_IP6_NF_RAW=m
CONFIG_IP6_NF_SECURITY=m
CONFIG_IP6_NF_NAT=m
CONFIG_IP6_NF_TARGET_MASQUERADE=m
-CONFIG_NF_TABLES_BRIDGE=y
+CONFIG_NF_TABLES_BRIDGE=m
CONFIG_RDS=m
CONFIG_RDS_RDMA=m
CONFIG_RDS_TCP=m
@@ -372,9 +363,11 @@ CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_NET_PKTGEN=m
+CONFIG_PCI=y
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_S390=y
+CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
-CONFIG_DMA_CMA=y
-CONFIG_CMA_SIZE_MBYTES=0
CONFIG_CONNECTOR=y
CONFIG_ZRAM=m
CONFIG_BLK_DEV_LOOP=m
@@ -383,6 +376,7 @@ CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XPRAM is not set
CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_RBD=m
CONFIG_BLK_DEV_NVME=m
@@ -392,7 +386,6 @@ CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=m
-CONFIG_CHR_DEV_OSST=m
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=m
@@ -412,17 +405,19 @@ CONFIG_SCSI_DH_RDAC=m
CONFIG_SCSI_DH_HP_SW=m
CONFIG_SCSI_DH_EMC=m
CONFIG_SCSI_DH_ALUA=m
-CONFIG_SCSI_OSD_INITIATOR=m
-CONFIG_SCSI_OSD_ULD=m
CONFIG_MD=y
CONFIG_BLK_DEV_MD=y
CONFIG_MD_LINEAR=m
CONFIG_MD_MULTIPATH=m
CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+CONFIG_BCACHE=m
CONFIG_BLK_DEV_DM=m
+CONFIG_DM_UNSTRIPED=m
CONFIG_DM_CRYPT=m
CONFIG_DM_SNAPSHOT=m
CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_WRITECACHE=m
CONFIG_DM_MIRROR=m
CONFIG_DM_LOG_USERSPACE=m
CONFIG_DM_RAID=m
@@ -435,6 +430,7 @@ CONFIG_DM_UEVENT=y
CONFIG_DM_FLAKEY=m
CONFIG_DM_VERITY=m
CONFIG_DM_SWITCH=m
+CONFIG_DM_INTEGRITY=m
CONFIG_NETDEVICES=y
CONFIG_BONDING=m
CONFIG_DUMMY=m
@@ -442,23 +438,78 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
CONFIG_NLMON=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
+# CONFIG_MLXFW is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
CONFIG_PPP=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
CONFIG_PPPOE=m
CONFIG_PPTP=m
CONFIG_PPPOL2TP=m
@@ -470,17 +521,21 @@ CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_MOUSE is not set
# CONFIG_SERIO is not set
CONFIG_LEGACY_PTY_COUNT=0
+CONFIG_NULL_TTY=m
CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_PTP_1588_CLOCK is not set
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
CONFIG_DRM=y
CONFIG_DRM_VIRTIO_GPU=y
+# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
@@ -495,8 +550,8 @@ CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
-CONFIG_S390_AP_IOMMU=y
CONFIG_S390_CCW_IOMMU=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
@@ -546,8 +601,10 @@ CONFIG_ECRYPT_FS=m
CONFIG_CRAMFS=m
CONFIG_SQUASHFS=m
CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
+CONFIG_SQUASHFS_ZSTD=y
CONFIG_ROMFS_FS=m
CONFIG_NFS_FS=m
CONFIG_NFS_V3_ACL=y
@@ -558,7 +615,6 @@ CONFIG_NFSD_V3_ACL=y
CONFIG_NFSD_V4=y
CONFIG_NFSD_V4_SECURITY_LABEL=y
CONFIG_CIFS=m
-CONFIG_CIFS_STATS=y
CONFIG_CIFS_STATS2=y
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_UPCALL=y
@@ -574,31 +630,7 @@ CONFIG_NLS_ISO8859_1=m
CONFIG_NLS_ISO8859_15=m
CONFIG_NLS_UTF8=m
CONFIG_DLM=m
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_RCU_TORTURE_TEST=m
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-CONFIG_HIST_TRIGGERS=y
-CONFIG_LKDTM=m
-CONFIG_PERCPU_TEST=m
-CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
+CONFIG_UNICODE=y
CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
@@ -606,7 +638,6 @@ CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
-CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
@@ -615,31 +646,42 @@ CONFIG_IMA_DEFAULT_HASH_SHA256=y
CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
CONFIG_CRYPTO_FIPS=y
-CONFIG_CRYPTO_DH=m
-CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
+CONFIG_CRYPTO_ECRDSA=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_AEGIS128=m
+CONFIG_CRYPTO_AEGIS128L=m
+CONFIG_CRYPTO_AEGIS256=m
+CONFIG_CRYPTO_MORUS640=m
+CONFIG_CRYPTO_MORUS1280=m
+CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_XXHASH=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
-CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_SHA3=m
+CONFIG_CRYPTO_SM3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_AES_TI=m
CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_ARC4=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAMELLIA=m
CONFIG_CRYPTO_CAST5=m
@@ -649,16 +691,19 @@ CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
CONFIG_CRYPTO_ANSI_CPRNG=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
+CONFIG_CRYPTO_STATS=y
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
@@ -669,12 +714,34 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
+CONFIG_CORDIC=m
+CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
-CONFIG_CORDIC=m
-CONFIG_CMM=m
-CONFIG_APPLDATA_BASE=y
-CONFIG_KVM=m
-CONFIG_KVM_S390_UCONTROL=y
-CONFIG_VHOST_NET=m
-CONFIG_VHOST_VSOCK=m
+CONFIG_DMA_CMA=y
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_FRAME_WARN=1024
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_RCU_TORTURE_TEST=m
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_LKDTM=m
+CONFIG_PERCPU_TEST=m
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index d92bab844b73..be09a208b608 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,27 +1,33 @@
# CONFIG_SWAP is not set
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+# CONFIG_CPU_ISOLATION is not set
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
# CONFIG_COMPAT_BRK is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_IBM_PARTITION=y
-CONFIG_DEFAULT_DEADLINE=y
CONFIG_TUNE_ZEC12=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
-# CONFIG_HOTPLUG_CPU is not set
CONFIG_HZ_100=y
# CONFIG_ARCH_RANDOM is not set
-# CONFIG_COMPACTION is not set
-# CONFIG_MIGRATION is not set
-# CONFIG_BOUNCE is not set
-# CONFIG_CHECK_STACK is not set
+# CONFIG_RELOCATABLE is not set
# CONFIG_CHSC_SCH is not set
# CONFIG_SCM_BUS is not set
CONFIG_CRASH_DUMP=y
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
# CONFIG_SECCOMP is not set
+# CONFIG_PFAULT is not set
+# CONFIG_S390_HYPFS_FS is not set
+# CONFIG_VIRTUALIZATION is not set
+# CONFIG_S390_GUEST is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_IBM_PARTITION=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_COMPACTION is not set
+# CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
CONFIG_NET=y
# CONFIG_IUCV is not set
CONFIG_DEVTMPFS=y
@@ -43,7 +49,6 @@ CONFIG_ZFCP=y
# CONFIG_HVC_IUCV is not set
# CONFIG_HW_RANDOM_S390 is not set
CONFIG_RAW_DRIVER=y
-# CONFIG_SCLP_ASYNC is not set
# CONFIG_HMC_DRV is not set
# CONFIG_S390_TAPE is not set
# CONFIG_VMCP is not set
@@ -56,6 +61,7 @@ CONFIG_RAW_DRIVER=y
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
+# CONFIG_DIMLIB is not set
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
@@ -64,7 +70,4 @@ CONFIG_PANIC_ON_OOPS=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_RCU_CPU_STALL_TIMEOUT=60
# CONFIG_FTRACE is not set
-# CONFIG_PFAULT is not set
-# CONFIG_S390_HYPFS_FS is not set
-# CONFIG_VIRTUALIZATION is not set
-# CONFIG_S390_GUEST is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 42f2375c203e..e1fcc03159ef 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -118,7 +118,7 @@ do { \
return PTR_ERR(rc); \
} while(0)
-static int hpyfs_vm_create_guest(struct dentry *systems_dir,
+static int hypfs_vm_create_guest(struct dentry *systems_dir,
struct diag2fc_data *data)
{
char guest_name[NAME_LEN + 1] = {};
@@ -219,7 +219,7 @@ int hypfs_vm_create_files(struct dentry *root)
}
for (i = 0; i < count; i++) {
- rc = hpyfs_vm_create_guest(dir, &(data[i]));
+ rc = hypfs_vm_create_guest(dir, &(data[i]));
if (rc)
goto failed;
}
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 9900d655014c..b8833ac983fa 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -35,6 +35,7 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
+#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
@@ -55,7 +56,7 @@ __bitops_byte(unsigned long nr, volatile unsigned long *ptr)
return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
}
-static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
@@ -76,7 +77,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
__atomic64_or(mask, (long *)addr);
}
-static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
@@ -97,7 +98,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
__atomic64_and(mask, (long *)addr);
}
-static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline void arch_change_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long mask;
@@ -118,8 +120,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
__atomic64_xor(mask, (long *)addr);
}
-static inline int
-test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline bool arch_test_and_set_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long old, mask;
@@ -129,8 +131,8 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
return (old & mask) != 0;
}
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline bool arch_test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long old, mask;
@@ -140,8 +142,8 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
return (old & ~mask) != 0;
}
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline bool arch_test_and_change_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned long *addr = __bitops_word(nr, ptr);
unsigned long old, mask;
@@ -151,30 +153,31 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
return (old & mask) != 0;
}
-static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline void arch___set_bit(unsigned long nr, volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
*addr |= 1 << (nr & 7);
}
-static inline void
-__clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline void arch___clear_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
*addr &= ~(1 << (nr & 7));
}
-static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline void arch___change_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
*addr ^= 1 << (nr & 7);
}
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline bool arch___test_and_set_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
@@ -184,8 +187,8 @@ __test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
return (ch >> (nr & 7)) & 1;
}
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline bool arch___test_and_clear_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
@@ -195,8 +198,8 @@ __test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
return (ch >> (nr & 7)) & 1;
}
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
+static inline bool arch___test_and_change_bit(unsigned long nr,
+ volatile unsigned long *ptr)
{
unsigned char *addr = __bitops_byte(nr, ptr);
unsigned char ch;
@@ -206,7 +209,8 @@ __test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
return (ch >> (nr & 7)) & 1;
}
-static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
+static inline bool arch_test_bit(unsigned long nr,
+ const volatile unsigned long *ptr)
{
const volatile unsigned char *addr;
@@ -215,28 +219,30 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
return (*addr >> (nr & 7)) & 1;
}
-static inline int test_and_set_bit_lock(unsigned long nr,
- volatile unsigned long *ptr)
+static inline bool arch_test_and_set_bit_lock(unsigned long nr,
+ volatile unsigned long *ptr)
{
- if (test_bit(nr, ptr))
+ if (arch_test_bit(nr, ptr))
return 1;
- return test_and_set_bit(nr, ptr);
+ return arch_test_and_set_bit(nr, ptr);
}
-static inline void clear_bit_unlock(unsigned long nr,
- volatile unsigned long *ptr)
+static inline void arch_clear_bit_unlock(unsigned long nr,
+ volatile unsigned long *ptr)
{
smp_mb__before_atomic();
- clear_bit(nr, ptr);
+ arch_clear_bit(nr, ptr);
}
-static inline void __clear_bit_unlock(unsigned long nr,
- volatile unsigned long *ptr)
+static inline void arch___clear_bit_unlock(unsigned long nr,
+ volatile unsigned long *ptr)
{
smp_mb();
- __clear_bit(nr, ptr);
+ arch___clear_bit(nr, ptr);
}
+#include <asm-generic/bitops-instrumented.h>
+
/*
* Functions which use MSB0 bit numbering.
* The bits are numbered:
@@ -261,7 +267,8 @@ static inline void clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
return clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-static inline int test_and_clear_bit_inv(unsigned long nr, volatile unsigned long *ptr)
+static inline bool test_and_clear_bit_inv(unsigned long nr,
+ volatile unsigned long *ptr)
{
return test_and_clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
@@ -276,8 +283,8 @@ static inline void __clear_bit_inv(unsigned long nr, volatile unsigned long *ptr
return __clear_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
-static inline int test_bit_inv(unsigned long nr,
- const volatile unsigned long *ptr)
+static inline bool test_bit_inv(unsigned long nr,
+ const volatile unsigned long *ptr)
{
return test_bit(nr ^ (BITS_PER_LONG - 1), ptr);
}
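Renaming the s390 primitives to arch_*() and pulling in asm-generic/bitops-instrumented.h switches the canonical set_bit()/test_bit() names to generic wrappers that add sanitizer checks before calling the arch implementation. Roughly what those wrappers look like (a hedged sketch, not the verbatim header):

    /* Hedged sketch of the wrappers asm-generic/bitops-instrumented.h is expected
     * to provide once the arch only defines the arch_*() variants; BIT_WORD()
     * comes from linux/bits.h and kasan_check_*() from linux/kasan-checks.h. */

    static inline void set_bit(long nr, volatile unsigned long *addr)
    {
        /* report the write to KASAN, then defer to the arch implementation above */
        kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
        arch_set_bit(nr, addr);
    }

    static inline bool test_bit(long nr, const volatile unsigned long *addr)
    {
        kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
        return arch_test_bit(nr, addr);
    }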
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a4d38092530a..823578c6b9e2 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -177,6 +177,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define ARCH_ZONE_DMA_BITS 31
+
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index db5ef22c46e4..f647d565bd6d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -28,7 +28,7 @@
* @sliba: storage list information block address
* @sla: storage list address
* @slsba: storage list state block address
- * @akey: access key for DLIB
+ * @akey: access key for SLIB
* @bkey: access key for SL
* @ckey: access key for SBALs
* @dkey: access key for SLSB
@@ -50,11 +50,10 @@ struct qdesfmt0 {
/**
* struct qdr - queue description record (QDR)
* @qfmt: queue format
- * @pfmt: implementation dependent parameter format
* @ac: adapter characteristics
* @iqdcnt: input queue descriptor count
* @oqdcnt: output queue descriptor count
- * @iqdsz: inpout queue descriptor size
+ * @iqdsz: input queue descriptor size
* @oqdsz: output queue descriptor size
* @qiba: queue information block address
* @qkey: queue information block key
@@ -62,8 +61,7 @@ struct qdesfmt0 {
*/
struct qdr {
u32 qfmt : 8;
- u32 pfmt : 8;
- u32 : 8;
+ u32 : 16;
u32 ac : 8;
u32 : 8;
u32 iqdcnt : 8;
@@ -327,6 +325,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* struct qdio_initialize - qdio initialization data
* @cdev: associated ccw device
* @q_format: queue format
+ * @qdr_ac: feature flags to set
* @adapter_name: name for the adapter
* @qib_param_field_format: format for qib_parm_field
* @qib_param_field: pointer to 128 bytes or NULL, if no param field
@@ -338,6 +337,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
* @input_handler: handler to be called for input queues
* @output_handler: handler to be called for output queues
* @queue_start_poll_array: polling handlers (one per input queue or NULL)
+ * @scan_threshold: # of in-use buffers that triggers scan on output queue
* @int_parm: interruption parameter
* @input_sbal_addr_array: address of no_input_qs * 128 pointers
* @output_sbal_addr_array: address of no_output_qs * 128 pointers
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 82deb8fc8319..70bd65724ec4 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -54,6 +54,7 @@
#define INITRD_SIZE_OFFSET 0x10410
#define OLDMEM_BASE_OFFSET 0x10418
#define OLDMEM_SIZE_OFFSET 0x10420
+#define KERNEL_VERSION_OFFSET 0x10428
#define COMMAND_LINE_OFFSET 0x10480
#ifndef __ASSEMBLY__
@@ -74,7 +75,8 @@ struct parmarea {
unsigned long initrd_size; /* 0x10410 */
unsigned long oldmem_base; /* 0x10418 */
unsigned long oldmem_size; /* 0x10420 */
- char pad1[0x10480 - 0x10428]; /* 0x10428 - 0x10480 */
+ unsigned long kernel_version; /* 0x10428 */
+ char pad1[0x10480 - 0x10430]; /* 0x10430 - 0x10480 */
char command_line[ARCH_COMMAND_LINE_SIZE]; /* 0x10480 */
};
@@ -82,6 +84,7 @@ extern int noexec_disabled;
extern int memory_end_set;
extern unsigned long memory_end;
extern unsigned long max_physmem_end;
+extern unsigned long __swsusp_reset_dma;
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
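The new kernel_version parm field sits at absolute address 0x10428 in the loaded image and points at the string built in arch/s390/boot/version.c earlier in this diff. A hedged sketch of reading it back from a raw memory dump, assuming the dump starts at physical address 0 so that file offsets equal absolute addresses (that layout assumption is not part of this patch):

    #include <stdio.h>
    #include <stdint.h>
    #include <endian.h>

    #define KERNEL_VERSION_OFFSET 0x10428UL	/* from asm/setup.h above */

    int main(int argc, char **argv)
    {
        FILE *f;
        uint64_t ptr;
        char buf[256];

        if (argc < 2 || !(f = fopen(argv[1], "rb")))
            return 1;
        fseek(f, KERNEL_VERSION_OFFSET, SEEK_SET);
        if (fread(&ptr, sizeof(ptr), 1, f) != 1)
            return 1;
        ptr = be64toh(ptr);			/* s390 data is big-endian */
        fseek(f, (long)ptr, SEEK_SET);		/* follow the pointer into the dump */
        if (fread(buf, 1, sizeof(buf) - 1, f) == 0)
            return 1;
        buf[sizeof(buf) - 1] = '\0';
        printf("%s\n", buf);			/* e.g. "5.3.0-rc2 (user@host) #1 SMP ..." */
        fclose(f);
        return 0;
    }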
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index b6755685c7b8..9e9f75ef046a 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -34,5 +34,6 @@
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
#endif /* _ASM_S390_UNISTD_H_ */
diff --git a/arch/s390/include/uapi/asm/bpf_perf_event.h b/arch/s390/include/uapi/asm/bpf_perf_event.h
index cefe7c7cd4f6..3ed42ff6da94 100644
--- a/arch/s390/include/uapi/asm/bpf_perf_event.h
+++ b/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
#define _UAPI__ASM_BPF_PERF_EVENT_H__
diff --git a/arch/s390/include/uapi/asm/ipl.h b/arch/s390/include/uapi/asm/ipl.h
index fd32b1cd80d2..451ba7d08905 100644
--- a/arch/s390/include/uapi/asm/ipl.h
+++ b/arch/s390/include/uapi/asm/ipl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_S390_UAPI_IPL_H
#define _ASM_S390_UAPI_IPL_H
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 494c34c50716..8c5755f41dde 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -20,6 +20,7 @@
#include <linux/ioctl.h>
#include <linux/compiler.h>
+#include <linux/types.h>
/* Name of the zcrypt device driver. */
#define ZCRYPT_NAME "zcrypt"
@@ -160,17 +161,17 @@ struct ica_xcRB {
* @payload_len: Payload length
*/
struct ep11_cprb {
- uint16_t cprb_len;
+ __u16 cprb_len;
unsigned char cprb_ver_id;
unsigned char pad_000[2];
unsigned char flags;
unsigned char func_id[2];
- uint32_t source_id;
- uint32_t target_id;
- uint32_t ret_code;
- uint32_t reserved1;
- uint32_t reserved2;
- uint32_t payload_len;
+ __u32 source_id;
+ __u32 target_id;
+ __u32 ret_code;
+ __u32 reserved1;
+ __u32 reserved2;
+ __u32 payload_len;
} __attribute__((packed));
/**
@@ -179,8 +180,8 @@ struct ep11_cprb {
* @dom_id: Usage domain id
*/
struct ep11_target_dev {
- uint16_t ap_id;
- uint16_t dom_id;
+ __u16 ap_id;
+ __u16 dom_id;
};
/**
@@ -195,14 +196,14 @@ struct ep11_target_dev {
* @resp: Addr to response block
*/
struct ep11_urb {
- uint16_t targets_num;
- uint64_t targets;
- uint64_t weight;
- uint64_t req_no;
- uint64_t req_len;
- uint64_t req;
- uint64_t resp_len;
- uint64_t resp;
+ __u16 targets_num;
+ __u64 targets;
+ __u64 weight;
+ __u64 req_no;
+ __u64 req_len;
+ __u64 req;
+ __u64 resp_len;
+ __u64 resp;
} __attribute__((packed));
/**
diff --git a/arch/s390/kernel/machine_kexec_reloc.c b/arch/s390/kernel/machine_kexec_reloc.c
index 1dded39239f8..3b664cb3ec4d 100644
--- a/arch/s390/kernel/machine_kexec_reloc.c
+++ b/arch/s390/kernel/machine_kexec_reloc.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/elf.h>
+#include <asm/kexec.h>
int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
unsigned long addr)
diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
index d4e031f7b9c8..5f1fd1581330 100644
--- a/arch/s390/kernel/perf_cpum_cf_diag.c
+++ b/arch/s390/kernel/perf_cpum_cf_diag.c
@@ -34,7 +34,7 @@ struct cf_diag_csd { /* Counter set data per CPU */
unsigned char start[PAGE_SIZE]; /* Counter set at event start */
unsigned char data[PAGE_SIZE]; /* Counter set at event delete */
};
-DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
+static DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
/* Counter sets are stored as data stream in a page sized memory buffer and
* exported to user space via raw data attached to the event sample data.
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index a90d3e945445..3054e9c035a3 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -437,4 +437,4 @@
432 common fsmount sys_fsmount sys_fsmount
433 common fspick sys_fspick sys_fspick
434 common pidfd_open sys_pidfd_open sys_pidfd_open
-# 435 reserved for clone3
+435 common clone3 sys_clone3 sys_clone3
diff --git a/arch/s390/lib/xor.c b/arch/s390/lib/xor.c
index 96580590ccaf..29d9470dbceb 100644
--- a/arch/s390/lib/xor.c
+++ b/arch/s390/lib/xor.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/export.h>
#include <linux/raid/xor.h>
+#include <asm/xor.h>
static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
{
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 63507662828f..7b0bb475c166 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -327,6 +327,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
case VM_FAULT_BADACCESS:
if (access == VM_EXEC && signal_return(regs) == 0)
break;
+ /* fallthrough */
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
@@ -336,7 +337,9 @@ static noinline void do_fault_error(struct pt_regs *regs, int access,
do_sigsegv(regs, si_code);
break;
}
+ /* fallthrough */
case VM_FAULT_BADCONTEXT:
+ /* fallthrough */
case VM_FAULT_PFAULT:
do_no_context(regs);
break;
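
The /* fallthrough */ comments added to do_fault_error() mark the case labels where dropping into the next case is intentional, so builds with -Wimplicit-fallthrough stay clean. A self-contained userspace sketch of the same annotation style; the fault kinds and messages are invented:

#include <stdio.h>

enum fault_kind { FAULT_BADACCESS, FAULT_BADMAP, FAULT_OTHER };

static void handle_fault(enum fault_kind kind, int fixable)
{
	switch (kind) {
	case FAULT_BADACCESS:
		if (fixable) {
			puts("fixed up, nothing more to do");
			break;
		}
		/* fallthrough */
	case FAULT_BADMAP:
		puts("bad memory access");
		break;
	default:
		puts("no context");
		break;
	}
}

int main(void)
{
	handle_fault(FAULT_BADACCESS, 0);	/* falls through to FAULT_BADMAP */
	handle_fault(FAULT_BADACCESS, 1);	/* handled in the first case */
	return 0;
}
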
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 1e668b95e0c6..39c3a6e3d262 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -2424,8 +2424,8 @@ EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
* This function is assumed to be called with the guest_table_lock
* held.
*/
-bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
- unsigned long gaddr)
+static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
+ unsigned long gaddr)
{
if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
return false;
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 99e06213a22b..54fcdf66ae96 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -17,8 +17,6 @@
#ifdef CONFIG_PGSTE
-static int page_table_allocate_pgste_min = 0;
-static int page_table_allocate_pgste_max = 1;
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
@@ -29,8 +27,8 @@ static struct ctl_table page_table_sysctl[] = {
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &page_table_allocate_pgste_min,
- .extra2 = &page_table_allocate_pgste_max,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
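
The pgalloc.c hunk drops the file-local min/max integers in favour of the shared SYSCTL_ZERO and SYSCTL_ONE constants that proc_dointvec_minmax() accepts as bounds. A sketch of a 0..1 sysctl written that way; the knob name and backing variable are illustrative, and the table would be hooked up with register_sysctl() as usual:

#include <linux/sysctl.h>

static int example_knob;

static struct ctl_table example_sysctl_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,	/* lower bound */
		.extra2		= SYSCTL_ONE,	/* upper bound */
	},
	{ }
};
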
diff --git a/arch/sh/include/uapi/asm/setup.h b/arch/sh/include/uapi/asm/setup.h
index 1170dd2fb998..4bd19f80f9b0 100644
--- a/arch/sh/include/uapi/asm/setup.h
+++ b/arch/sh/include/uapi/asm/setup.h
@@ -1,2 +1,2 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/setup.h>
diff --git a/arch/sh/include/uapi/asm/types.h b/arch/sh/include/uapi/asm/types.h
index f83795fdc0da..68100e108ea6 100644
--- a/arch/sh/include/uapi/asm/types.h
+++ b/arch/sh/include/uapi/asm/types.h
@@ -1,2 +1,2 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#include <asm-generic/types.h>
diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h
index 64c67f2ea33f..0dace69058ab 100644
--- a/arch/sparc/include/uapi/asm/oradax.h
+++ b/arch/sparc/include/uapi/asm/oradax.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
/*
* Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
*/
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 830bd984182b..515c0ceeb4a3 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -314,6 +314,23 @@ For 32-bit we have the following conventions - kernel is built with
#endif
+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ */
+.macro FENCE_SWAPGS_USER_ENTRY
+ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
+
.macro STACKLEAK_ERASE_NOCLOBBER
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
PUSH_AND_CLEAR_REGS
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2bb986f305ac..4f86928246e7 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1443,8 +1443,12 @@ BUILD_INTERRUPT3(hv_stimer0_callback_vector, HYPERV_STIMER0_VECTOR,
ENTRY(page_fault)
ASM_CLAC
- pushl $0; /* %gs's slot on the stack */
+ pushl $do_page_fault
+ jmp common_exception_read_cr2
+END(page_fault)
+common_exception_read_cr2:
+ /* the function address is in %gs's slot on the stack */
SAVE_ALL switch_stacks=1 skip_gs=1
ENCODE_FRAME_POINTER
@@ -1452,6 +1456,7 @@ ENTRY(page_fault)
/* fixup %gs */
GS_TO_REG %ecx
+ movl PT_GS(%esp), %edi
REG_TO_PTGS %ecx
SET_KERNEL_GS %ecx
@@ -1463,9 +1468,9 @@ ENTRY(page_fault)
TRACE_IRQS_OFF
movl %esp, %eax # pt_regs pointer
- call do_page_fault
+ CALL_NOSPEC %edi
jmp ret_from_exception
-END(page_fault)
+END(common_exception_read_cr2)
common_exception:
/* the function address is in %gs's slot on the stack */
@@ -1595,7 +1600,7 @@ END(general_protection)
ENTRY(async_page_fault)
ASM_CLAC
pushl $do_async_page_fault
- jmp common_exception
+ jmp common_exception_read_cr2
END(async_page_fault)
#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3f5a978a02a7..be9ca198c581 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -519,7 +519,7 @@ ENTRY(interrupt_entry)
testb $3, CS-ORIG_RAX+8(%rsp)
jz 1f
SWAPGS
-
+ FENCE_SWAPGS_USER_ENTRY
/*
* Switch to the thread stack. The IRET frame and orig_ax are
* on the stack, as well as the return address. RDI..R12 are
@@ -549,8 +549,10 @@ ENTRY(interrupt_entry)
UNWIND_HINT_FUNC
movq (%rdi), %rdi
+ jmp 2f
1:
-
+ FENCE_SWAPGS_KERNEL_ENTRY
+2:
PUSH_AND_CLEAR_REGS save_ret=1
ENCODE_FRAME_POINTER 8
@@ -1238,6 +1240,13 @@ ENTRY(paranoid_entry)
*/
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+ /*
+ * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
+ * unconditional CR3 write, even in the PTI case. So do an lfence
+ * to prevent GS speculation, regardless of whether PTI is enabled.
+ */
+ FENCE_SWAPGS_KERNEL_ENTRY
+
ret
END(paranoid_entry)
@@ -1288,6 +1297,7 @@ ENTRY(error_entry)
* from user mode due to an IRET fault.
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
/* We have user CR3. Change to kernel CR3. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
@@ -1301,6 +1311,8 @@ ENTRY(error_entry)
pushq %r12
ret
+.Lerror_entry_done_lfence:
+ FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
ret
@@ -1318,7 +1330,7 @@ ENTRY(error_entry)
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
cmpq $.Lgs_change, RIP+8(%rsp)
- jne .Lerror_entry_done
+ jne .Lerror_entry_done_lfence
/*
* hack: .Lgs_change can fail with user gsbase. If this happens, fix up
@@ -1326,6 +1338,7 @@ ENTRY(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
jmp .Lerror_entry_done
@@ -1340,6 +1353,7 @@ ENTRY(error_entry)
* gsbase and CR3. Switch to kernel gsbase and CR3:
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
/*
@@ -1431,6 +1445,7 @@ ENTRY(nmi)
swapgs
cld
+ FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9e911a96972b..648260b5f367 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -20,7 +20,6 @@
#include <asm/intel-family.h>
#include <asm/apic.h>
#include <asm/cpu_device_id.h>
-#include <asm/hypervisor.h>
#include "../perf_event.h"
@@ -263,8 +262,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
};
static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
- INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
- INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
EVENT_EXTRA_END
@@ -4053,7 +4052,7 @@ static bool check_msr(unsigned long msr, u64 mask)
* Disable the check for real HW, so we don't
	 * mess with potentially enabled registers:
*/
- if (hypervisor_is_type(X86_HYPER_NATIVE))
+ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return true;
/*
@@ -4955,6 +4954,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_SKYLAKE_X:
pmem = true;
+ /* fall through */
case INTEL_FAM6_SKYLAKE_MOBILE:
case INTEL_FAM6_SKYLAKE_DESKTOP:
case INTEL_FAM6_KABYLAKE_MOBILE:
@@ -5004,6 +5004,7 @@ __init int intel_pmu_init(void)
case INTEL_FAM6_ICELAKE_X:
case INTEL_FAM6_ICELAKE_XEON_D:
pmem = true;
+ /* fall through */
case INTEL_FAM6_ICELAKE_MOBILE:
case INTEL_FAM6_ICELAKE_DESKTOP:
x86_pmu.late_ack = true;
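
check_msr() now keys off X86_FEATURE_HYPERVISOR, which the kernel derives from the CPUID "hypervisor present" bit (leaf 1, ECX bit 31), rather than asking which specific hypervisor was detected. A userspace sketch that reads the same bit, just to show where the signal comes from:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* ECX bit 31 of CPUID leaf 1 is set when running under a hypervisor. */
	printf("hypervisor bit: %s\n", (ecx & (1u << 31)) ? "set" : "clear");
	return 0;
}
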
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 2c8db2c19328..f1269e804e9b 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -851,7 +851,7 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
struct event_constraint intel_icl_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
- INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 998c2cc08363..e880f2408e29 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -281,6 +281,8 @@
#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
+#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
@@ -394,5 +396,6 @@
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSBDS variant of BUG_MDS */
+#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8282b8d41209..7b0a4ee77313 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -607,15 +607,16 @@ struct kvm_vcpu_arch {
/*
* QEMU userspace and the guest each have their own FPU state.
- * In vcpu_run, we switch between the user, maintained in the
- * task_struct struct, and guest FPU contexts. While running a VCPU,
- * the VCPU thread will have the guest FPU context.
+ * In vcpu_run, we switch between the user and guest FPU contexts.
+ * While running a VCPU, the VCPU thread will have the guest FPU
+ * context.
*
* Note that while the PKRU state lives inside the fpu registers,
* it is switched out separately at VMENTER and VMEXIT time. The
* "guest_fpu" state here contains the guest FPU context, with the
* host PRKU bits.
*/
+ struct fpu *user_fpu;
struct fpu *guest_fpu;
u64 xcr0;
diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
index ae91429129a6..ba71a63cdac4 100644
--- a/arch/x86/include/asm/vdso/gettimeofday.h
+++ b/arch/x86/include/asm/vdso/gettimeofday.h
@@ -96,6 +96,8 @@ long clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
#else
+#define VDSO_HAS_32BIT_FALLBACK 1
+
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
@@ -114,6 +116,23 @@ long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
}
static __always_inline
+long clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ long ret;
+
+ asm (
+ "mov %%ebx, %%edx \n"
+ "mov %[clock], %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+ : "=a" (ret), "=m" (*_ts)
+ : "0" (__NR_clock_gettime), [clock] "g" (_clkid), "c" (_ts)
+ : "edx");
+
+ return ret;
+}
+
+static __always_inline
long gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
{
@@ -148,6 +167,23 @@ clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
return ret;
}
+static __always_inline
+long clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
+{
+ long ret;
+
+ asm (
+ "mov %%ebx, %%edx \n"
+ "mov %[clock], %%ebx \n"
+ "call __kernel_vsyscall \n"
+ "mov %%edx, %%ebx \n"
+ : "=a" (ret), "=m" (*_ts)
+ : "0" (__NR_clock_getres), [clock] "g" (_clkid), "c" (_ts)
+ : "edx");
+
+ return ret;
+}
+
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
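
In the new clock_gettime32/clock_getres32 fallbacks, %ebx carries the first syscall argument but is also the PIC/GOT register on 32-bit x86, so it is saved in %edx around the __kernel_vsyscall call instead of being listed as a clobber. As a rough userspace illustration of the service the fallback ultimately requests (this is not the vDSO code path itself):

#define _GNU_SOURCE
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	struct timespec ts;

	/* Plain system call, the slow path behind the vDSO fast path. */
	if (syscall(SYS_clock_gettime, CLOCK_MONOTONIC, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
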
diff --git a/arch/x86/include/uapi/asm/byteorder.h b/arch/x86/include/uapi/asm/byteorder.h
index 484e3cfd7ef2..149143cab9ff 100644
--- a/arch/x86/include/uapi/asm/byteorder.h
+++ b/arch/x86/include/uapi/asm/byteorder.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_BYTEORDER_H
#define _ASM_X86_BYTEORDER_H
diff --git a/arch/x86/include/uapi/asm/hwcap2.h b/arch/x86/include/uapi/asm/hwcap2.h
index 6ebaae90e207..8b2effe6efb8 100644
--- a/arch/x86/include/uapi/asm/hwcap2.h
+++ b/arch/x86/include/uapi/asm/hwcap2.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_HWCAP2_H
#define _ASM_X86_HWCAP2_H
diff --git a/arch/x86/include/uapi/asm/sigcontext32.h b/arch/x86/include/uapi/asm/sigcontext32.h
index 6b18e88de8a6..7114801d0499 100644
--- a/arch/x86/include/uapi/asm/sigcontext32.h
+++ b/arch/x86/include/uapi/asm/sigcontext32.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_SIGCONTEXT32_H
#define _ASM_X86_SIGCONTEXT32_H
diff --git a/arch/x86/include/uapi/asm/types.h b/arch/x86/include/uapi/asm/types.h
index df55e1ddb0c9..9d5c11a24279 100644
--- a/arch/x86/include/uapi/asm/types.h
+++ b/arch/x86/include/uapi/asm/types.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_X86_TYPES_H
#define _ASM_X86_TYPES_H
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 66ca906aa790..c6fa3ef10b4e 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -34,6 +34,7 @@
#include "cpu.h"
+static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
@@ -98,17 +99,11 @@ void __init check_bugs(void)
if (boot_cpu_has(X86_FEATURE_STIBP))
x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
- /* Select the proper spectre mitigation before patching alternatives */
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
-
- /*
- * Select proper mitigation for any exposure to the Speculative Store
- * Bypass vulnerability.
- */
ssb_select_mitigation();
-
l1tf_select_mitigation();
-
mds_select_mitigation();
arch_smt_update();
@@ -274,6 +269,98 @@ static int __init mds_cmdline(char *str)
early_param("mds", mds_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+ SPECTRE_V1_MITIGATION_NONE,
+ SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
+ SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+ [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+ [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
+ /*
+ * On CPUs which are vulnerable to Meltdown, SMAP does not
+ * prevent speculative access to user data in the L1 cache.
+ * Consider SMAP to be non-functional as a mitigation on these
+ * CPUs.
+ */
+ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+ return false;
+
+ return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+ return;
+ }
+
+ if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+ /*
+ * With Spectre v1, a user can speculatively control either
+ * path of a conditional swapgs with a user-controlled GS
+ * value. The mitigation is to add lfences to both code paths.
+ *
+ * If FSGSBASE is enabled, the user can put a kernel address in
+ * GS, in which case SMAP provides no protection.
+ *
+ * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+ * FSGSBASE enablement patches have been merged. ]
+ *
+ * If FSGSBASE is disabled, the user can only put a user space
+ * address in GS. That makes an attack harder, but still
+ * possible if there's no SMAP protection.
+ */
+ if (!smap_works_speculatively()) {
+ /*
+ * Mitigation can be provided from SWAPGS itself or
+ * PTI as the CR3 write in the Meltdown mitigation
+ * is serializing.
+ *
+ * If neither is there, mitigate with an LFENCE to
+ * stop speculation through swapgs.
+ */
+ if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+ !boot_cpu_has(X86_FEATURE_PTI))
+ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+ /*
+ * Enable lfences in the kernel entry (non-swapgs)
+ * paths, to prevent user entry from speculatively
+ * skipping swapgs.
+ */
+ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+ }
+ }
+
+ pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+ return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
@@ -1226,7 +1313,7 @@ static ssize_t l1tf_show_state(char *buf)
static ssize_t mds_show_state(char *buf)
{
- if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
return sprintf(buf, "%s; SMT Host state unknown\n",
mds_strings[mds_mitigation]);
}
@@ -1290,7 +1377,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
break;
case X86_BUG_SPECTRE_V1:
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
case X86_BUG_SPECTRE_V2:
return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
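
The nospectre_v1 handler added above follows the usual early_param pattern: the boot-time hook only records the administrator's choice, and the actual selection happens later from check_bugs(). A minimal sketch of that pattern with an invented parameter name:

#include <linux/init.h>
#include <linux/types.h>
#include <linux/cache.h>

static bool example_mitigation_off __ro_after_init;

static int __init noexample_cmdline(char *str)
{
	example_mitigation_off = true;
	return 0;
}
early_param("noexample", noexample_cmdline);
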
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 11472178e17f..f125bf7ecb6f 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1022,6 +1022,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_L1TF BIT(3)
#define NO_MDS BIT(4)
#define MSBDS_ONLY BIT(5)
+#define NO_SWAPGS BIT(6)
#define VULNWL(_vendor, _family, _model, _whitelist) \
{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -1048,30 +1049,38 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
VULNWL_INTEL(CORE_YONAH, NO_SSB),
- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
+
+ /*
+ * Technically, swapgs isn't serializing on AMD (despite it previously
+ * being documented as such in the APM). But according to AMD, %gs is
+ * updated non-speculatively, and the issuing of %gs-relative memory
+ * operands will be blocked until the %gs update completes, which is
+ * good enough for our purposes.
+ */
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
{}
};
@@ -1108,6 +1117,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
}
+ if (!cpu_matches(NO_SWAPGS))
+ setup_force_cpu_bug(X86_BUG_SWAPGS);
+
if (cpu_matches(NO_MELTDOWN))
return;
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index a6342c899be5..f3d3e9646a99 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -193,10 +193,10 @@ ENTRY(secondary_startup_64)
/* Set up %gs.
*
- * The base of %gs always points to the bottom of the irqstack
- * union. If the stack protector canary is enabled, it is
- * located at %gs:40. Note that, on SMP, the boot cpu uses
- * init data section till per cpu areas are set up.
+ * The base of %gs always points to fixed_percpu_data. If the
+ * stack protector canary is enabled, it is located at %gs:40.
+ * Note that, on SMP, the boot cpu uses init data section until
+ * the per cpu areas are set up.
*/
movl $MSR_GS_BASE,%ecx
movl initial_gs(%rip),%eax
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index c43e96a938d0..c6f791bc481e 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -827,10 +827,6 @@ int __init hpet_enable(void)
if (!hpet_cfg_working())
goto out_nohpet;
- /* Validate that the counter is counting */
- if (!hpet_counting())
- goto out_nohpet;
-
/*
* Read the period and check for a sane value:
*/
@@ -896,6 +892,14 @@ int __init hpet_enable(void)
}
hpet_print_config();
+ /*
+ * Validate that the counter is counting. This needs to be done
+ * after sanitizing the config registers to properly deal with
+ * force enabled HPETs.
+ */
+ if (!hpet_counting())
+ goto out_nohpet;
+
clocksource_register_hz(&clocksource_hpet, (u32)hpet_freq);
if (id & HPET_ID_LEGSUP) {
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c
index 4f36d3241faf..2d6898c2cb64 100644
--- a/arch/x86/kernel/stacktrace.c
+++ b/arch/x86/kernel/stacktrace.c
@@ -100,7 +100,7 @@ copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
int ret;
- if (!access_ok(fp, sizeof(*frame)))
+ if (__range_not_ok(fp, sizeof(*frame), TASK_SIZE))
return 0;
ret = 1;
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 8eb67a670b10..653b7f617b61 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -230,9 +230,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
{},
};
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ {
+ /*
+ * Lenovo MIIX310-10ICR, only some batches have the troublesome
+ * 800x1280 portrait screen. Luckily the portrait version has
+ * its own BIOS version, so we match on that.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+ },
+ },
+ {
+ /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo MIIX 320-10ICR"),
+ },
+ },
+ {
+ /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo ideapad D330-10IGM"),
+ },
+ },
+ {},
+};
+
__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
dmi_check_system(efifb_dmi_system_table);
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+ dmi_check_system(efifb_dmi_swap_width_height)) {
+ u16 temp = screen_info.lfb_width;
+
+ screen_info.lfb_width = screen_info.lfb_height;
+ screen_info.lfb_height = temp;
+ screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+ }
}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8f72526e2f68..24843cf49579 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3466,7 +3466,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
/*
* Currently, fast page fault only works for direct mapping
* since the gfn is not stable for indirect shadow page. See
- * Documentation/virtual/kvm/locking.txt to get more detail.
+ * Documentation/virt/kvm/locking.txt to get more detail.
*/
fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
iterator.sptep, spte,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 19f69df96758..7eafc6907861 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2143,12 +2143,20 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
goto out;
}
+ svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
+ GFP_KERNEL_ACCOUNT);
+ if (!svm->vcpu.arch.user_fpu) {
+ printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
+ err = -ENOMEM;
+ goto free_partial_svm;
+ }
+
svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
GFP_KERNEL_ACCOUNT);
if (!svm->vcpu.arch.guest_fpu) {
printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
err = -ENOMEM;
- goto free_partial_svm;
+ goto free_user_fpu;
}
err = kvm_vcpu_init(&svm->vcpu, kvm, id);
@@ -2211,6 +2219,8 @@ uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
+free_user_fpu:
+ kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
free_partial_svm:
kmem_cache_free(kvm_vcpu_cache, svm);
out:
@@ -2241,6 +2251,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_page(virt_to_page(svm->nested.hsave));
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu);
kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
kmem_cache_free(kvm_vcpu_cache, svm);
}
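
Both KVM vCPU creation paths (SVM here, VMX below) gain a second FPU allocation and a matching unwind label. The shape is the standard goto-based cleanup: free in the reverse order of allocation so a failure at any step releases exactly what was already set up. A generic sketch with invented names and sizes:

#include <linux/slab.h>
#include <linux/errno.h>

struct example_vcpu {
	void *user_fpu;
	void *guest_fpu;
};

static int example_vcpu_create(struct example_vcpu **out)
{
	struct example_vcpu *v;
	int err = -ENOMEM;

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v)
		return -ENOMEM;

	v->user_fpu = kzalloc(64, GFP_KERNEL);
	if (!v->user_fpu)
		goto free_vcpu;

	v->guest_fpu = kzalloc(64, GFP_KERNEL);
	if (!v->guest_fpu)
		goto free_user_fpu;

	*out = v;
	return 0;

free_user_fpu:
	kfree(v->user_fpu);
free_vcpu:
	kfree(v);
	return err;
}
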
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0f1378789bd0..ced9fba32598 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -220,6 +220,8 @@ static void free_nested(struct kvm_vcpu *vcpu)
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
+ kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+
vmx->nested.vmxon = false;
vmx->nested.smm.vmxon = false;
free_vpid(vmx->nested.vpid02);
@@ -232,7 +234,9 @@ static void free_nested(struct kvm_vcpu *vcpu)
vmx->vmcs01.shadow_vmcs = NULL;
}
kfree(vmx->nested.cached_vmcs12);
+ vmx->nested.cached_vmcs12 = NULL;
kfree(vmx->nested.cached_shadow_vmcs12);
+ vmx->nested.cached_shadow_vmcs12 = NULL;
/* Unpin physical memory we referred to in the vmcs02 */
if (vmx->nested.apic_access_page) {
kvm_release_page_dirty(vmx->nested.apic_access_page);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index a279447eb75b..074385c86c09 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6598,6 +6598,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
+ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
@@ -6613,12 +6614,20 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
if (!vmx)
return ERR_PTR(-ENOMEM);
+ vmx->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache,
+ GFP_KERNEL_ACCOUNT);
+ if (!vmx->vcpu.arch.user_fpu) {
+ printk(KERN_ERR "kvm: failed to allocate kvm userspace's fpu\n");
+ err = -ENOMEM;
+ goto free_partial_vcpu;
+ }
+
vmx->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache,
GFP_KERNEL_ACCOUNT);
if (!vmx->vcpu.arch.guest_fpu) {
printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
err = -ENOMEM;
- goto free_partial_vcpu;
+ goto free_user_fpu;
}
vmx->vpid = allocate_vpid();
@@ -6721,6 +6730,8 @@ uninit_vcpu:
free_vcpu:
free_vpid(vmx->vpid);
kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.guest_fpu);
+free_user_fpu:
+ kmem_cache_free(x86_fpu_cache, vmx->vcpu.arch.user_fpu);
free_partial_vcpu:
kmem_cache_free(kvm_vcpu_cache, vmx);
return ERR_PTR(err);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 58305cf81182..c6d951cbd76c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3306,6 +3306,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_x86_ops->vcpu_load(vcpu, cpu);
+ fpregs_assert_state_consistent();
+ if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ switch_fpu_return();
+
/* Apply any externally detected TSC adjustments (due to suspend) */
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
@@ -7202,7 +7206,7 @@ static void kvm_sched_yield(struct kvm *kvm, unsigned long dest_id)
rcu_read_unlock();
- if (target)
+ if (target && READ_ONCE(target->ready))
kvm_vcpu_yield_to(target);
}
@@ -7242,6 +7246,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
break;
case KVM_HC_KICK_CPU:
kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
+ kvm_sched_yield(vcpu->kvm, a1);
ret = 0;
break;
#ifdef CONFIG_X86_64
@@ -7990,9 +7995,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
trace_kvm_entry(vcpu->vcpu_id);
guest_enter_irqoff();
- fpregs_assert_state_consistent();
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
- switch_fpu_return();
+ /* The preempt notifier should have taken care of the FPU already. */
+ WARN_ON_ONCE(test_thread_flag(TIF_NEED_FPU_LOAD));
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
@@ -8270,7 +8274,7 @@ static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
fpregs_lock();
- copy_fpregs_to_fpstate(&current->thread.fpu);
+ copy_fpregs_to_fpstate(vcpu->arch.user_fpu);
/* PKRU is separately restored in kvm_x86_ops->run. */
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state,
~XFEATURE_MASK_PKRU);
@@ -8287,7 +8291,7 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
fpregs_lock();
copy_fpregs_to_fpstate(vcpu->arch.guest_fpu);
- copy_kernel_to_fpregs(&current->thread.fpu.state);
+ copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
fpregs_mark_activate();
fpregs_unlock();
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6c46095cd0d9..9ceacd1156db 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -177,13 +177,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
- return NULL;
- if (!pmd_present(*pmd))
+ if (pmd_present(*pmd) != pmd_present(*pmd_k))
set_pmd(pmd, *pmd_k);
+
+ if (!pmd_present(*pmd_k))
+ return NULL;
else
- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
return pmd_k;
}
@@ -203,17 +204,13 @@ void vmalloc_sync_all(void)
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
spinlock_t *pgt_lock;
- pmd_t *ret;
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
- ret = vmalloc_sync_one(page_address(page), address);
+ vmalloc_sync_one(page_address(page), address);
spin_unlock(pgt_lock);
-
- if (!ret)
- break;
}
spin_unlock(&pgd_lock);
}
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 60c220020054..80828b95a51f 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -14,6 +14,7 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
#include <asm/processor.h>
#include <asm/coprocessor.h>
#include <asm/thread_info.h>
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 72860325245a..586fcfe227ea 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3354,38 +3354,57 @@ static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
* there is no active group, then the primary expectation for
* this device is probably a high throughput.
*
- * We are now left only with explaining the additional
- * compound condition that is checked below for deciding
- * whether the scenario is asymmetric. To explain this
- * compound condition, we need to add that the function
+ * We are now left only with explaining the two sub-conditions in the
+ * additional compound condition that is checked below for deciding
+ * whether the scenario is asymmetric. To explain the first
+ * sub-condition, we need to add that the function
* bfq_asymmetric_scenario checks the weights of only
- * non-weight-raised queues, for efficiency reasons (see
- * comments on bfq_weights_tree_add()). Then the fact that
- * bfqq is weight-raised is checked explicitly here. More
- * precisely, the compound condition below takes into account
- * also the fact that, even if bfqq is being weight-raised,
- * the scenario is still symmetric if all queues with requests
- * waiting for completion happen to be
- * weight-raised. Actually, we should be even more precise
- * here, and differentiate between interactive weight raising
- * and soft real-time weight raising.
+ * non-weight-raised queues, for efficiency reasons (see comments on
+ * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised
+ * is checked explicitly here. More precisely, the compound condition
+ * below takes into account also the fact that, even if bfqq is being
+ * weight-raised, the scenario is still symmetric if all queues with
+ * requests waiting for completion happen to be
+ * weight-raised. Actually, we should be even more precise here, and
+ * differentiate between interactive weight raising and soft real-time
+ * weight raising.
+ *
+ * The second sub-condition checked in the compound condition is
+ * whether there is a fair amount of already in-flight I/O not
+ * belonging to bfqq. If so, I/O dispatching is to be plugged, for the
+ * following reason. The drive may decide to serve in-flight
+ * non-bfqq's I/O requests before bfqq's ones, thereby delaying the
+ * arrival of new I/O requests for bfqq (recall that bfqq is sync). If
+ * I/O-dispatching is not plugged, then, while bfqq remains empty, a
+ * basically uncontrolled amount of I/O from other queues may be
+ * dispatched too, possibly causing the service of bfqq's I/O to be
+ * delayed even longer in the drive. This problem gets more and more
+ * serious as the speed and the queue depth of the drive grow,
+ * because, as these two quantities grow, the probability to find no
+ * queue busy but many requests in flight grows too. By contrast,
+ * plugging I/O dispatching minimizes the delay induced by already
+ * in-flight I/O, and enables bfqq to recover the bandwidth it may
+ * lose because of this delay.
*
* As a side note, it is worth considering that the above
- * device-idling countermeasures may however fail in the
- * following unlucky scenario: if idling is (correctly)
- * disabled in a time period during which all symmetry
- * sub-conditions hold, and hence the device is allowed to
- * enqueue many requests, but at some later point in time some
- * sub-condition stops to hold, then it may become impossible
- * to let requests be served in the desired order until all
- * the requests already queued in the device have been served.
+ * device-idling countermeasures may however fail in the following
+ * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled
+ * in a time period during which all symmetry sub-conditions hold, and
+ * therefore the device is allowed to enqueue many requests, but at
+ * some later point in time some sub-condition stops to hold, then it
+ * may become impossible to make requests be served in the desired
+ * order until all the requests already queued in the device have been
+ * served. The last sub-condition commented above somewhat mitigates
+ * this problem for weight-raised queues.
*/
static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd,
struct bfq_queue *bfqq)
{
return (bfqq->wr_coeff > 1 &&
- bfqd->wr_busy_queues <
- bfq_tot_busy_queues(bfqd)) ||
+ (bfqd->wr_busy_queues <
+ bfq_tot_busy_queues(bfqd) ||
+ bfqd->rq_in_driver >=
+ bfqq->dispatched + 4)) ||
bfq_asymmetric_scenario(bfqd, bfqq);
}
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 24ed26957367..55a7dc227dfb 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -54,7 +54,7 @@ static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
static LIST_HEAD(all_blkcgs); /* protected by blkcg_pol_mutex */
-static bool blkcg_debug_stats = false;
+bool blkcg_debug_stats = false;
static struct workqueue_struct *blkcg_punt_bio_wq;
static bool blkcg_policy_enabled(struct request_queue *q,
@@ -944,10 +944,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
dbytes, dios);
}
- if (!blkcg_debug_stats)
- goto next;
-
- if (atomic_read(&blkg->use_delay)) {
+ if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
has_stats = true;
off += scnprintf(buf+off, size-off,
" use_delay=%d delay_nsec=%llu",
@@ -967,7 +964,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
has_stats = true;
off += written;
}
-next:
+
if (has_stats) {
if (off < size - 1) {
off += scnprintf(buf+off, size-off, "\n");
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index d973c38ee4fd..0fff7b56df0e 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -917,6 +917,9 @@ static size_t iolatency_pd_stat(struct blkg_policy_data *pd, char *buf,
unsigned long long avg_lat;
unsigned long long cur_win;
+ if (!blkcg_debug_stats)
+ return 0;
+
if (iolat->ssd)
return iolatency_ssd_stat(iolat, buf, size);
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index cf22ab00fefb..126021fc3a11 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -61,15 +61,6 @@ static inline void blk_mq_sched_completed_request(struct request *rq, u64 now)
e->type->ops.completed_request(rq, now);
}
-static inline void blk_mq_sched_started_request(struct request *rq)
-{
- struct request_queue *q = rq->q;
- struct elevator_queue *e = q->elevator;
-
- if (e && e->type->ops.started_request)
- e->type->ops.started_request(rq);
-}
-
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b038ec680e84..f78d3287dd82 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -669,8 +669,6 @@ void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
- blk_mq_sched_started_request(rq);
-
trace_block_rq_issue(q, rq);
if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
@@ -1960,9 +1958,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq = blk_mq_get_request(q, bio, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
- if (bio->bi_opf & REQ_NOWAIT)
+
+ cookie = BLK_QC_T_NONE;
+ if (bio->bi_opf & REQ_NOWAIT_INLINE)
+ cookie = BLK_QC_T_EAGAIN;
+ else if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
- return BLK_QC_T_NONE;
+ return cookie;
}
trace_block_getrq(q, bio, bio->bi_opf);
diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
index 659ccb8b693f..3954c0dc1443 100644
--- a/block/blk-rq-qos.c
+++ b/block/blk-rq-qos.c
@@ -202,6 +202,7 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
return -1;
data->got_token = true;
+ smp_wmb();
list_del_init(&curr->entry);
wake_up_process(data->task);
return 1;
@@ -244,7 +245,9 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
return;
prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
+ has_sleeper = !wq_has_single_sleeper(&rqw->wait);
do {
+ /* The memory barrier in set_task_state saves us here. */
if (data.got_token)
break;
if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
@@ -255,12 +258,14 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
* which means we now have two. Put our local token
* and wake anyone else potentially waiting for one.
*/
+ smp_rmb();
if (data.got_token)
cleanup_cb(rqw, private_data);
break;
}
io_schedule();
- has_sleeper = false;
+ has_sleeper = true;
+ set_current_state(TASK_UNINTERRUPTIBLE);
} while (1);
finish_wait(&rqw->wait, &data.wq);
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 2ae348c101a0..2c1831207a8f 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -752,7 +752,8 @@ void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
 * page (which might not be identical to the Linux PAGE_SIZE). Because
* of that they are not limited by our notion of "segment size".
*/
- q->limits.max_segment_size = UINT_MAX;
+ if (mask)
+ q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
diff --git a/block/genhd.c b/block/genhd.c
index 97887e59f3b2..54f1f0d381f4 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1969,7 +1969,7 @@ static const struct attribute *disk_events_attrs[] = {
* The default polling interval can be specified by the kernel
* parameter block.events_dfl_poll_msecs which defaults to 0
* (disable). This can also be modified runtime by writing to
- * /sys/module/block/events_dfl_poll_msecs.
+ * /sys/module/block/parameters/events_dfl_poll_msecs.
*/
static int disk_events_set_dfl_poll_msecs(const char *val,
const struct kernel_param *kp)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index d4551e33fa71..8569b79e8b58 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)node->node_data;
- if (idx > its->its_count) {
- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+ if (idx >= its->its_count) {
+ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
idx, its->its_count);
return -ENXIO;
}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 28cffaaf9d82..f616b16c1f0b 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -232,13 +232,15 @@ int acpi_device_set_power(struct acpi_device *device, int state)
if (device->power.flags.power_resources)
result = acpi_power_transition(device, target_state);
} else {
+ int cur_state = device->power.state;
+
if (device->power.flags.power_resources) {
result = acpi_power_transition(device, ACPI_STATE_D0);
if (result)
goto end;
}
- if (device->power.state == ACPI_STATE_D0) {
+ if (cur_state == ACPI_STATE_D0) {
int psc;
/* Nothing to do here if _PSC is not present. */
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index c02fa27dd3f3..1413324982f0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1282,7 +1282,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
if (rc)
return rc;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
@@ -1299,7 +1299,7 @@ static ssize_t hw_error_scrub_store(struct device *dev,
break;
}
}
- device_unlock(dev);
+ nfit_device_unlock(dev);
if (rc)
return rc;
return size;
@@ -1319,7 +1319,7 @@ static ssize_t scrub_show(struct device *dev,
ssize_t rc = -ENXIO;
bool busy;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (!nd_desc) {
device_unlock(dev);
@@ -1339,7 +1339,7 @@ static ssize_t scrub_show(struct device *dev,
}
mutex_unlock(&acpi_desc->init_mutex);
- device_unlock(dev);
+ nfit_device_unlock(dev);
return rc;
}
@@ -1356,14 +1356,14 @@ static ssize_t scrub_store(struct device *dev,
if (val != 1)
return -EINVAL;
- device_lock(dev);
+ nfit_device_lock(dev);
nd_desc = dev_get_drvdata(dev);
if (nd_desc) {
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
rc = acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
}
- device_unlock(dev);
+ nfit_device_unlock(dev);
if (rc)
return rc;
return size;
@@ -1749,9 +1749,9 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
struct acpi_device *adev = data;
struct device *dev = &adev->dev;
- device_lock(dev->parent);
+ nfit_device_lock(dev->parent);
__acpi_nvdimm_notify(dev, event);
- device_unlock(dev->parent);
+ nfit_device_unlock(dev->parent);
}
static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
@@ -3457,8 +3457,8 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
struct device *dev = acpi_desc->dev;
/* Bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
- device_lock(dev);
- device_unlock(dev);
+ nfit_device_lock(dev);
+ nfit_device_unlock(dev);
/* Bounce the init_mutex to complete initial registration */
mutex_lock(&acpi_desc->init_mutex);
@@ -3602,8 +3602,8 @@ void acpi_nfit_shutdown(void *data)
* acpi_nfit_ars_rescan() submissions have had a chance to
* either submit or see ->cancel set.
*/
- device_lock(bus_dev);
- device_unlock(bus_dev);
+ nfit_device_lock(bus_dev);
+ nfit_device_unlock(bus_dev);
flush_workqueue(nfit_wq);
}
@@ -3746,9 +3746,9 @@ EXPORT_SYMBOL_GPL(__acpi_nfit_notify);
static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
- device_lock(&adev->dev);
+ nfit_device_lock(&adev->dev);
__acpi_nfit_notify(&adev->dev, adev->handle, event);
- device_unlock(&adev->dev);
+ nfit_device_unlock(&adev->dev);
}
static const struct acpi_device_id acpi_nfit_ids[] = {
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 6ee2b02af73e..24241941181c 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -312,6 +312,30 @@ static inline struct acpi_nfit_desc *to_acpi_desc(
return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}
+#ifdef CONFIG_PROVE_LOCKING
+static inline void nfit_device_lock(struct device *dev)
+{
+ device_lock(dev);
+ mutex_lock(&dev->lockdep_mutex);
+}
+
+static inline void nfit_device_unlock(struct device *dev)
+{
+ mutex_unlock(&dev->lockdep_mutex);
+ device_unlock(dev);
+}
+#else
+static inline void nfit_device_lock(struct device *dev)
+{
+ device_lock(dev);
+}
+
+static inline void nfit_device_unlock(struct device *dev)
+{
+ device_unlock(dev);
+}
+#endif
+
const guid_t *to_nfit_uuid(enum nfit_uuids id);
int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *nfit, acpi_size sz);
void acpi_nfit_shutdown(void *data);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0e28270b0fd8..aad6be5c0af0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2204,6 +2204,12 @@ int __init acpi_scan_init(void)
acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
+ /*
+ * Although we call __add_memory() that is documented to require the
+ * device_hotplug_lock, it is not necessary here because this is an
+ * early code when userspace or any other code path cannot trigger
+ * hotplug/hotunplug operations.
+ */
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 38a59a630cd4..dc1c83eafc22 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2988,7 +2988,7 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
- if (target_node && target_proc == proc) {
+ if (target_node && target_proc->pid == proc->pid) {
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
@@ -3239,7 +3239,8 @@ static void binder_transaction(struct binder_proc *proc,
buffer_offset = off_start_offset;
off_end_offset = off_start_offset + tr->offsets_size;
sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
- sg_buf_end_offset = sg_buf_offset + extra_buffers_size;
+ sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
+ ALIGN(secctx_sz, sizeof(u64));
off_min = 0;
for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
buffer_offset += sizeof(binder_size_t)) {
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 72312ad2e142..9e9583a6bba9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -338,6 +338,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
hpriv->phys[port] = NULL;
rc = 0;
break;
+ case -EPROBE_DEFER:
+ /* Do not complain yet */
+ break;
default:
dev_err(dev,
@@ -408,7 +411,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
hpriv->mmio = devm_ioremap_resource(dev,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (IS_ERR(hpriv->mmio)) {
- dev_err(dev, "no mmio space\n");
rc = PTR_ERR(hpriv->mmio);
goto err_out;
}
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2dd9af..eefda51f97d3 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc;
struct ata_taskfile tf;
- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 302cf0ba1600..8c7a996d1f16 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -63,6 +63,7 @@
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
}
if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
board = ia_cmds.status;
- if ((board < 0) || (board > iadev_count))
- board = 0;
+
+ if ((board < 0) || (board > iadev_count))
+ board = 0;
+ board = array_index_nospec(board, iadev_count + 1);
+
iadev = ia_dev[board];
switch (ia_cmds.cmd) {
case MEMDUMP:
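
The iphase ioctl clamps the user-supplied board number with array_index_nospec() so the bounds check also holds under speculative execution; the driver accepts indices 0..iadev_count inclusive, hence the clamp against iadev_count + 1. A generic sketch of the pattern with illustrative names:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/nospec.h>

static void *lookup_entry(void **table, unsigned int count, int idx)
{
	if (idx < 0 || idx >= count)
		return ERR_PTR(-EINVAL);

	/* Keep idx in range even if the branch above is speculated past. */
	idx = array_index_nospec(idx, count);
	return table[idx];
}
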
diff --git a/drivers/base/core.c b/drivers/base/core.c
index da84a73f2ba6..636058bbf48a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1663,6 +1663,9 @@ void device_initialize(struct device *dev)
kobject_init(&dev->kobj, &device_ktype);
INIT_LIST_HEAD(&dev->dma_pools);
mutex_init(&dev->mutex);
+#ifdef CONFIG_PROVE_LOCKING
+ mutex_init(&dev->lockdep_mutex);
+#endif
lockdep_set_novalidate_class(&dev->mutex);
spin_lock_init(&dev->devres_lock);
INIT_LIST_HEAD(&dev->devres_head);
@@ -2211,6 +2214,24 @@ void put_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(put_device);
+bool kill_device(struct device *dev)
+{
+ /*
+ * Require the device lock and set the "dead" flag to guarantee that
+ * the update behavior is consistent with the other bitfields near
+ * it and that we cannot have an asynchronous probe routine trying
+ * to run while we are tearing out the bus/class/sysfs from
+ * underneath the device.
+ */
+ lockdep_assert_held(&dev->mutex);
+
+ if (dev->p->dead)
+ return false;
+ dev->p->dead = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
/**
* device_del - delete device from system.
* @dev: device.
@@ -2230,15 +2251,8 @@ void device_del(struct device *dev)
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
- /*
- * Hold the device lock and set the "dead" flag to guarantee that
- * the update behavior is consistent with the other bitfields near
- * it and that we cannot have an asynchronous probe routine trying
- * to run while we are tearing out the bus/class/sysfs from
- * underneath the device.
- */
device_lock(dev);
- dev->p->dead = true;
+ kill_device(dev);
device_unlock(dev);
/* Notify clients of device removal. This call must come
diff --git a/drivers/base/firmware_loader/firmware.h b/drivers/base/firmware_loader/firmware.h
index 7048a41973ed..7ecd590e67fe 100644
--- a/drivers/base/firmware_loader/firmware.h
+++ b/drivers/base/firmware_loader/firmware.h
@@ -141,8 +141,8 @@ int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed);
int fw_map_paged_buf(struct fw_priv *fw_priv);
#else
static inline void fw_free_paged_buf(struct fw_priv *fw_priv) {}
-int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
-int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
+static inline int fw_grow_paged_buf(struct fw_priv *fw_priv, int pages_needed) { return -ENXIO; }
+static inline int fw_map_paged_buf(struct fw_priv *fw_priv) { return -ENXIO; }
#endif
#endif /* __FIRMWARE_LOADER_H */
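
The firmware_loader fix marks the stubbed-out declarations static inline; without that, every file including the header would emit its own external definition of the same function and the link would fail with multiple-definition errors. A sketch of the stub convention with made-up config and function names:

/* example.h */
#include <linux/errno.h>

struct example_priv;

#ifdef CONFIG_EXAMPLE_PAGED_BUF
int example_grow_buf(struct example_priv *priv, int pages_needed);
int example_map_buf(struct example_priv *priv);
#else
static inline int example_grow_buf(struct example_priv *priv, int pages_needed)
{
	return -ENXIO;
}
static inline int example_map_buf(struct example_priv *priv)
{
	return -ENXIO;
}
#endif
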
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 85f20e371f2f..bd7d3bb8b890 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1726,6 +1726,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* MSch: invalidate default_params */
default_params[drive].blocks = 0;
set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
+ /* Fall through */
case FDFMTEND:
case FDFLUSH:
/* invalidate the buffer track to force a reread */
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 90ebfcae0ce6..2b3103c30857 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
unsigned int key_len;
char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+ struct shash_desc *desc;
struct packet_info pi;
struct net_conf *nc;
int err, rv;
@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
+ desc = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(connection->cram_hmac_tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ rv = -1;
+ goto fail;
+ }
desc->tfm = connection->cram_hmac_tfm;
rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
kfree(peers_ch);
kfree(response);
kfree(right_response);
- shash_desc_zero(desc);
+ if (desc) {
+ shash_desc_zero(desc);
+ kfree(desc);
+ }
return rv;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 44c9985f352a..3036883fc9f8 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -924,6 +924,7 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct file *file;
struct inode *inode;
struct address_space *mapping;
+ struct block_device *claimed_bdev = NULL;
int lo_flags = 0;
int error;
loff_t size;
@@ -942,10 +943,11 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
* here to avoid changing device under exclusive owner.
*/
if (!(mode & FMODE_EXCL)) {
- bdgrab(bdev);
- error = blkdev_get(bdev, mode | FMODE_EXCL, loop_set_fd);
- if (error)
+ claimed_bdev = bd_start_claiming(bdev, loop_set_fd);
+ if (IS_ERR(claimed_bdev)) {
+ error = PTR_ERR(claimed_bdev);
goto out_putf;
+ }
}
error = mutex_lock_killable(&loop_ctl_mutex);
@@ -1015,15 +1017,15 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
mutex_unlock(&loop_ctl_mutex);
if (partscan)
loop_reread_partitions(lo, bdev);
- if (!(mode & FMODE_EXCL))
- blkdev_put(bdev, mode | FMODE_EXCL);
+ if (claimed_bdev)
+ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
return 0;
out_unlock:
mutex_unlock(&loop_ctl_mutex);
out_bdev:
- if (!(mode & FMODE_EXCL))
- blkdev_put(bdev, mode | FMODE_EXCL);
+ if (claimed_bdev)
+ bd_abort_claiming(bdev, claimed_bdev, loop_set_fd);
out_putf:
fput(file);
out:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 9bcde2325893..e21d2ded732b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1231,7 +1231,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
sock_shutdown(nbd);
- kill_bdev(bdev);
+ __invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index a55be205b91a..dbfe34664633 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -98,6 +98,9 @@ static int ath_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
ath = kzalloc(sizeof(*ath), GFP_KERNEL);
if (!ath)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8905ad2edde7..ae2624fce913 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -406,6 +406,9 @@ static int bcm_open(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
if (!bcm)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 207bae5e0d46..31f25153087d 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -391,6 +391,9 @@ static int intel_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
intel = kzalloc(sizeof(*intel), GFP_KERNEL);
if (!intel)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8950e07889fe..85a30fb9177b 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -292,6 +292,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+/* Check whether the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+ /* serdev nodes check if the needed operations are present */
+ if (hu->serdev)
+ return true;
+
+ if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+ return true;
+
+ return false;
+}
+
/* Flow control or un-flow control the device */
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
{
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index f98e5cc343b2..fbc3f7c3a5c7 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -59,6 +59,9 @@ static int mrvl_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
if (!mrvl)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 9a5c9c1f9484..82a0a3691a63 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -473,6 +473,9 @@ static int qca_open(struct hci_uart *hu)
BT_DBG("hu %p qca_open", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index f11af3912ce6..6ab631101019 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -104,6 +104,7 @@ int hci_uart_wait_until_sent(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
void hci_uart_init_work(struct work_struct *work);
void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 5c39f20378b8..9ac6671bb514 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -567,8 +567,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
unsigned long long m;
m = hpets->hp_tick_freq + (dis >> 1);
- do_div(m, dis);
- return (unsigned long)m;
+ return div64_ul(m, dis);
}
static int
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 57204335c5f5..285e0b8f9a97 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -76,7 +76,7 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
struct ipmb_dev *ipmb_dev = to_ipmb_dev(file);
struct ipmb_request_elem *queue_elem;
struct ipmb_msg msg;
- ssize_t ret;
+ ssize_t ret = 0;
memset(&msg, 0, sizeof(msg));
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index d47ad10a35fe..4838c6a9f0f2 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -77,6 +77,18 @@ static int tpm_go_idle(struct tpm_chip *chip)
return chip->ops->go_idle(chip);
}
+static void tpm_clk_enable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, true);
+}
+
+static void tpm_clk_disable(struct tpm_chip *chip)
+{
+ if (chip->ops->clk_enable)
+ chip->ops->clk_enable(chip, false);
+}
+
/**
* tpm_chip_start() - power on the TPM
* @chip: a TPM chip to use
@@ -89,13 +101,12 @@ int tpm_chip_start(struct tpm_chip *chip)
{
int ret;
- if (chip->ops->clk_enable)
- chip->ops->clk_enable(chip, true);
+ tpm_clk_enable(chip);
if (chip->locality == -1) {
ret = tpm_request_locality(chip);
if (ret) {
- chip->ops->clk_enable(chip, false);
+ tpm_clk_disable(chip);
return ret;
}
}
@@ -103,8 +114,7 @@ int tpm_chip_start(struct tpm_chip *chip)
ret = tpm_cmd_ready(chip);
if (ret) {
tpm_relinquish_locality(chip);
- if (chip->ops->clk_enable)
- chip->ops->clk_enable(chip, false);
+ tpm_clk_disable(chip);
return ret;
}
@@ -124,8 +134,7 @@ void tpm_chip_stop(struct tpm_chip *chip)
{
tpm_go_idle(chip);
tpm_relinquish_locality(chip);
- if (chip->ops->clk_enable)
- chip->ops->clk_enable(chip, false);
+ tpm_clk_disable(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_stop);
@@ -545,6 +554,20 @@ static int tpm_add_hwrng(struct tpm_chip *chip)
return hwrng_register(&chip->hwrng);
}
+static int tpm_get_pcr_allocation(struct tpm_chip *chip)
+{
+ int rc;
+
+ rc = (chip->flags & TPM_CHIP_FLAG_TPM2) ?
+ tpm2_get_pcr_allocation(chip) :
+ tpm1_get_pcr_allocation(chip);
+
+ if (rc > 0)
+ return -ENODEV;
+
+ return rc;
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -564,6 +587,12 @@ int tpm_chip_register(struct tpm_chip *chip)
if (rc)
return rc;
rc = tpm_auto_startup(chip);
+ if (rc) {
+ tpm_chip_stop(chip);
+ return rc;
+ }
+
+ rc = tpm_get_pcr_allocation(chip);
tpm_chip_stop(chip);
if (rc)
return rc;
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index e503ffc3aa39..a7fea3e0ca86 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -394,6 +394,7 @@ int tpm1_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf);
ssize_t tpm1_getcap(struct tpm_chip *chip, u32 subcap_id, cap_t *cap,
const char *desc, size_t min_cap_length);
int tpm1_get_random(struct tpm_chip *chip, u8 *out, size_t max);
+int tpm1_get_pcr_allocation(struct tpm_chip *chip);
unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
int tpm_pm_suspend(struct device *dev);
int tpm_pm_resume(struct device *dev);
@@ -449,6 +450,7 @@ int tpm2_unseal_trusted(struct tpm_chip *chip,
ssize_t tpm2_get_tpm_pt(struct tpm_chip *chip, u32 property_id,
u32 *value, const char *desc);
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip);
int tpm2_auto_startup(struct tpm_chip *chip);
void tpm2_shutdown(struct tpm_chip *chip, u16 shutdown_type);
unsigned long tpm2_calc_ordinal_duration(struct tpm_chip *chip, u32 ordinal);
diff --git a/drivers/char/tpm/tpm1-cmd.c b/drivers/char/tpm/tpm1-cmd.c
index faacbe1ffa1a..149e953ca369 100644
--- a/drivers/char/tpm/tpm1-cmd.c
+++ b/drivers/char/tpm/tpm1-cmd.c
@@ -699,18 +699,6 @@ int tpm1_auto_startup(struct tpm_chip *chip)
goto out;
}
- chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
- GFP_KERNEL);
- if (!chip->allocated_banks) {
- rc = -ENOMEM;
- goto out;
- }
-
- chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
- chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
- chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
- chip->nr_allocated_banks = 1;
-
return rc;
out:
if (rc > 0)
@@ -779,3 +767,27 @@ int tpm1_pm_suspend(struct tpm_chip *chip, u32 tpm_suspend_pcr)
return rc;
}
+/**
+ * tpm1_get_pcr_allocation() - initialize the allocated bank
+ * @chip: TPM chip to use.
+ *
+ * The function initializes the SHA1 allocated bank to extend PCR
+ *
+ * Return:
+ * * 0 on success,
+ * * < 0 on error.
+ */
+int tpm1_get_pcr_allocation(struct tpm_chip *chip)
+{
+ chip->allocated_banks = kcalloc(1, sizeof(*chip->allocated_banks),
+ GFP_KERNEL);
+ if (!chip->allocated_banks)
+ return -ENOMEM;
+
+ chip->allocated_banks[0].alg_id = TPM_ALG_SHA1;
+ chip->allocated_banks[0].digest_size = hash_digest_size[HASH_ALGO_SHA1];
+ chip->allocated_banks[0].crypto_id = HASH_ALGO_SHA1;
+ chip->nr_allocated_banks = 1;
+
+ return 0;
+}
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index d103545e4055..ba9acae83bff 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -840,7 +840,7 @@ struct tpm2_pcr_selection {
u8 pcr_select[3];
} __packed;
-static ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
+ssize_t tpm2_get_pcr_allocation(struct tpm_chip *chip)
{
struct tpm2_pcr_selection pcr_selection;
struct tpm_buf buf;
@@ -1040,10 +1040,6 @@ int tpm2_auto_startup(struct tpm_chip *chip)
goto out;
}
- rc = tpm2_get_pcr_allocation(chip);
- if (rc)
- goto out;
-
rc = tpm2_get_cc_attrs_tbl(chip);
out:
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 44db83a6d01c..44a46dcc0518 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -141,6 +141,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
continue;
div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+ if (div > GENERATED_MAX_DIV + 1)
+ div = GENERATED_MAX_DIV + 1;
clk_generated_best_diff(req, parent, parent_rate, div,
&best_diff, &best_rate);
diff --git a/drivers/clk/mediatek/clk-mt8183.c b/drivers/clk/mediatek/clk-mt8183.c
index 1aa5f4059251..73b7e238eee7 100644
--- a/drivers/clk/mediatek/clk-mt8183.c
+++ b/drivers/clk/mediatek/clk-mt8183.c
@@ -25,9 +25,11 @@ static const struct mtk_fixed_clk top_fixed_clks[] = {
FIXED_CLK(CLK_TOP_UNIVP_192M, "univpll_192m", "univpll", 192000000),
};
+static const struct mtk_fixed_factor top_early_divs[] = {
+ FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1, 2),
+};
+
static const struct mtk_fixed_factor top_divs[] = {
- FACTOR(CLK_TOP_CLK13M, "clk13m", "clk26m", 1,
- 2),
FACTOR(CLK_TOP_F26M_CK_D2, "csw_f26m_ck_d2", "clk26m", 1,
2),
FACTOR(CLK_TOP_SYSPLL_CK, "syspll_ck", "mainpll", 1,
@@ -1148,37 +1150,57 @@ static int clk_mt8183_apmixed_probe(struct platform_device *pdev)
return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
}
+static struct clk_onecell_data *top_clk_data;
+
+static void clk_mt8183_top_init_early(struct device_node *node)
+{
+ int i;
+
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+
+ for (i = 0; i < CLK_TOP_NR_CLK; i++)
+ top_clk_data->clks[i] = ERR_PTR(-EPROBE_DEFER);
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
+
+ of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+}
+
+CLK_OF_DECLARE_DRIVER(mt8183_topckgen, "mediatek,mt8183-topckgen",
+ clk_mt8183_top_init_early);
+
static int clk_mt8183_top_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
void __iomem *base;
- struct clk_onecell_data *clk_data;
struct device_node *node = pdev->dev.of_node;
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
- clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
-
mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
- clk_data);
+ top_clk_data);
+
+ mtk_clk_register_factors(top_early_divs, ARRAY_SIZE(top_early_divs),
+ top_clk_data);
- mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes),
- node, &mt8183_clk_lock, clk_data);
+ node, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_composites(top_aud_muxes, ARRAY_SIZE(top_aud_muxes),
- base, &mt8183_clk_lock, clk_data);
+ base, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_composites(top_aud_divs, ARRAY_SIZE(top_aud_divs),
- base, &mt8183_clk_lock, clk_data);
+ base, &mt8183_clk_lock, top_clk_data);
mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks),
- clk_data);
+ top_clk_data);
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
}
static int clk_mt8183_infra_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 52bbb9ce3807..d4075b130674 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -572,17 +572,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
/* Reset module */
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
udelay(35);
@@ -599,16 +593,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
return 0;
}
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 91d3d721c801..3c219af25100 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -3,6 +3,7 @@ config SPRD_COMMON_CLK
tristate "Clock support for Spreadtrum SoCs"
depends on ARCH_SPRD || COMPILE_TEST
default ARCH_SPRD
+ select REGMAP_MMIO
if SPRD_COMMON_CLK
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index 93f39a1d4c3d..c66f566a854c 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
int err = -ENODEV;
cpu = of_get_cpu_node(policy->cpu, NULL);
+ if (!cpu)
+ goto out;
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
of_node_put(cpu);
- if (!cpu)
+ if (!max_freqp) {
+ err = -EINVAL;
goto out;
+ }
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
if (!dn)
@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
- if (!max_freqp) {
- err = -EINVAL;
- goto out_unmap_sdcpwr;
- }
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
@@ -199,9 +197,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
return 0;
-out_unmap_sdcpwr:
- iounmap(sdcpwr_mapbase);
-
out_unmap_sdcasr:
iounmap(sdcasr_mapbase);
out:
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 3dc1cbf849db..b785e936244f 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -957,7 +957,7 @@ static void set_broadcast_channel(struct fw_device *device, int generation)
device->bc_implemented = BC_IMPLEMENTED;
break;
}
- /* else fall through to case address error */
+ /* else, fall through - to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 42566b7be8f5..df8a56a979b9 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -284,7 +284,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
if ((data[0] & bit) == (data[1] & bit))
continue;
- /* 1394-1995 IRM, fall through to retry. */
+ /* fall through - It's a 1394-1995 IRM, retry. */
default:
if (retry) {
retry--;
diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 46bd22dde535..94a13fca8267 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -54,6 +54,7 @@ static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
switch (port_type) {
case SELFID_PORT_CHILD:
(*child_port_count)++;
+ /* fall through */
case SELFID_PORT_PARENT:
case SELFID_PORT_NCONN:
(*total_port_count)++;
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 53446e39a32c..ba8d3d0ef32c 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
config ISCSI_IBFT_FIND
bool "iSCSI Boot Firmware Table Attributes"
- depends on X86 && ACPI
+ depends on X86 && ISCSI_IBFT
default n
help
This option enables the kernel to find the region of memory
@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
select ISCSI_BOOT_SYSFS
- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+ select ISCSI_IBFT_FIND if X86
+ depends on ACPI && SCSI && SCSI_LOWLEVEL
default n
help
This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index ab3aa3983833..7e12cbdf957c 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBFT_ISCSI_VERSION);
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
struct ibft_hdr {
u8 id;
u8 version;
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 474f304ec109..cdd4f73b4869 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -40,6 +40,7 @@ config ALTERA_PR_IP_CORE_PLAT
config FPGA_MGR_ALTERA_PS_SPI
tristate "Altera FPGA Passive Serial over SPI"
depends on SPI
+ select BITREVERSE
help
FPGA manager driver support for Altera Arria/Cyclone/Stratix
using the passive serial interface over SPI.
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 3ee99d070608..f497003f119c 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -956,9 +956,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= IRQF_TRIGGER_RISING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= IRQF_TRIGGER_FALLING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
INIT_KFIFO(le->events);
@@ -1392,12 +1394,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
for (i = 0; i < chip->ngpio; i++) {
struct gpio_desc *desc = &gdev->descs[i];
- if (chip->get_direction && gpiochip_line_is_valid(chip, i))
- desc->flags = !chip->get_direction(chip, i) ?
- (1 << FLAG_IS_OUT) : 0;
- else
- desc->flags = !chip->direction_input ?
- (1 << FLAG_IS_OUT) : 0;
+ if (chip->get_direction && gpiochip_line_is_valid(chip, i)) {
+ if (!chip->get_direction(chip, i))
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ } else {
+ if (!chip->direction_input)
+ set_bit(FLAG_IS_OUT, &desc->flags);
+ else
+ clear_bit(FLAG_IS_OUT, &desc->flags);
+ }
}
acpi_gpiochip_add(chip);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1d80222587ad..3c88420e3497 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -394,7 +394,7 @@ config DRM_R128
config DRM_I810
tristate "Intel I810"
# !PREEMPT because of missing ioctl locking
- depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
+ depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
help
Choose this option if you have an Intel I810 graphics card. If M is
selected, the module will be called i810. AGP support is required
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1d3ee9c42f7e..6a5c96e519b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1140,7 +1140,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
adev->asic_type != CHIP_FIJI &&
adev->asic_type != CHIP_POLARIS10 &&
adev->asic_type != CHIP_POLARIS11 &&
- adev->asic_type != CHIP_POLARIS12) ?
+ adev->asic_type != CHIP_POLARIS12 &&
+ adev->asic_type != CHIP_VEGAM) ?
VI_BO_SIZE_ALIGN : 1;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index e069de8b54e6..4e4094f842e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1044,29 +1044,27 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
return r;
}
- fence = amdgpu_ctx_get_fence(ctx, entity,
- deps[i].handle);
+ fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
+ amdgpu_ctx_put(ctx);
+
+ if (IS_ERR(fence))
+ return PTR_ERR(fence);
+ else if (!fence)
+ continue;
if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
- struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+ struct drm_sched_fence *s_fence;
struct dma_fence *old = fence;
+ s_fence = to_drm_sched_fence(fence);
fence = dma_fence_get(&s_fence->scheduled);
dma_fence_put(old);
}
- if (IS_ERR(fence)) {
- r = PTR_ERR(fence);
- amdgpu_ctx_put(ctx);
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
+ dma_fence_put(fence);
+ if (r)
return r;
- } else if (fence) {
- r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
- true);
- dma_fence_put(fence);
- amdgpu_ctx_put(ctx);
- if (r)
- return r;
- }
}
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 6d54decef7f8..5652cc72ed3a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f2e8b4238efd..5376328d3fd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -148,7 +148,7 @@ struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
int amdgpu_ras_enable = -1;
-uint amdgpu_ras_mask = 0xffffffff;
+uint amdgpu_ras_mask = 0xfffffffb;
/**
* DOC: vramlimit (int)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 8b7efd0a7028..2b546567853b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -159,12 +159,16 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
- pm = amdgpu_smu_get_current_power_state(adev);
- else if (adev->powerplay.pp_funcs->get_current_power_state)
+ if (is_support_sw_smu(adev)) {
+ if (adev->smu.ppt_funcs->get_current_power_state)
+ pm = amdgpu_smu_get_current_power_state(adev);
+ else
+ pm = adev->pm.dpm.user_state;
+ } else if (adev->powerplay.pp_funcs->get_current_power_state) {
pm = amdgpu_dpm_get_current_power_state(adev);
- else
+ } else {
pm = adev->pm.dpm.user_state;
+ }
return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
@@ -191,7 +195,11 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
goto fail;
}
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.user_state = state;
+ mutex_unlock(&adev->pm.mutex);
+ } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
} else {
mutex_lock(&adev->pm.mutex);
@@ -1734,7 +1742,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return -EINVAL;
if (is_support_sw_smu(adev)) {
- err = smu_get_current_rpm(&adev->smu, &speed);
+ err = smu_get_fan_speed_rpm(&adev->smu, &speed);
if (err)
return err;
} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -1794,7 +1802,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
return -EINVAL;
if (is_support_sw_smu(adev)) {
- err = smu_get_current_rpm(&adev->smu, &rpm);
+ err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
if (err)
return err;
} else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
@@ -3067,28 +3075,44 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
- /* UVD clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "UVD: Disabled\n");
- } else {
- seq_printf(m, "UVD: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ if (adev->asic_type > CHIP_VEGA20) {
+ /* VCN clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "VCN: Disabled\n");
+ } else {
+ seq_printf(m, "VCN: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
}
- }
- seq_printf(m, "\n");
+ seq_printf(m, "\n");
+ } else {
+ /* UVD clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "UVD: Disabled\n");
+ } else {
+ seq_printf(m, "UVD: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
+ }
+ }
+ seq_printf(m, "\n");
- /* VCE clocks */
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
- if (!value) {
- seq_printf(m, "VCE: Disabled\n");
- } else {
- seq_printf(m, "VCE: Enabled\n");
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
- seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ /* VCE clocks */
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
+ if (!value) {
+ seq_printf(m, "VCE: Disabled\n");
+ } else {
+ seq_printf(m, "VCE: Enabled\n");
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
+ seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
+ }
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1a4412e47810..fac7aa2c244f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -136,11 +136,6 @@ static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
struct amdgpu_bo **bo_ptr);
-static void amdgpu_ras_self_test(struct amdgpu_device *adev)
-{
- /* TODO */
-}
-
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -689,6 +684,12 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
if (!obj)
return -EINVAL;
+ if (block_info.block_id != TA_RAS_BLOCK__UMC) {
+ DRM_INFO("%s error injection is not supported yet\n",
+ ras_block_str(info->head.block));
+ return -EINVAL;
+ }
+
ret = psp_ras_trigger_error(&adev->psp, &block_info);
if (ret)
DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
@@ -1557,6 +1558,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
amdgpu_ras_check_supported(adev, &con->hw_supported,
&con->supported);
+ if (!con->hw_supported) {
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+ return 0;
+ }
+
con->features = 0;
INIT_LIST_HEAD(&con->head);
/* Might need get this flag from vbios. */
@@ -1570,8 +1577,6 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- amdgpu_ras_self_test(adev);
-
DRM_INFO("RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1675d5837c3c..32773b7523d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -1441,6 +1441,15 @@ static void gfx_v10_0_init_compute_vmid(struct amdgpu_device *adev)
}
nv_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
+ }
}
static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
@@ -4611,6 +4620,7 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
TIME_STAMP_INT_ENABLE, 0);
WREG32(cp_int_cntl_reg, cp_int_cntl);
+ break;
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(cp_int_cntl_reg);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 0db9f488da7e..21187275dfd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1879,6 +1879,15 @@ static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
}
cik_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
+ WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
+ WREG32(amdgpu_gds_reg_offset[i].gws, 0);
+ WREG32(amdgpu_gds_reg_offset[i].oa, 0);
+ }
}
static void gfx_v7_0_config_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5f401b41ef7c..751567f78567 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3706,6 +3706,15 @@ static void gfx_v8_0_init_compute_vmid(struct amdgpu_device *adev)
}
vi_srbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32(amdgpu_gds_reg_offset[i].mem_base, 0);
+ WREG32(amdgpu_gds_reg_offset[i].mem_size, 0);
+ WREG32(amdgpu_gds_reg_offset[i].gws, 0);
+ WREG32(amdgpu_gds_reg_offset[i].oa, 0);
+ }
}
static void gfx_v8_0_config_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index f4c4eea62526..1cf639a51178 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1918,6 +1918,15 @@ static void gfx_v9_0_init_compute_vmid(struct amdgpu_device *adev)
}
soc15_grbm_select(adev, 0, 0, 0, 0);
mutex_unlock(&adev->srbm_mutex);
+
+ /* Initialize all compute VMIDs to have no GDS, GWS, or OA
+ access. These should be enabled by FW for target VMIDs. */
+ for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_BASE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_VMID0_SIZE, 2 * i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_GWS_VMID0, i, 0);
+ WREG32_SOC15_OFFSET(GC, 0, mmGDS_OA_VMID0, i, 0);
+ }
}
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 988c0adaca91..1cfc2620b2dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -372,11 +372,8 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr));
offset = size;
- /* No signed header for now from firmware
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
- */
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
}
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 792371442195..4e3fc284f6ac 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -668,6 +668,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
case CHIP_RAVEN:
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
+ break;
case CHIP_NAVI10:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 4f8a6ffc5775..9cd3eb2d90bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -429,7 +429,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- pr_debug("%s@%i\n", __func__, __LINE__);
case KFD_MQD_TYPE_COMPUTE:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
index c1a92c16535c..5cc3acccda2a 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
@@ -262,12 +262,12 @@ void dce110_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
memcpy(clk_mgr->max_clks_by_state,
dce110_max_clks_by_state,
sizeof(dce110_max_clks_by_state));
- dce_clk_mgr_construct(ctx, clk_mgr);
-
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
index 778392c73187..7c746ef1e32e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
@@ -226,12 +226,12 @@ void dce112_clk_mgr_construct(
struct dc_context *ctx,
struct clk_mgr_internal *clk_mgr)
{
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
memcpy(clk_mgr->max_clks_by_state,
dce112_max_clks_by_state,
sizeof(dce112_max_clks_by_state));
- dce_clk_mgr_construct(ctx, clk_mgr);
-
clk_mgr->regs = &disp_clk_regs;
clk_mgr->clk_mgr_shift = &disp_clk_shift;
clk_mgr->clk_mgr_mask = &disp_clk_mask;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
index 906310c3e2eb..5399b8cf6b75 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce120/dce120_clk_mgr.c
@@ -127,12 +127,12 @@ static struct clk_mgr_funcs dce120_funcs = {
void dce120_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr)
{
+ dce_clk_mgr_construct(ctx, clk_mgr);
+
memcpy(clk_mgr->max_clks_by_state,
dce120_max_clks_by_state,
sizeof(dce120_max_clks_by_state));
- dce_clk_mgr_construct(ctx, clk_mgr);
-
clk_mgr->base.dprefclk_khz = 600000;
clk_mgr->base.funcs = &dce120_funcs;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 08a774fc7b67..50bfb5921de0 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -301,6 +301,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr,
void dcn2_init_clocks(struct clk_mgr *clk_mgr)
{
memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.p_state_change_support = true;
}
void dcn2_enable_pme_wa(struct clk_mgr *clk_mgr_base)
@@ -331,6 +333,7 @@ void dcn20_clk_mgr_construct(
struct dccg *dccg)
{
clk_mgr->base.ctx = ctx;
+ clk_mgr->pp_smu = pp_smu;
clk_mgr->base.funcs = &dcn2_funcs;
clk_mgr->regs = &clk_mgr_regs;
clk_mgr->clk_mgr_shift = &clk_mgr_shift;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 4ef4dc63e221..fa20201eef3a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -502,8 +502,10 @@ void dc_stream_set_static_screen_events(struct dc *dc,
static void destruct(struct dc *dc)
{
- dc_release_state(dc->current_state);
- dc->current_state = NULL;
+ if (dc->current_state) {
+ dc_release_state(dc->current_state);
+ dc->current_state = NULL;
+ }
destroy_links(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 8dbf759eba45..355b4ba12796 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -532,6 +532,7 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
+ union max_down_spread max_down_spread = { {0} };
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -553,8 +554,6 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
msleep(8);
}
- ASSERT(status == DC_OK);
-
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
&link_bw_set, sizeof(link_bw_set));
@@ -576,6 +575,12 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link)
link->cur_link_settings.link_rate = link_bw_set;
link->cur_link_settings.use_link_rate_set = false;
}
+ // Read DPCD 00003h to find the max down spread.
+ core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
+ &max_down_spread.raw, sizeof(max_down_spread));
+ link->cur_link_settings.link_spread =
+ max_down_spread.bits.MAX_DOWN_SPREAD ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
}
static bool detect_dp(
@@ -717,13 +722,6 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
return false;
}
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
- /* On detect, we want to make sure current link settings are
- * up to date, especially if link was powered on by GOP.
- */
- read_edp_current_link_settings_on_detect(link);
- }
-
prev_sink = link->local_sink;
if (prev_sink != NULL) {
dc_sink_retain(prev_sink);
@@ -765,6 +763,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
case SIGNAL_TYPE_EDP: {
+ read_edp_current_link_settings_on_detect(link);
detect_edp_sink_caps(link);
sink_caps.transaction_type =
DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -2329,7 +2328,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
if (core_dc->current_state->res_ctx.
pipe_ctx[i].stream->link
- == link)
+ == link) {
/* DMCU -1 for all controller id values,
* therefore +1 here
*/
@@ -2337,6 +2336,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
core_dc->current_state->
res_ctx.pipe_ctx[i].stream_res.tg->inst +
1;
+
+ /* Disable brightness ramping when the display is blanked
+ * as it can hang the DMCU
+ */
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
+ frame_ramp = 0;
+ }
}
}
abm->funcs->set_backlight_level_pwm(
@@ -2984,8 +2990,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
/* Retrain with preferred link settings only relevant for
* DP signal type
+ * Check for non-DP signal or if passive dongle present
*/
- if (!dc_is_dp_signal(link->connector_signal))
+ if (!dc_is_dp_signal(link->connector_signal) ||
+ link->dongle_max_pix_clk > 0)
return;
for (i = 0; i < MAX_PIPES; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 056be4c34a98..2c7aaed907b9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2230,18 +2230,25 @@ static void get_active_converter_info(
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
ddc_service_set_dongle_type(link->ddc,
link->dpcd_caps.dongle_type);
+ link->dpcd_caps.is_branch_dev = false;
return;
}
/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+ if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
+ link->dpcd_caps.is_branch_dev = false;
+ }
+
+ else {
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+ }
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
break;
- case DOWNSTREAM_DVI_HDMI:
- /* At this point we don't know is it DVI or HDMI,
+ case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS:
+ /* At this point we don't know whether it is DVI, HDMI, or DP++,
* assume DVI.*/
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
break;
@@ -2258,6 +2265,10 @@ static void get_active_converter_info(
det_caps, sizeof(det_caps));
switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+ /*Handle DP case as DONGLE_NONE*/
+ case DOWN_STREAM_DETAILED_DP:
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ break;
case DOWN_STREAM_DETAILED_VGA:
link->dpcd_caps.dongle_type =
DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2267,6 +2278,8 @@ static void get_active_converter_info(
DISPLAY_DONGLE_DP_DVI_CONVERTER;
break;
case DOWN_STREAM_DETAILED_HDMI:
+ case DOWN_STREAM_DETAILED_DP_PLUS_PLUS:
+ /* Handle the DP++ active converter case; process it as the HDMI case, according to the DP 1.4 spec */
link->dpcd_caps.dongle_type =
DISPLAY_DONGLE_DP_HDMI_CONVERTER;
@@ -2282,14 +2295,18 @@ static void get_active_converter_info(
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
- hdmi_caps.bits.YCrCr422_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
- hdmi_caps.bits.YCrCr420_PASS_THROUGH;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
- hdmi_caps.bits.YCrCr422_CONVERSION;
- link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
- hdmi_caps.bits.YCrCr420_CONVERSION;
+ /*YCBCR capability only for HDMI case*/
+ if (port_caps->bits.DWN_STRM_PORTX_TYPE
+ == DOWN_STREAM_DETAILED_HDMI) {
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through =
+ hdmi_caps.bits.YCrCr422_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through =
+ hdmi_caps.bits.YCrCr420_PASS_THROUGH;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter =
+ hdmi_caps.bits.YCrCr422_CONVERSION;
+ link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter =
+ hdmi_caps.bits.YCrCr420_CONVERSION;
+ }
link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
translate_dpcd_max_bpc(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 51a78283a86d..2ceaab4fb5de 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -258,7 +258,7 @@ bool resource_construct(
* PORT_CONNECTIVITY == 1 (as instructed by HW team).
*/
update_num_audio(&straps, &num_audio, &pool->audio_support);
- for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+ for (i = 0; i < caps->num_audio; i++) {
struct audio *aud = create_funcs->create_audio(ctx, i);
if (aud == NULL) {
@@ -1669,6 +1669,12 @@ static struct audio *find_first_free_audio(
return pool->audios[i];
}
}
+
+ /* use engine id to find free audio */
+ if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
+ return pool->audios[id];
+ }
+
/*not found the matching one, first come first serve*/
for (i = 0; i < pool->audio_count; i++) {
if (res_ctx->is_audio_acquired[i] == false) {
@@ -1833,6 +1839,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
pix_clk /= 2;
if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
normalized_pix_clk = pix_clk;
break;
@@ -1979,7 +1986,7 @@ enum dc_status resource_map_pool_resources(
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count) {
+ stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index af7f8be230f7..352862370390 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -612,7 +612,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
- if (pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
+ if (pipe_ctx->stream_res.stream_enc &&
+ pipe_ctx->stream_res.stream_enc->funcs->set_dynamic_metadata != NULL) {
if (pipe_ctx->stream->dmdata_address.quad_part != 0) {
/* if using dynamic meta, don't set up generic infopackets */
pipe_ctx->stream_res.encoder_info_frame.hdrsmd.valid = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index f8903bcabe49..58bd131d5b48 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -239,6 +239,10 @@ static void dmcu_set_backlight_level(
s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
REG_WRITE(BIOS_SCRATCH_2, s2);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+ 0, 1, 80000);
}
static void dce_abm_init(struct abm *abm)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 858a58856ebd..fafb4b470140 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -965,11 +965,17 @@ void hwss_edp_backlight_control(
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
/* notify audio driver for audio modes of monitor */
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc;
struct pp_smu_funcs *pp_smu = NULL;
- struct clk_mgr *clk_mgr = core_dc->clk_mgr;
+ struct clk_mgr *clk_mgr;
unsigned int i, num_audio = 1;
+ if (!pipe_ctx->stream)
+ return;
+
+ core_dc = pipe_ctx->stream->ctx->dc;
+ clk_mgr = core_dc->clk_mgr;
+
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true)
return;
@@ -999,9 +1005,15 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
{
- struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc *dc;
struct pp_smu_funcs *pp_smu = NULL;
- struct clk_mgr *clk_mgr = dc->clk_mgr;
+ struct clk_mgr *clk_mgr;
+
+ if (!pipe_ctx || !pipe_ctx->stream)
+ return;
+
+ dc = pipe_ctx->stream->ctx->dc;
+ clk_mgr = dc->clk_mgr;
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
return;
@@ -1009,6 +1021,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
+ pipe_ctx->stream_res.audio->enabled = false;
+
if (dc->res_pool->pp_smu)
pp_smu = dc->res_pool->pp_smu;
@@ -1039,8 +1053,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
/* dal_audio_disable_azalia_audio_jack_presence(stream->audio,
* stream->stream_engine_id);
*/
- if (pipe_ctx->stream_res.audio)
- pipe_ctx->stream_res.audio->enabled = false;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index e50a696fcb5d..2118ea21d7e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1195,16 +1195,7 @@ static void dcn10_init_hw(struct dc *dc)
* everything down.
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct hubp *hubp = dc->res_pool->hubps[i];
- struct dpp *dpp = dc->res_pool->dpps[i];
-
- hubp->funcs->hubp_init(hubp);
- dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
- plane_atomic_power_down(dc, dpp, hubp);
- }
-
- apply_DEGVIDCN10_253_wa(dc);
+ dc->hwss.init_pipes(dc, dc->current_state);
}
for (i = 0; i < dc->res_pool->audio_count; i++) {
@@ -1375,10 +1366,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
return result;
}
-
-
-
-
static bool
dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
const struct dc_stream_state *stream)
@@ -2516,6 +2503,12 @@ static void dcn10_apply_ctx_for_surface(
if (removed_pipe[i])
dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (removed_pipe[i]) {
+ dc->hwss.optimize_bandwidth(dc, context);
+ break;
+ }
+
if (dc->hwseq->wa.DEGVIDCN10_254)
hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 1a20461c2937..a12530a3ab9c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -508,7 +508,7 @@ static const struct resource_caps rv2_res_cap = {
.num_audio = 3,
.num_stream_encoder = 3,
.num_pll = 3,
- .num_ddc = 3,
+ .num_ddc = 4,
};
static const struct dc_plane_cap plane_cap = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 51a3dfe97f0e..31aa6ee5cd5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -102,14 +102,19 @@ void dccg2_init(struct dccg *dccg)
switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) {
case 6:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1);
+ /* Fall through */
case 5:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1);
+ /* Fall through */
case 4:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1);
+ /* Fall through */
case 3:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1);
+ /* Fall through */
case 2:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1);
+ /* Fall through */
case 1:
REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index ece6e136437b..6e2dbd03f9bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -337,6 +337,7 @@ static enum dcn_hubbub_page_table_block_size page_table_block_size_to_hw(unsigne
break;
default:
ASSERT(false);
+ block_size = page_table_block_size;
break;
}
@@ -366,25 +367,24 @@ int hubbub2_init_dchub_sys_ctx(struct hubbub *hubbub,
struct dcn_vmid_page_table_config phys_config;
REG_SET(DCN_VM_FB_LOCATION_BASE, 0,
- FB_BASE, pa_config->system_aperture.fb_base);
+ FB_BASE, pa_config->system_aperture.fb_base >> 24);
REG_SET(DCN_VM_FB_LOCATION_TOP, 0,
- FB_TOP, pa_config->system_aperture.fb_top);
+ FB_TOP, pa_config->system_aperture.fb_top >> 24);
REG_SET(DCN_VM_FB_OFFSET, 0,
- FB_OFFSET, pa_config->system_aperture.fb_offset);
+ FB_OFFSET, pa_config->system_aperture.fb_offset >> 24);
REG_SET(DCN_VM_AGP_BOT, 0,
- AGP_BOT, pa_config->system_aperture.agp_bot);
+ AGP_BOT, pa_config->system_aperture.agp_bot >> 24);
REG_SET(DCN_VM_AGP_TOP, 0,
- AGP_TOP, pa_config->system_aperture.agp_top);
+ AGP_TOP, pa_config->system_aperture.agp_top >> 24);
REG_SET(DCN_VM_AGP_BASE, 0,
- AGP_BASE, pa_config->system_aperture.agp_base);
+ AGP_BASE, pa_config->system_aperture.agp_base >> 24);
if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) {
- phys_config.depth = 1;
- phys_config.block_size = 4096;
phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12;
phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12;
phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr;
-
+ phys_config.depth = 0;
+ phys_config.block_size = 0;
// Init VMID 0 based on PA config
dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 0b84a322b8a2..d810c8940129 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1153,8 +1153,8 @@ void dcn20_enable_plane(
apt.sys_default.quad_part = 0;
- apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.start_addr;
- apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.end_addr;
+ apt.sys_low.quad_part = dc->vm_pa_config.system_aperture.start_addr;
+ apt.sys_high.quad_part = dc->vm_pa_config.system_aperture.end_addr;
// Program system aperture settings
pipe_ctx->plane_res.hubp->funcs->hubp_set_vm_system_aperture_settings(pipe_ctx->plane_res.hubp, &apt);
@@ -1242,6 +1242,8 @@ void dcn20_pipe_control_lock_global(
CRTC_STATE_VACTIVE);
pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
CRTC_STATE_VBLANK);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
+ CRTC_STATE_VACTIVE);
pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
pipe->stream_res.tg);
}
@@ -1263,6 +1265,17 @@ void dcn20_pipe_control_lock(
if (pipe->plane_state != NULL)
flip_immediate = pipe->plane_state->flip_immediate;
+ if (flip_immediate && lock) {
+ while (pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->plane_res.hubp)) {
+ udelay(1);
+ }
+
+ if (pipe->bottom_pipe != NULL)
+ while (pipe->bottom_pipe->plane_res.hubp->funcs->hubp_is_flip_pending(pipe->bottom_pipe->plane_res.hubp)) {
+ udelay(1);
+ }
+ }
+
/* In flip immediate and pipe splitting case, we need to use GSL
* for synchronization. Only do setup on locking and on flip type change.
*/
@@ -1740,8 +1753,11 @@ static void dcn20_reset_back_end_for_pipe(
else if (pipe_ctx->stream_res.audio) {
dc->hwss.disable_audio_stream(pipe_ctx, FREE_ACQUIRED_RESOURCE);
}
-
}
+#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
+ else if (pipe_ctx->stream_res.dsc)
+ dp_set_dsc_enable(pipe_ctx, false);
+#endif
/* by upper caller loop, parent pipe: pipe0, will be reset last.
* back end share by all pipes and will be disable only when disable
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index 26a66ccf6e72..1ae973962d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -535,7 +535,7 @@ void dcn20_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
- optc1->min_h_sync_width = 8;
+	optc1->min_h_sync_width = 4; // Minimum HSYNC of 8 pixels was originally required by HW for no documented reason. The Oculus Rift S will not light up with 8 since its hsync width is 6, so lower the minimum to 4 to fix that.
optc1->min_v_sync_width = 1;
optc1->comb_opp_id = 0xf;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index d200bc3cec71..b949e202d6cb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -2643,6 +2643,10 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_
if (dc->bb_overrides.min_dcfclk_mhz > 0)
min_dcfclk = dc->bb_overrides.min_dcfclk_mhz;
+ else
+		// Accounting for the SOC/DCF clock relationship, we can go as high as
+		// 506 MHz in Vmin. Program 507 since the SMU will round down to 506.
+ min_dcfclk = 507;
for (i = 0; i < num_states; i++) {
int min_fclk_required_by_uclk;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
index 27679ef6ebe8..96c263223315 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.c
@@ -23,6 +23,8 @@
*
*/
+#include <linux/delay.h>
+
#include "dcn20_vmid.h"
#include "reg_helper.h"
@@ -36,6 +38,38 @@
#define FN(reg_name, field_name) \
vmid->shifts->field_name, vmid->masks->field_name
+static void dcn20_wait_for_vmid_ready(struct dcn20_vmid *vmid)
+{
+	/* According to the hardware spec, we need to poll for the lowest
+ * bit of PAGE_TABLE_BASE_ADDR_LO32 = 1 any time a GPUVM
+ * context is updated. We can't use REG_WAIT here since we
+	 * don't have a separate field to wait on.
+ *
+ * TODO: Confirm timeout / poll interval with hardware team
+ */
+
+ int max_times = 10000;
+ int delay_us = 5;
+ int i;
+
+ for (i = 0; i < max_times; ++i) {
+ uint32_t entry_lo32;
+
+ REG_GET(PAGE_TABLE_BASE_ADDR_LO32,
+ VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32,
+ &entry_lo32);
+
+ if (entry_lo32 & 0x1)
+ return;
+
+ udelay(delay_us);
+ }
+
+ /* VM setup timed out */
+ DC_LOG_WARNING("Timeout while waiting for GPUVM context update\n");
+ ASSERT(0);
+}
+
void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_config *config)
{
REG_SET(PAGE_TABLE_START_ADDR_HI32, 0,
@@ -54,6 +88,9 @@ void dcn20_vmid_setup(struct dcn20_vmid *vmid, const struct dcn_vmid_page_table_
REG_SET(PAGE_TABLE_BASE_ADDR_HI32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_HI32, (config->page_table_base_addr >> 32) & 0xFFFFFFFF);
+ /* Note: per hardware spec PAGE_TABLE_BASE_ADDR_LO32 must be programmed last in sequence */
REG_SET(PAGE_TABLE_BASE_ADDR_LO32, 0,
VM_CONTEXT0_PAGE_DIRECTORY_ENTRY_LO32, config->page_table_base_addr & 0xFFFFFFFF);
+
+ dcn20_wait_for_vmid_ready(vmid);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
index 67089765780b..340ef4d41ebd 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/drm_dsc_dc.c
@@ -377,6 +377,12 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+ /* As per DSC spec v1.2a recommendation: */
+ if (vdsc_cfg->native_420)
+ vdsc_cfg->second_line_offset_adj = 512;
+ else
+ vdsc_cfg->second_line_offset_adj = 0;
+
return 0;
}
EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c89393c19232..a148ffde8b12 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -212,7 +212,7 @@ struct resource_pool {
struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
unsigned int clk_src_count;
- struct audio *audios[MAX_PIPES];
+ struct audio *audios[MAX_AUDIOS];
unsigned int audio_count;
struct audio_support audio_support;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 959f5b654611..9502478c4a1b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -61,8 +61,8 @@ enum dcn_hubbub_page_table_depth {
};
enum dcn_hubbub_page_table_block_size {
- DCN_PAGE_TABLE_BLOCK_SIZE_4KB,
- DCN_PAGE_TABLE_BLOCK_SIZE_64KB
+ DCN_PAGE_TABLE_BLOCK_SIZE_4KB = 0,
+ DCN_PAGE_TABLE_BLOCK_SIZE_64KB = 4,
};
struct dcn_hubbub_phys_addr_config {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 8759ec03aede..f82365e2d03c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -34,6 +34,7 @@
* Data types shared between different Virtual HW blocks
******************************************************************************/
+#define MAX_AUDIOS 7
#define MAX_PIPES 6
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
#define MAX_DWB_PIPES 1
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 1c66166d0a94..2c90d1b46c8b 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -43,7 +43,7 @@ enum dpcd_revision {
enum dpcd_downstream_port_type {
DOWNSTREAM_DP = 0,
DOWNSTREAM_VGA,
- DOWNSTREAM_DVI_HDMI,
+	DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS, /* DVI, HDMI, DP++ */
DOWNSTREAM_NONDDC /* has no EDID (TV,CV) */
};
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index 9f661bf96ed0..5b1ebb7f995a 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -123,6 +123,7 @@ enum amd_pp_sensors {
AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK,
AMDGPU_PP_SENSOR_MIN_FAN_RPM,
AMDGPU_PP_SENSOR_MAX_FAN_RPM,
+ AMDGPU_PP_SENSOR_VCN_POWER_STATE,
};
enum amd_pp_task {
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index f1565c448de5..0685a3388e38 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -137,12 +137,37 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
{
int ret = 0, clk_id = 0;
uint32_t param = 0;
+ uint32_t clock_limit;
if (!min && !max)
return -EINVAL;
- if (!smu_clk_dpm_is_enabled(smu, clk_type))
+ if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+		/* clock values are in MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
return 0;
+ }
mutex_lock(&smu->mutex);
clk_id = smu_clk_get_index(smu, clk_type);
@@ -281,7 +306,8 @@ int smu_get_power_num_states(struct smu_context *smu,
/* not support power state */
memset(state_info, 0, sizeof(struct pp_states_info));
- state_info->nums = 0;
+ state_info->nums = 1;
+ state_info->states[0] = POWER_STATE_TYPE_DEFAULT;
return 0;
}
@@ -312,6 +338,10 @@ int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
*size = 4;
break;
+ case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT) ? 1 : 0;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -698,6 +728,12 @@ static int smu_sw_init(void *handle)
return ret;
}
+ ret = smu_register_irq_handler(smu);
+ if (ret) {
+ pr_err("Failed to register smc irq handler!\n");
+ return ret;
+ }
+
return 0;
}
@@ -707,6 +743,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
+ kfree(smu->irq_source);
+ smu->irq_source = NULL;
+
ret = smu_smc_table_sw_fini(smu);
if (ret) {
pr_err("Failed to sw fini smc table!\n");
@@ -1063,10 +1102,6 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- ret = smu_register_irq_handler(smu);
- if (ret)
- goto failed;
-
if (!smu->pm_enabled)
adev->pm.dpm_enabled = false;
else
@@ -1096,9 +1131,6 @@ static int smu_hw_fini(void *handle)
kfree(table_context->overdrive_table);
table_context->overdrive_table = NULL;
- kfree(smu->irq_source);
- smu->irq_source = NULL;
-
ret = smu_fini_fb_allocations(smu);
if (ret)
return ret;
@@ -1349,13 +1381,49 @@ static int smu_enable_umd_pstate(void *handle,
return 0;
}
+static int smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
+ smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+ return ret;
+}
+
int smu_adjust_power_state_dynamic(struct smu_context *smu,
enum amd_dpm_forced_level level,
bool skip_display_settings)
{
int ret = 0;
int index = 0;
- uint32_t sclk_mask, mclk_mask, soc_mask;
long workload;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -1386,39 +1454,10 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
if (smu_dpm_ctx->dpm_level != level) {
- switch (level) {
- case AMD_DPM_FORCED_LEVEL_HIGH:
- ret = smu_force_dpm_limit_value(smu, true);
- break;
- case AMD_DPM_FORCED_LEVEL_LOW:
- ret = smu_force_dpm_limit_value(smu, false);
- break;
-
- case AMD_DPM_FORCED_LEVEL_AUTO:
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- ret = smu_unforce_dpm_levels(smu);
- break;
-
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
- case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- ret = smu_get_profiling_clk_mask(smu, level,
- &sclk_mask,
- &mclk_mask,
- &soc_mask);
- if (ret)
- return ret;
- smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask);
- smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask);
- smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
- break;
-
- case AMD_DPM_FORCED_LEVEL_MANUAL:
- case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
- default:
- break;
+ ret = smu_asic_set_performance_level(smu, level);
+ if (ret) {
+ ret = smu_default_set_performance_level(smu, level);
}
-
if (!ret)
smu_dpm_ctx->dpm_level = level;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index e32ae9d3373c..18e780f566fa 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -1111,6 +1111,7 @@ static int smu10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
void *value, int *size)
{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
uint32_t sclk, mclk;
int ret = 0;
@@ -1132,6 +1133,10 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_GPU_TEMP:
*((uint32_t *)value) = smu10_thermal_get_temperature(hwmgr);
break;
+ case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
+ *(uint32_t *)value = smu10_data->vcn_power_gated ? 0 : 1;
+ *size = 4;
+ break;
default:
ret = -EINVAL;
break;
@@ -1175,18 +1180,22 @@ static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
{
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
if (bgate) {
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerDownVcn, 0);
+ smu10_data->vcn_power_gated = true;
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PowerUpVcn, 0);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
+ smu10_data->vcn_power_gated = false;
}
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 1af992fb0bde..208e6711d506 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -429,7 +429,6 @@ struct smu_table_context
struct smu_table *tables;
uint32_t table_count;
struct smu_table memory_pool;
- uint16_t software_shutdown_temp;
uint8_t thermal_controller_type;
uint16_t TDPODLimit;
@@ -613,6 +612,7 @@ struct pptable_funcs {
int (*tables_init)(struct smu_context *smu, struct smu_table *tables);
int (*set_thermal_fan_table)(struct smu_context *smu);
int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+ int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed);
int (*set_watermarks_table)(struct smu_context *smu, void *watermarks,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*get_current_clk_freq_by_table)(struct smu_context *smu,
@@ -621,6 +621,7 @@ struct pptable_funcs {
int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range);
int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states);
int (*set_default_od_settings)(struct smu_context *smu, bool initialize);
+ int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
};
struct smu_funcs
@@ -685,7 +686,6 @@ struct smu_funcs
int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
int (*conv_power_profile_to_pplib_workload)(int power_profile);
- int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed);
uint32_t (*get_fan_control_mode)(struct smu_context *smu);
int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
@@ -751,8 +751,6 @@ struct smu_funcs
((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
#define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-#define smu_get_current_rpm(smu, speed) \
- ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0)
#define smu_set_fan_speed_rpm(smu, speed) \
((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
#define smu_send_smc_msg(smu, msg) \
@@ -841,6 +839,8 @@ struct smu_funcs
((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0)
#define smu_set_fan_speed_percent(smu, speed) \
((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
+#define smu_get_fan_speed_rpm(smu, speed) \
+ ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0)
#define smu_msg_get_index(smu, msg) \
((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
@@ -918,6 +918,9 @@ struct smu_funcs
((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0)
#define smu_baco_reset(smu) \
((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0)
+#define smu_asic_set_performance_level(smu, level) \
+	((smu)->ppt_funcs->set_performance_level ? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL)
+
extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
uint16_t *size, uint8_t *frev, uint8_t *crev,
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 2dae0ae0829e..cc0a3b2256af 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -23,6 +23,7 @@
#include "pp_debug.h"
#include <linux/firmware.h>
+#include <linux/pci.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
@@ -577,28 +578,20 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
{
int ret = 0;
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- if (enable && power_gate->uvd_gated) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
- if (ret)
- return ret;
- }
- power_gate->uvd_gated = false;
+ if (enable) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+ if (ret)
+ return ret;
} else {
- if (!enable && !power_gate->uvd_gated) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
- if (ret)
- return ret;
- }
- power_gate->uvd_gated = true;
- }
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ if (ret)
+ return ret;
}
- return 0;
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, enable);
+
+ return ret;
}
static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
@@ -626,11 +619,26 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
return ret;
}
+static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
+{
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+ DpmDescriptor_t *dpm_desc = NULL;
+ uint32_t clk_index = 0;
+
+ clk_index = smu_clk_get_index(smu, clk_type);
+ dpm_desc = &pptable->DpmDescriptor[clk_index];
+
+ /* 0 - Fine grained DPM, 1 - Discrete DPM */
+ return dpm_desc->SnapToDiscrete == 0 ? true : false;
+}
+
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
int i, size = 0, ret = 0;
uint32_t cur_value = 0, value = 0, count = 0;
+ uint32_t freq_values[3] = {0};
+ uint32_t mark_index = 0;
switch (clk_type) {
case SMU_GFXCLK:
@@ -643,22 +651,42 @@ static int navi10_print_clk_levels(struct smu_context *smu,
ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
if (ret)
return size;
+
/* 10KHz -> MHz */
cur_value = cur_value / 100;
- size += sprintf(buf, "current clk: %uMhz\n", cur_value);
-
ret = smu_get_dpm_level_count(smu, clk_type, &count);
if (ret)
return size;
- for (i = 0; i < count; i++) {
- ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ for (i = 0; i < count; i++) {
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ if (ret)
+ return size;
+
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ }
+ } else {
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
+ if (ret)
+ return size;
+ ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
if (ret)
return size;
- size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
- cur_value == value ? "*" : "");
+ freq_values[1] = cur_value;
+ mark_index = cur_value == freq_values[0] ? 0 :
+ cur_value == freq_values[2] ? 2 : 1;
+ if (mark_index != 1)
+ freq_values[1] = (freq_values[0] + freq_values[2]) / 2;
+
+ for (i = 0; i < 3; i++) {
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i, freq_values[i],
+ i == mark_index ? "*" : "");
+ }
+
}
break;
default:
@@ -919,12 +947,13 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
return !!(feature_enabled & SMC_DPM_FEATURE);
}
-static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value)
+static int navi10_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
{
SmuMetrics_t metrics;
int ret = 0;
- if (!value)
+ if (!speed)
return -EINVAL;
memset(&metrics, 0, sizeof(metrics));
@@ -934,7 +963,7 @@ static int navi10_get_fan_speed(struct smu_context *smu, uint16_t *value)
if (ret)
return ret;
- *value = metrics.CurrFanSpeed;
+ *speed = metrics.CurrFanSpeed;
return ret;
}
@@ -944,10 +973,10 @@ static int navi10_get_fan_speed_percent(struct smu_context *smu,
{
int ret = 0;
uint32_t percent = 0;
- uint16_t current_rpm;
+ uint32_t current_rpm;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- ret = navi10_get_fan_speed(smu, &current_rpm);
+ ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
if (ret)
return ret;
@@ -1530,6 +1559,76 @@ static int navi10_set_ppfeature_status(struct smu_context *smu,
return 0;
}
+static int navi10_set_peak_clock_by_device(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+ uint32_t sclk_freq = 0, uclk_freq = 0;
+ uint32_t uclk_level = 0;
+
+ switch (adev->pdev->revision) {
+ case 0xf0: /* XTX */
+ case 0xc0:
+ sclk_freq = NAVI10_PEAK_SCLK_XTX;
+ break;
+ case 0xf1: /* XT */
+ case 0xc1:
+ sclk_freq = NAVI10_PEAK_SCLK_XT;
+ break;
+ default: /* XL */
+ sclk_freq = NAVI10_PEAK_SCLK_XL;
+ break;
+ }
+
+ ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level);
+ if (ret)
+ return ret;
+ ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq);
+ if (ret)
+ return ret;
+
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ if (ret)
+ return ret;
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = navi10_set_peak_clock_by_device(smu);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int navi10_get_thermal_temperature_range(struct smu_context *smu,
+ struct smu_temperature_range *range)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;
+
+ if (!range || !powerplay_table)
+ return -EINVAL;
+
+	/* The unit is degrees Celsius */
+ range->min = 0;
+ range->max = powerplay_table->software_shutdown_temp;
+
+ return 0;
+}
+
static const struct pptable_funcs navi10_ppt_funcs = {
.tables_init = navi10_tables_init,
.alloc_dpm_context = navi10_allocate_dpm_context,
@@ -1557,6 +1656,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.unforce_dpm_levels = navi10_unforce_dpm_levels,
.is_dpm_running = navi10_is_dpm_running,
.get_fan_speed_percent = navi10_get_fan_speed_percent,
+ .get_fan_speed_rpm = navi10_get_fan_speed_rpm,
.get_power_profile_mode = navi10_get_power_profile_mode,
.set_power_profile_mode = navi10_set_power_profile_mode,
.get_profiling_clk_mask = navi10_get_profiling_clk_mask,
@@ -1565,6 +1665,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.get_uclk_dpm_states = navi10_get_uclk_dpm_states,
.get_ppfeature_status = navi10_get_ppfeature_status,
.set_ppfeature_status = navi10_set_ppfeature_status,
+ .set_performance_level = navi10_set_performance_level,
+ .get_thermal_temperature_range = navi10_get_thermal_temperature_range,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
index 957288e22f47..620ff17c2fef 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h
@@ -23,6 +23,10 @@
#ifndef __NAVI10_PPT_H__
#define __NAVI10_PPT_H__
+#define NAVI10_PEAK_SCLK_XTX (1830)
+#define NAVI10_PEAK_SCLK_XT (1755)
+#define NAVI10_PEAK_SCLK_XL (1625)
+
extern void navi10_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 95c7c4dae523..ac5b26228e75 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -1124,10 +1124,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
struct amdgpu_device *adev = smu->adev;
- int low = SMU_THERMAL_MINIMUM_ALERT_TEMP *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
+ int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
uint32_t val;
if (!range)
@@ -1138,6 +1136,9 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
if (high > range->max)
high = range->max;
+ low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP, range->min);
+ high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP, range->max);
+
if (low > high)
return -EINVAL;
@@ -1146,8 +1147,8 @@ static int smu_v11_0_set_thermal_range(struct smu_context *smu,
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
- val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
@@ -1186,7 +1187,10 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
if (!smu->pm_enabled)
return ret;
+
ret = smu_get_thermal_temperature_range(smu, &range);
+ if (ret)
+ return ret;
if (smu->smu_table.thermal_controller_type) {
ret = smu_v11_0_set_thermal_range(smu, &range);
@@ -1202,15 +1206,17 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu)
return ret;
}
- adev->pm.dpm.thermal.min_temp = range.min;
- adev->pm.dpm.thermal.max_temp = range.max;
- adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
- adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
- adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
- adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
- adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
- adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
- adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;
+ adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_mem_temp = range.mem_min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.min_temp = range.min * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ adev->pm.dpm.thermal.max_temp = range.max * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
return ret;
}
@@ -1371,23 +1377,6 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
return ret;
}
-static int smu_v11_0_get_current_rpm(struct smu_context *smu,
- uint32_t *current_rpm)
-{
- int ret;
-
- ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
-
- if (ret) {
- pr_err("Attempt to get current RPM from SMC Failed!\n");
- return ret;
- }
-
- smu_read_smc_arg(smu, current_rpm);
-
- return 0;
-}
-
static uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
@@ -1773,7 +1762,6 @@ static const struct smu_funcs smu_v11_0_funcs = {
.set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
.set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
- .get_current_rpm = smu_v11_0_get_current_rpm,
.get_fan_control_mode = smu_v11_0_get_fan_control_mode,
.set_fan_control_mode = smu_v11_0_set_fan_control_mode,
.set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index bb9bb09cfc7a..dd6fd1c8bf24 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -450,7 +450,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu)
memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
sizeof(PPTable_t));
- table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
@@ -3015,6 +3014,23 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
return ret;
}
+static int vega20_get_fan_speed_rpm(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+
+ if (ret) {
+ pr_err("Attempt to get current RPM from SMC Failed!\n");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, speed);
+
+ return 0;
+}
+
static int vega20_get_fan_speed_percent(struct smu_context *smu,
uint32_t *speed)
{
@@ -3022,7 +3038,7 @@ static int vega20_get_fan_speed_percent(struct smu_context *smu,
uint32_t current_rpm = 0, percent = 0;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- ret = smu_get_current_rpm(smu, &current_rpm);
+ ret = vega20_get_fan_speed_rpm(smu, &current_rpm);
if (ret)
return ret;
@@ -3217,35 +3233,24 @@ static int vega20_set_watermarks_table(struct smu_context *smu,
return 0;
}
-static const struct smu_temperature_range vega20_thermal_policy[] =
-{
- {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
- { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
static int vega20_get_thermal_temperature_range(struct smu_context *smu,
struct smu_temperature_range *range)
{
-
+ struct smu_table_context *table_context = &smu->smu_table;
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = table_context->power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- if (!range)
+ if (!range || !powerplay_table)
return -EINVAL;
- memcpy(range, &vega20_thermal_policy[0], sizeof(struct smu_temperature_range));
-
- range->max = pptable->TedgeLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->hotspot_crit_max = pptable->ThotspotLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->mem_crit_max = pptable->ThbmLimit *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
- range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM)*
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+	/* The unit is degrees Celsius */
+ range->min = 0;
+ range->max = powerplay_table->usSoftwareShutdownTemp;
+ range->edge_emergency_max = (pptable->TedgeLimit + CTF_OFFSET_EDGE);
+ range->hotspot_crit_max = pptable->ThotspotLimit;
+ range->hotspot_emergency_max = (pptable->ThotspotLimit + CTF_OFFSET_HOTSPOT);
+ range->mem_crit_max = pptable->ThbmLimit;
+ range->mem_emergency_max = (pptable->ThbmLimit + CTF_OFFSET_HBM);
return 0;
@@ -3293,6 +3298,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.is_dpm_running = vega20_is_dpm_running,
.set_thermal_fan_table = vega20_set_thermal_fan_table,
.get_fan_speed_percent = vega20_get_fan_speed_percent,
+ .get_fan_speed_rpm = vega20_get_fan_speed_rpm,
.set_watermarks_table = vega20_set_watermarks_table,
.get_thermal_temperature_range = vega20_get_thermal_temperature_range
};
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index bc19dbd531ef..359030d5d818 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -191,6 +191,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.fb_base = bochs->fb_base;
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
+ bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index ee777469293a..e4e22bbae2a7 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -48,6 +48,7 @@ config DRM_DUMB_VGA_DAC
config DRM_LVDS_ENCODER
tristate "Transparent parallel to LVDS encoder support"
depends on OF
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Support for transparent parallel to LVDS encoders that don't require
@@ -116,9 +117,10 @@ config DRM_THINE_THC63LVD1024
config DRM_TOSHIBA_TC358764
tristate "TC358764 DSI/LVDS bridge"
- depends on DRM && DRM_PANEL
depends on OF
select DRM_MIPI_DSI
+ select DRM_KMS_HELPER
+ select DRM_PANEL
help
Toshiba TC358764 DSI/LVDS bridge driver.
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 410572f14257..e1dafb0cc5e2 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -254,7 +254,6 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
struct drm_device *dev = client->dev;
struct drm_client_buffer *buffer;
struct drm_gem_object *obj;
- void *vaddr;
int ret;
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
@@ -281,6 +280,36 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
buffer->gem = obj;
+ return buffer;
+
+err_delete:
+ drm_client_buffer_delete(buffer);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * drm_client_buffer_vmap - Map DRM client buffer into address space
+ * @buffer: DRM client buffer
+ *
+ * This function maps a client buffer into kernel address space. If the
+ * buffer is already mapped, it returns the mapping's address.
+ *
+ * Client buffer mappings are not refcounted. Each call to
+ * drm_client_buffer_vmap() should be followed by a call to
+ * drm_client_buffer_vunmap(); or the client buffer should be mapped
+ * throughout its lifetime.
+ *
+ * Returns:
+ * The mapped memory's address
+ */
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer)
+{
+ void *vaddr;
+
+ if (buffer->vaddr)
+ return buffer->vaddr;
+
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
@@ -289,21 +318,30 @@ drm_client_buffer_create(struct drm_client_dev *client, u32 width, u32 height, u
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
- vaddr = drm_gem_vmap(obj);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto err_delete;
- }
+ vaddr = drm_gem_vmap(buffer->gem);
+ if (IS_ERR(vaddr))
+ return vaddr;
buffer->vaddr = vaddr;
- return buffer;
-
-err_delete:
- drm_client_buffer_delete(buffer);
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_client_buffer_vmap);
- return ERR_PTR(ret);
+/**
+ * drm_client_buffer_vunmap - Unmap DRM client buffer
+ * @buffer: DRM client buffer
+ *
+ * This function removes a client buffer's memory mapping. Calling this
+ * function is only required by clients that manage their buffer mappings
+ * by themselves.
+ */
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
+{
+ drm_gem_vunmap(buffer->gem, buffer->vaddr);
+ buffer->vaddr = NULL;
}
+EXPORT_SYMBOL(drm_client_buffer_vunmap);
static void drm_client_buffer_rmfb(struct drm_client_buffer *buffer)
{
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 56d36779d213..c8922b7cac09 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -859,7 +859,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
* simple XOR between the two handle the addition nicely.
*/
cmdline = &connector->cmdline_mode;
- if (cmdline->specified) {
+ if (cmdline->specified && cmdline->rotation_reflection) {
unsigned int cmdline_rest, panel_rest;
unsigned int cmdline_rot, panel_rot;
unsigned int sum_rot, sum_rest;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1984e5c54d58..a7ba5b4902d6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -403,6 +403,7 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
struct drm_clip_rect *clip = &helper->dirty_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
+ void *vaddr;
spin_lock_irqsave(&helper->dirty_lock, flags);
clip_copy = *clip;
@@ -412,10 +413,20 @@ static void drm_fb_helper_dirty_work(struct work_struct *work)
/* call dirty callback only when it has been really touched */
if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
+
/* Generic fbdev uses a shadow buffer */
- if (helper->buffer)
+ if (helper->buffer) {
+ vaddr = drm_client_buffer_vmap(helper->buffer);
+ if (IS_ERR(vaddr))
+ return;
drm_fb_helper_dirty_blit_real(helper, &clip_copy);
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ }
+ if (helper->fb->funcs->dirty)
+ helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
+ &clip_copy, 1);
+
+ if (helper->buffer)
+ drm_client_buffer_vunmap(helper->buffer);
}
}
@@ -604,6 +615,16 @@ void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
}
EXPORT_SYMBOL(drm_fb_helper_unlink_fbi);
+static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ return dev->mode_config.prefer_shadow_fbdev ||
+ dev->mode_config.prefer_shadow ||
+ fb->funcs->dirty;
+}
+
static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
u32 width, u32 height)
{
@@ -611,7 +632,7 @@ static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
struct drm_clip_rect *clip = &helper->dirty_clip;
unsigned long flags;
- if (!helper->fb->funcs->dirty)
+ if (!drm_fbdev_use_shadow_fb(helper))
return;
spin_lock_irqsave(&helper->dirty_lock, flags);
@@ -2178,6 +2199,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
struct drm_framebuffer *fb;
struct fb_info *fbi;
u32 format;
+ void *vaddr;
DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
sizes->surface_width, sizes->surface_height,
@@ -2200,16 +2222,10 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
- fbi->screen_buffer = buffer->vaddr;
- /* Shamelessly leak the physical address to user-space */
-#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
- if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
- fbi->fix.smem_start =
- page_to_phys(virt_to_page(fbi->screen_buffer));
-#endif
+
drm_fb_helper_fill_info(fbi, fb_helper, sizes);
- if (fb->funcs->dirty) {
+ if (drm_fbdev_use_shadow_fb(fb_helper)) {
struct fb_ops *fbops;
void *shadow;
@@ -2231,6 +2247,19 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fbdefio = &drm_fbdev_defio;
fb_deferred_io_init(fbi);
+ } else {
+ /* buffer is mapped for HW framebuffer */
+ vaddr = drm_client_buffer_vmap(fb_helper->buffer);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ fbi->screen_buffer = vaddr;
+ /* Shamelessly leak the physical address to user-space */
+#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
+ if (drm_leak_fbdev_smem && fbi->fix.smem_start == 0)
+ fbi->fix.smem_start =
+ page_to_phys(virt_to_page(fbi->screen_buffer));
+#endif
}
return 0;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 0b72468e8131..57564318ceea 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -835,7 +835,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
struct drm_device *dev = fb->dev;
struct drm_atomic_state *state;
struct drm_plane *plane;
- struct drm_connector *conn;
+ struct drm_connector *conn __maybe_unused;
struct drm_connector_state *conn_state;
int i, ret;
unsigned plane_mask;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 74a5739df506..80fcd5dc1558 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1686,7 +1686,7 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
*
* Additionals options can be provided following the mode, using a comma to
* separate each option. Valid options can be found in
- * Documentation/fb/modedb.txt.
+ * Documentation/fb/modedb.rst.
*
* The intermediate drm_cmdline_mode structure is required to store additional
* options from the command line modline like the force-enable/disable flag.
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 60ce4a8ad9e1..6f7d3b3b3628 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,6 +2,7 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST)
+ depends on MMU
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select SND_SOC_HDMI_CODEC if SND_SOC
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index a594ab7be2c0..164d914cbe9a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -44,7 +44,7 @@ static unsigned int fimc_mask = 0xc;
module_param_named(fimc_devs, fimc_mask, uint, 0644);
MODULE_PARM_DESC(fimc_devs, "Alias mask for assigning FIMC devices to Exynos DRM");
-#define get_fimc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_fimc_context(dev) dev_get_drvdata(dev)
enum {
FIMC_CLK_LCLK,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 50904eee96f7..2a3382d43bc9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -267,7 +267,7 @@ static inline void g2d_hw_reset(struct g2d_data *g2d)
static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
- struct g2d_cmdlist_node *node = g2d->cmdlist_node;
+ struct g2d_cmdlist_node *node;
int nr;
int ret;
struct g2d_buf_info *buf_info;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1e4b21c49a06..1c524db9570f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -58,7 +58,7 @@
#define GSC_COEF_DEPTH 3
#define GSC_AUTOSUSPEND_DELAY 2000
-#define get_gsc_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define get_gsc_context(dev) dev_get_drvdata(dev)
#define gsc_read(offset) readl(ctx->regs + (offset))
#define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 9af096479e1c..b24ba948b725 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -94,12 +94,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
do {
cpu_relax();
- } while (retry > 1 &&
+ } while (--retry > 1 &&
scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
do {
cpu_relax();
scaler_write(1, SCALER_INT_EN);
- } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
return retry ? 0 : -EIO;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 91355c2ea8a5..8cace65f50ce 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -16,7 +16,6 @@ subdir-ccflags-y := -Wall -Wextra
subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
subdir-ccflags-y += $(call cc-disable-warning, type-limits)
subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
-subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough)
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
# clang warnings
subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index c4710889cb32..3ef4e9f573cf 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -765,7 +765,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
}
if (bdb->version >= 226) {
- u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+ u32 wakeup_time = psr->psr2_tp2_tp3_wakeup_time;
wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
switch (wakeup_time) {
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 753ac3165061..7b908e10d32e 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -178,6 +178,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
clpchgroup = (sa->deburst * deinterleave / num_channels) << i;
bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
+ bi->num_qgv_points = qi.num_points;
+
for (j = 0; j < qi.num_points; j++) {
const struct intel_qgv_point *sp = &qi.points[j];
int ct, bw;
@@ -195,7 +197,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv)
bi->deratedbw[j] = min(maxdebw,
bw * 9 / 10); /* 90% */
- DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%d\n",
+ DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
@@ -211,14 +213,17 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
{
int i;
- /* Did we initialize the bw limits successfully? */
- if (dev_priv->max_bw[0].num_planes == 0)
- return UINT_MAX;
-
for (i = 0; i < ARRAY_SIZE(dev_priv->max_bw); i++) {
const struct intel_bw_info *bi =
&dev_priv->max_bw[i];
+ /*
+ * Pcode will not expose all QGV points when
+ * SAGV is forced to off/min/med/max.
+ */
+ if (qgv_point >= bi->num_qgv_points)
+ return UINT_MAX;
+
if (num_planes >= bi->num_planes)
return bi->deratedbw[qgv_point];
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 8993ab283562..0d19bbd08122 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -2240,6 +2240,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(2 * 96000, min_cdclk);
/*
+ * "For DP audio configuration, cdclk frequency shall be set to
+ * meet the following requirements:
+ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
+ * 270 | 320 or higher
+ * 162 | 200 or higher"
+ */
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
+ min_cdclk = max(crtc_state->port_clock, min_cdclk);
+
+ /*
* On Valleyview some DSI panels lose (v|h)sync when the clock is lower
* than 320000KHz.
*/
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8592a7d422de..592b92782fab 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1839,7 +1839,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
- trace_intel_pipe_enable(dev_priv, pipe);
+ trace_intel_pipe_enable(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -1880,7 +1880,7 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- trace_intel_pipe_disable(dev_priv, pipe);
+ trace_intel_pipe_disable(crtc);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
@@ -12042,7 +12042,7 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
case INTEL_OUTPUT_DDI:
if (WARN_ON(!HAS_DDI(to_i915(dev))))
break;
- /* else: fall through */
+ /* else, fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index c93ad512014c..2d1939db108f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -438,16 +438,23 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
#define ICL_AUX_PW_TO_CH(pw_idx) \
((pw_idx) - ICL_PW_CTL_IDX_AUX_A + AUX_CH_A)
+#define ICL_TBT_AUX_PW_TO_CH(pw_idx) \
+ ((pw_idx) - ICL_PW_CTL_IDX_AUX_TBT1 + AUX_CH_C)
+
static void
icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- enum aux_ch aux_ch = ICL_AUX_PW_TO_CH(power_well->desc->hsw.idx);
+ int pw_idx = power_well->desc->hsw.idx;
+ bool is_tbt = power_well->desc->hsw.is_tc_tbt;
+ enum aux_ch aux_ch;
u32 val;
+ aux_ch = is_tbt ? ICL_TBT_AUX_PW_TO_CH(pw_idx) :
+ ICL_AUX_PW_TO_CH(pw_idx);
val = I915_READ(DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
- if (power_well->desc->hsw.is_tc_tbt)
+ if (is_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 4336df46fe78..d0fc34826771 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -231,6 +231,7 @@ static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
switch (lane_info) {
default:
MISSING_CASE(lane_info);
+ /* fall through */
case 1:
case 2:
case 4:
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 2f4894e9a03d..5ddbe71ab423 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -478,13 +478,13 @@ struct psr_table {
/* TP wake up time in multiple of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
-
- /* PSR2 TP2/TP3 wakeup time for 16 panels */
- u32 psr2_tp2_tp3_wakeup_time;
} __packed;
struct bdb_psr {
struct psr_table psr_table[16];
+
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 391621ee3cbb..39a661927d8e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -341,7 +341,7 @@ err:
*/
if (!i915_terminally_wedged(i915))
return VM_FAULT_SIGBUS;
- /* else: fall through */
+ /* else, fall through */
case -EAGAIN:
/*
* EAGAIN means the gpu is hung and we'll wait for the error
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b36ad269f4ea..65eb430cedba 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -268,7 +268,7 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
switch (type) {
default:
MISSING_CASE(type);
- /* fallthrough to use PAGE_KERNEL anyway */
+ /* fallthrough - to use PAGE_KERNEL anyway */
case I915_MAP_WB:
pgprot = PAGE_KERNEL;
break;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 05011d4a3b88..914b5d4112bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -253,14 +253,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
+ if (i915_gem_init_hw(i915))
+ goto err_wedged;
+
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- intel_gt_resume(i915);
-
- if (i915_gem_init_hw(i915))
+ if (intel_gt_resume(i915))
goto err_wedged;
intel_uc_resume(i915);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 528b61678334..2caa594322bc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -664,7 +664,15 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
for_each_sgt_page(page, sgt_iter, pages) {
if (obj->mm.dirty)
- set_page_dirty(page);
+ /*
+ * As this may not be anonymous memory (e.g. shmem)
+			 * but may exist on a real mapping, we have to lock
+ * the page in order to dirty it -- holding
+ * the page reference is not sufficient to
+ * prevent the inode from being truncated.
+ * Play safe and take the lock.
+ */
+ set_page_dirty_lock(page);
mark_page_accessed(page);
put_page(page);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 2c454f227c2e..23120901c55f 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -126,6 +126,7 @@ static void intel_context_retire(struct i915_active *active)
if (ce->state)
__context_unpin_state(ce->state);
+ intel_ring_unpin(ce->ring);
intel_context_put(ce);
}
@@ -160,27 +161,35 @@ int intel_context_active_acquire(struct intel_context *ce, unsigned long flags)
intel_context_get(ce);
+ err = intel_ring_pin(ce->ring);
+ if (err)
+ goto err_put;
+
if (!ce->state)
return 0;
err = __context_pin_state(ce->state, flags);
- if (err) {
- i915_active_cancel(&ce->active);
- intel_context_put(ce);
- return err;
- }
+ if (err)
+ goto err_ring;
/* Preallocate tracking nodes */
if (!i915_gem_context_is_kernel(ce->gem_context)) {
err = i915_active_acquire_preallocate_barrier(&ce->active,
ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
+ if (err)
+ goto err_state;
}
return 0;
+
+err_state:
+ __context_unpin_state(ce->state);
+err_ring:
+ intel_ring_unpin(ce->ring);
+err_put:
+ intel_context_put(ce);
+ i915_active_cancel(&ce->active);
+ return err;
}
void intel_context_active_release(struct intel_context *ce)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 7fd33e81c2d9..f25632c9b292 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -969,9 +969,14 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ unsigned int slice = fls(sseu->slice_mask) - 1;
+ unsigned int subslice;
u32 mcr_s_ss_select;
- u32 slice = fls(sseu->slice_mask);
- u32 subslice = fls(sseu->subslice_mask[slice]);
+
+ GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask));
+ subslice = fls(sseu->subslice_mask[slice]);
+ GEM_BUG_ON(!subslice);
+ subslice--;
if (IS_GEN(dev_priv, 10))
mcr_s_ss_select = GEN8_MCR_SLICE(slice) |
@@ -1471,6 +1476,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
struct i915_gpu_error * const error = &engine->i915->gpu_error;
struct i915_request *rq;
intel_wakeref_t wakeref;
+ unsigned long flags;
if (header) {
va_list ap;
@@ -1490,10 +1496,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
i915_reset_engine_count(error, engine),
i915_reset_count(error));
- rcu_read_lock();
-
drm_printf(m, "\tRequests:\n");
+ spin_lock_irqsave(&engine->active.lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1513,8 +1518,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
print_request_ring(m, rq);
}
-
- rcu_read_unlock();
+ spin_unlock_irqrestore(&engine->active.lock, flags);
wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm);
if (wakeref) {
@@ -1672,7 +1676,6 @@ struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
struct i915_request *request, *active = NULL;
- unsigned long flags;
/*
* We are called by the error capture, reset and to dump engine
@@ -1685,7 +1688,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
- spin_lock_irqsave(&engine->active.lock, flags);
+ lockdep_assert_held(&engine->active.lock);
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
@@ -1700,7 +1703,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
active = request;
break;
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
return active;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2ce00d3dc42a..ae5b6baf6dff 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
{
intel_wakeref_init(&engine->wakeref);
}
-
-int intel_engines_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = 0;
-
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id) {
- intel_engine_pm_get(engine);
- engine->serial++; /* kernel context lost */
- err = engine->resume(engine);
- intel_engine_pm_put(engine);
- if (err) {
- dev_err(i915->drm.dev,
- "Failed to restart %s (%d)\n",
- engine->name, err);
- break;
- }
- }
- intel_gt_pm_put(i915);
-
- return err;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index b326cd993d60..a11c893f64c6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,16 +7,22 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "intel_engine_types.h"
+#include "intel_wakeref.h"
+
struct drm_i915_private;
-struct intel_engine_cs;
void intel_engine_pm_get(struct intel_engine_cs *engine);
void intel_engine_pm_put(struct intel_engine_cs *engine);
+static inline bool
+intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+{
+ return intel_wakeref_get_if_active(&engine->wakeref);
+}
+
void intel_engine_park(struct intel_engine_cs *engine);
void intel_engine_init__pm(struct intel_engine_cs *engine);
-int intel_engines_resume(struct drm_i915_private *i915);
-
#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 868b220214f8..43e975a26016 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -70,6 +70,18 @@ struct intel_ring {
struct list_head request_list;
struct list_head active_link;
+ /*
+ * As we have two types of rings, one global to the engine used
+ * by ringbuffer submission and those that are exclusive to a
+ * context used by execlists, we have to play safe and allow
+ * atomic updates to the pin_count. However, the actual pinning
+ * of the context is either done during initialisation for
+ * ringbuffer submission or serialised as part of the context
+ * pinning for execlists, and so we do not need a mutex ourselves
+ * to serialise intel_ring_pin/intel_ring_unpin.
+ */
+ atomic_t pin_count;
+
u32 head;
u32 tail;
u32 emit;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 7b5967751762..9f8f7f54191f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,6 +5,7 @@
*/
#include "i915_drv.h"
+#include "intel_engine_pm.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"
@@ -118,10 +119,11 @@ void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
intel_engine_reset(engine, false);
}
-void intel_gt_resume(struct drm_i915_private *i915)
+int intel_gt_resume(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int err = 0;
/*
* After resume, we may need to poke into the pinned kernel
@@ -129,9 +131,12 @@ void intel_gt_resume(struct drm_i915_private *i915)
* Only the kernel contexts should remain pinned over suspend,
* allowing us to fixup the user contexts on their first pin.
*/
+ intel_gt_pm_get(i915);
for_each_engine(engine, i915, id) {
struct intel_context *ce;
+ intel_engine_pm_get(engine);
+
ce = engine->kernel_context;
if (ce)
ce->ops->reset(ce);
@@ -139,5 +144,19 @@ void intel_gt_resume(struct drm_i915_private *i915)
ce = engine->preempt_context;
if (ce)
ce->ops->reset(ce);
+
+ engine->serial++; /* kernel context lost */
+ err = engine->resume(engine);
+
+ intel_engine_pm_put(engine);
+ if (err) {
+ dev_err(i915->drm.dev,
+ "Failed to restart %s (%d)\n",
+ engine->name, err);
+ break;
+ }
}
+ intel_gt_pm_put(i915);
+
+ return err;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 7dd1130a19a4..53f342b20181 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct drm_i915_private *i915);
void intel_gt_pm_init(struct drm_i915_private *i915);
void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+int intel_gt_resume(struct drm_i915_private *i915);
#endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b42b5f158295..82b7ace62d97 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1414,6 +1414,7 @@ static void execlists_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+ GEM_BUG_ON(!i915_active_is_idle(&ce->active));
GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->state)
@@ -1426,7 +1427,6 @@ static void execlists_context_unpin(struct intel_context *ce)
{
i915_gem_context_unpin_hw_id(ce->gem_context);
i915_gem_object_unpin_map(ce->state->obj);
- intel_ring_unpin(ce->ring);
}
static void
@@ -1478,13 +1478,9 @@ __execlists_context_pin(struct intel_context *ce,
goto unpin_active;
}
- ret = intel_ring_pin(ce->ring);
- if (ret)
- goto unpin_map;
-
ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
- goto unpin_ring;
+ goto unpin_map;
ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
@@ -1492,8 +1488,6 @@ __execlists_context_pin(struct intel_context *ce,
return 0;
-unpin_ring:
- intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_active:
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 4c478b38e420..3f907701ef4d 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -687,7 +687,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
- intel_engine_pm_get(engine);
intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
@@ -718,16 +717,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
}
}
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake = 0;
enum intel_engine_id id;
- intel_gt_pm_get(i915);
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
+ if (intel_engine_pm_get_if_awake(engine))
+ awake |= engine->mask;
reset_prepare_engine(engine);
+ }
intel_uc_reset_prepare(i915);
+
+ return awake;
}
static void gt_revoke(struct drm_i915_private *i915)
@@ -761,20 +765,22 @@ static int gt_reset(struct drm_i915_private *i915,
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
- intel_engine_pm_put(engine);
intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+ intel_engine_signal_breadcrumbs(engine);
}
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+ intel_engine_mask_t awake)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
- intel_engine_signal_breadcrumbs(engine);
+ if (awake & engine->mask)
+ intel_engine_pm_put(engine);
}
- intel_gt_pm_put(i915);
}
static void nop_submit_request(struct i915_request *request)
@@ -798,6 +804,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
+ intel_engine_mask_t awake;
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &error->flags))
@@ -817,7 +824,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- reset_prepare(i915);
+ awake = reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -841,7 +848,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
- reset_finish(i915);
+ reset_finish(i915, awake);
GEM_TRACE("end\n");
}
@@ -951,6 +958,21 @@ static int do_reset(struct drm_i915_private *i915,
return gt_reset(i915, stalled_mask);
}
+static int resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int ret;
+
+ for_each_engine(engine, i915, id) {
+ ret = engine->resume(engine);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* i915_reset - reset chip after a hang
* @i915: #drm_i915_private to reset
@@ -973,6 +995,7 @@ void i915_reset(struct drm_i915_private *i915,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
+ intel_engine_mask_t awake;
int ret;
GEM_TRACE("flags=%lx\n", error->flags);
@@ -989,7 +1012,7 @@ void i915_reset(struct drm_i915_private *i915,
dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
error->reset_count++;
- reset_prepare(i915);
+ awake = reset_prepare(i915);
if (!intel_has_gpu_reset(i915)) {
if (i915_modparams.reset)
@@ -1024,13 +1047,17 @@ void i915_reset(struct drm_i915_private *i915,
if (ret) {
DRM_ERROR("Failed to initialise HW following reset (%d)\n",
ret);
- goto error;
+ goto taint;
}
+ ret = resume(i915);
+ if (ret)
+ goto taint;
+
i915_queue_hangcheck(i915);
finish:
- reset_finish(i915);
+ reset_finish(i915, awake);
unlock:
mutex_unlock(&error->wedge_mutex);
return;
@@ -1081,7 +1108,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
- if (!intel_wakeref_active(&engine->wakeref))
+ if (!intel_engine_pm_get_if_awake(engine))
return 0;
reset_prepare_engine(engine);
@@ -1116,12 +1143,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
* process to program RING_MODE, HWSP and re-enable submission.
*/
ret = engine->resume(engine);
- if (ret)
- goto out;
out:
intel_engine_cancel_stop_cs(engine);
reset_finish_engine(engine);
+ intel_engine_pm_put(engine);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index c6023bc9452d..12010e798868 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1149,16 +1149,16 @@ i915_emit_bb_start(struct i915_request *rq,
int intel_ring_pin(struct intel_ring *ring)
{
struct i915_vma *vma = ring->vma;
- enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
unsigned int flags;
void *addr;
int ret;
- GEM_BUG_ON(ring->vaddr);
+ if (atomic_fetch_inc(&ring->pin_count))
+ return 0;
ret = i915_timeline_pin(ring->timeline);
if (ret)
- return ret;
+ goto err_unpin;
flags = PIN_GLOBAL;
@@ -1172,26 +1172,31 @@ int intel_ring_pin(struct intel_ring *ring)
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
- goto unpin_timeline;
+ goto err_timeline;
if (i915_vma_is_map_and_fenceable(vma))
addr = (void __force *)i915_vma_pin_iomap(vma);
else
- addr = i915_gem_object_pin_map(vma->obj, map);
+ addr = i915_gem_object_pin_map(vma->obj,
+ i915_coherent_map_type(vma->vm->i915));
if (IS_ERR(addr)) {
ret = PTR_ERR(addr);
- goto unpin_ring;
+ goto err_ring;
}
vma->obj->pin_global++;
+ GEM_BUG_ON(ring->vaddr);
ring->vaddr = addr;
+
return 0;
-unpin_ring:
+err_ring:
i915_vma_unpin(vma);
-unpin_timeline:
+err_timeline:
i915_timeline_unpin(ring->timeline);
+err_unpin:
+ atomic_dec(&ring->pin_count);
return ret;
}
@@ -1207,16 +1212,19 @@ void intel_ring_reset(struct intel_ring *ring, u32 tail)
void intel_ring_unpin(struct intel_ring *ring)
{
- GEM_BUG_ON(!ring->vma);
- GEM_BUG_ON(!ring->vaddr);
+ if (!atomic_dec_and_test(&ring->pin_count))
+ return;
/* Discard any unused bytes beyond that submitted to hw. */
intel_ring_reset(ring, ring->tail);
+ GEM_BUG_ON(!ring->vma);
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
i915_gem_object_unpin_map(ring->vma->obj);
+
+ GEM_BUG_ON(!ring->vaddr);
ring->vaddr = NULL;
ring->vma->obj->pin_global--;
@@ -2081,10 +2089,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ intel_engine_cleanup_common(engine);
+
intel_ring_unpin(engine->buffer);
intel_ring_put(engine->buffer);
- intel_engine_cleanup_common(engine);
kfree(engine);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 15e90fd2cfdc..98dfb086320f 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -1098,10 +1098,25 @@ static void glk_whitelist_build(struct intel_engine_cs *engine)
static void cfl_whitelist_build(struct intel_engine_cs *engine)
{
+ struct i915_wa_list *w = &engine->whitelist;
+
if (engine->class != RENDER_CLASS)
return;
- gen9_whitelist_build(&engine->whitelist);
+ gen9_whitelist_build(w);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
+ *
+ * This covers 4 register which are next to one another :
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
}
static void cnl_whitelist_build(struct intel_engine_cs *engine)
@@ -1129,6 +1144,19 @@ static void icl_whitelist_build(struct intel_engine_cs *engine)
/* WaEnableStateCacheRedirectToCS:icl */
whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
+
+ /*
+ * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
+ *
+ * This covers 4 register which are next to one another :
+ * - PS_INVOCATION_COUNT
+ * - PS_INVOCATION_COUNT_UDW
+ * - PS_DEPTH_COUNT
+ * - PS_DEPTH_COUNT_UDW
+ */
+ whitelist_reg_ext(w, PS_INVOCATION_COUNT,
+ RING_FORCE_TO_NONPRIV_RD |
+ RING_FORCE_TO_NONPRIV_RANGE_4);
break;
case VIDEO_DECODE_CLASS:
@@ -1258,8 +1286,12 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
GEN7_SARCHKMD,
- GEN7_DISABLE_DEMAND_PREFETCH |
- GEN7_DISABLE_SAMPLER_PREFETCH);
+ GEN7_DISABLE_DEMAND_PREFETCH);
+
+ /* Wa_1606682166:icl */
+ wa_write_or(wal,
+ GEN7_SARCHKMD,
+ GEN7_DISABLE_SAMPLER_PREFETCH);
}
if (IS_GEN_RANGE(i915, 9, 11)) {
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 086801b51441..486c6953dcb1 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -66,6 +66,7 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
ring->base.timeline = &ring->timeline;
+ atomic_set(&ring->base.pin_count, 1);
INIT_LIST_HEAD(&ring->base.request_list);
intel_ring_update_space(&ring->base);
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 89da9e7cc1ba..b5c590c9ccba 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -71,13 +71,16 @@ static int igt_atomic_reset(void *arg)
goto unlock;
for (p = igt_atomic_phases; p->name; p++) {
+ intel_engine_mask_t awake;
+
GEM_TRACE("intel_gpu_reset under %s\n", p->name);
+ awake = reset_prepare(i915);
p->critical_section_begin();
reset_prepare(i915);
err = intel_gpu_reset(i915, ALL_ENGINES);
- reset_finish(i915);
p->critical_section_end();
+ reset_finish(i915, awake);
if (err) {
pr_err("intel_gpu_reset failed under %s\n", p->name);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 9eaf030affd0..44becd9538be 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -925,7 +925,12 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
err = 0;
for (i = 0; i < engine->whitelist.count; i++) {
- if (!fn(engine, a[i], b[i], engine->whitelist.list[i].reg))
+ const struct i915_wa *wa = &engine->whitelist.list[i];
+
+ if (i915_mmio_reg_offset(wa->reg) & RING_FORCE_TO_NONPRIV_RD)
+ continue;
+
+ if (!fn(engine, a[i], b[i], wa->reg))
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6ea88270c818..b09dc315e2da 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -2674,11 +2674,6 @@ static int scan_workload(struct intel_vgpu_workload *workload)
gma_head == gma_tail)
return 0;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
@@ -2724,11 +2719,6 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.workload = workload;
s.is_ctx_wa = true;
- if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
- ret = -EINVAL;
- goto out;
- }
-
ret = ip_gma_set(&s, gma_head);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 65e847392aea..8bb292b01271 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -245,7 +245,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
plane->hw_format = fmt;
plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -368,7 +368,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
alpha_plane, alpha_force);
plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
@@ -472,7 +472,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
plane->drm_format = drm_format;
plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
- if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0))
+ if (!vgpu_gmadr_is_valid(vgpu, plane->base))
return -EINVAL;
plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 53115bdae12b..4b04af569c05 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2141,11 +2141,20 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
unsigned long index = off >> info->gtt_entry_size_shift;
+ unsigned long gma;
struct intel_gvt_gtt_entry e;
if (bytes != 4 && bytes != 8)
return -EINVAL;
+ gma = index << I915_GTT_PAGE_SHIFT;
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ gma, 1 << I915_GTT_PAGE_SHIFT)) {
+ gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
+ memset(p_data, 0, bytes);
+ return 0;
+ }
+
ggtt_get_guest_entry(ggtt_mm, &e, index);
memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
bytes);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 144301b778df..23aa3e50cbf8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1911,6 +1911,18 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
+ } else if (entry->size != size) {
+ /* the same gfn with different size: unmap and re-map */
+ gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
+
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+ if (ret)
+ goto err_unlock;
+
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ if (ret)
+ goto err_unmap;
} else {
kref_get(&entry->ref);
*dma_addr = entry->dma_addr;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 2144fb46d0e1..9f3fd7d96a69 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -364,16 +364,13 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->indirect_ctx.shadow_va = NULL;
}
-static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
- struct i915_gem_context *ctx)
+static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ struct i915_gem_context *ctx)
{
struct intel_vgpu_mm *mm = workload->shadow_mm;
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm);
int i = 0;
- if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
- return -EINVAL;
-
if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
px_dma(ppgtt->pd) = mm->ppgtt_mm.shadow_pdps[0];
} else {
@@ -384,8 +381,6 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
-
- return 0;
}
static int
@@ -614,6 +609,8 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_vgpu_submission *s = &vgpu->submission;
+ int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -622,8 +619,16 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return ret;
}
+ if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
+ !workload->shadow_mm->ppgtt_mm.shadowed) {
+ gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+ return -EINVAL;
+ }
+
update_shadow_pdps(workload);
+ set_context_ppgtt_from_shadow(workload, s->shadow[ring]->gem_context);
+
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
@@ -674,7 +679,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_vgpu_submission *s = &vgpu->submission;
struct i915_request *rq;
int ring_id = workload->ring_id;
int ret;
@@ -685,13 +689,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&vgpu->vgpu_lock);
mutex_lock(&dev_priv->drm.struct_mutex);
- ret = set_context_ppgtt_from_shadow(workload,
- s->shadow[ring_id]->gem_context);
- if (ret < 0) {
- gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
- goto err_req;
- }
-
ret = intel_gvt_workload_req_alloc(workload);
if (ret)
goto err_req;
@@ -990,6 +987,7 @@ static int workload_thread(void *priv)
int ret;
bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
kfree(p);
@@ -1013,6 +1011,8 @@ static int workload_thread(void *priv)
workload->ring_id, workload,
workload->vgpu->id);
+ intel_runtime_pm_get(rpm);
+
gvt_dbg_sched("ring id %d will dispatch workload %p\n",
workload->ring_id, workload);
@@ -1042,6 +1042,7 @@ complete:
intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
+ intel_runtime_pm_put_unchecked(rpm);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
@@ -1492,6 +1493,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
+ if (!intel_gvt_ggtt_validate_range(vgpu, start,
+ _RING_CTL_BUF_SIZE(ctl))) {
+ gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
+ return ERR_PTR(-EINVAL);
+ }
+
workload = alloc_workload(vgpu);
if (IS_ERR(workload))
return workload;
@@ -1516,9 +1523,31 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
+
+ if (workload->wa_ctx.indirect_ctx.size != 0) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.indirect_ctx.guest_gma,
+ workload->wa_ctx.indirect_ctx.size)) {
+ kmem_cache_free(s->workloads, workload);
+ gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
+ workload->wa_ctx.indirect_ctx.guest_gma);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.per_ctx.valid = per_ctx & 1;
+ if (workload->wa_ctx.per_ctx.valid) {
+ if (!intel_gvt_ggtt_validate_range(vgpu,
+ workload->wa_ctx.per_ctx.guest_gma,
+ CACHELINE_BYTES)) {
+ kmem_cache_free(s->workloads, workload);
+ gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
+ workload->wa_ctx.per_ctx.guest_gma);
+ return ERR_PTR(-EINVAL);
+ }
+ }
}
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
diff --git a/drivers/gpu/drm/i915/gvt/trace_points.c b/drivers/gpu/drm/i915/gvt/trace_points.c
index a3deed692b9c..fe552e877e09 100644
--- a/drivers/gpu/drm/i915/gvt/trace_points.c
+++ b/drivers/gpu/drm/i915/gvt/trace_points.c
@@ -28,8 +28,6 @@
*
*/
-#include "trace.h"
-
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bc909ec5d9c3..fe7a6ec2c199 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1674,8 +1674,9 @@ struct drm_i915_private {
} dram_info;
struct intel_bw_info {
- int num_planes;
- int deratedbw[3];
+ unsigned int deratedbw[3]; /* for each QGV point */
+ u8 num_qgv_points;
+ u8 num_planes;
} max_bw[6];
struct drm_private_obj bw_obj;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 190ad54fb072..8a659d3d7435 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
@@ -1307,21 +1306,13 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
intel_mocs_init_l3cc_table(dev_priv);
- /* Only when the HW is re-initialised, can we replay the requests */
- ret = intel_engines_resume(dev_priv);
- if (ret)
- goto cleanup_uc;
-
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_engines_set_scheduler_caps(dev_priv);
return 0;
-cleanup_uc:
- intel_uc_fini_hw(dev_priv);
out:
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
return ret;
}
@@ -1580,6 +1571,11 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
if (ret)
goto err_uc_init;
+ /* Only when the HW is re-initialised, can we replay the requests */
+ ret = intel_gt_resume(dev_priv);
+ if (ret)
+ goto err_init_hw;
+
/*
* Despite its name intel_init_clock_gating applies both display
* clock gating workarounds; GT mmio workarounds and the occasional
@@ -1593,20 +1589,20 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
ret = intel_engines_verify_workarounds(dev_priv);
if (ret)
- goto err_init_hw;
+ goto err_gt;
ret = __intel_engines_record_defaults(dev_priv);
if (ret)
- goto err_init_hw;
+ goto err_gt;
if (i915_inject_load_failure()) {
ret = -ENODEV;
- goto err_init_hw;
+ goto err_gt;
}
if (i915_inject_load_failure()) {
ret = -EIO;
- goto err_init_hw;
+ goto err_gt;
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
@@ -1620,7 +1616,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* HW as irrevisibly wedged, but keep enough state around that the
* driver doesn't explode during runtime.
*/
-err_init_hw:
+err_gt:
mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_set_wedged(dev_priv);
@@ -1630,6 +1626,7 @@ err_init_hw:
i915_gem_drain_workqueue(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
+err_init_hw:
intel_uc_fini_hw(dev_priv);
err_uc_init:
intel_uc_fini(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 8ab820145ea6..7015a97b1097 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1444,9 +1444,11 @@ unwind_pd:
spin_lock(&pdp->lock);
if (atomic_dec_and_test(&pd->used)) {
gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
+ pdp->entry[pdpe] = vm->scratch_pd;
GEM_BUG_ON(!atomic_read(&pdp->used));
atomic_dec(&pdp->used);
- free_pd(vm, pd);
+ GEM_BUG_ON(alloc);
+ alloc = pd; /* defer the free to after the lock */
}
spin_unlock(&pdp->lock);
unwind:
@@ -1515,7 +1517,9 @@ unwind_pdp:
spin_lock(&pml4->lock);
if (atomic_dec_and_test(&pdp->used)) {
gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
- free_pd(vm, pdp);
+ pml4->entry[pml4e] = vm->scratch_pdp;
+ GEM_BUG_ON(alloc);
+ alloc = pdp; /* defer the free until after the lock */
}
spin_unlock(&pml4->lock);
unwind:
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b7e9fddef270..8bc76fcff70d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1194,6 +1194,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
switch (engine->id) {
default:
MISSING_CASE(engine->id);
+ /* fall through */
case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
@@ -1417,6 +1418,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct intel_engine_cs *engine = i915->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
struct i915_request *request;
+ unsigned long flags;
ee->engine_id = -1;
@@ -1428,10 +1430,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
+ spin_lock_irqsave(&engine->active.lock, flags);
request = intel_engine_find_active_request(engine);
if (request) {
struct i915_gem_context *ctx = request->gem_context;
- struct intel_ring *ring;
+ struct intel_ring *ring = request->ring;
ee->vm = ctx->vm ?: &ggtt->vm;
@@ -1461,7 +1464,6 @@ static void gem_record_rings(struct i915_gpu_state *error)
ee->rq_post = request->postfix;
ee->rq_tail = request->tail;
- ring = request->ring;
ee->cpu_ring_head = ring->head;
ee->cpu_ring_tail = ring->tail;
ee->ringbuffer =
@@ -1469,6 +1471,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
engine_record_requests(engine, request, ee);
}
+ spin_unlock_irqrestore(&engine->active.lock, flags);
ee->hws_page =
i915_error_object_create(i915,
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a700c5c3d167..5140017f9a39 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1567,28 +1567,10 @@ static void config_oa_regs(struct drm_i915_private *dev_priv,
}
}
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static void delay_after_mux(void)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
- const struct i915_oa_config *oa_config = stream->oa_config;
-
- /* PRM:
- *
- * OA unit is using “crclk” for its functionality. When trunk
- * level clock gating takes place, OA clock would be gated,
- * unable to count the events from non-render clock domain.
- * Render clock gating must be disabled when OA is enabled to
- * count the events from non-render domain. Unit level clock
- * gating for RCS should also be disabled.
- */
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN7_DOP_CLOCK_GATE_ENABLE));
- I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
- GEN6_CSUNIT_CLOCK_GATE_DISABLE));
-
- config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
-
- /* It apparently takes a fairly long time for a new MUX
+ /*
+ * It apparently takes a fairly long time for a new MUX
* configuration to be be applied after these register writes.
* This delay duration was derived empirically based on the
* render_basic config but hopefully it covers the maximum
@@ -1610,6 +1592,30 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream)
* a delay at this location would mitigate any invalid reports.
*/
usleep_range(15000, 20000);
+}
+
+static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+{
+ struct drm_i915_private *dev_priv = stream->dev_priv;
+ const struct i915_oa_config *oa_config = stream->oa_config;
+
+ /*
+ * PRM:
+ *
+ * OA unit is using “crclk” for its functionality. When trunk
+ * level clock gating takes place, OA clock would be gated,
+ * unable to count the events from non-render clock domain.
+ * Render clock gating must be disabled when OA is enabled to
+ * count the events from non-render domain. Unit level clock
+ * gating for RCS should also be disabled.
+ */
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN7_DOP_CLOCK_GATE_ENABLE));
+ I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) |
+ GEN6_CSUNIT_CLOCK_GATE_DISABLE));
+
+ config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ delay_after_mux();
config_oa_regs(dev_priv, oa_config->b_counter_regs,
oa_config->b_counter_regs_len);
@@ -1835,6 +1841,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
return ret;
config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len);
+ delay_after_mux();
config_oa_regs(dev_priv, oa_config->b_counter_regs,
oa_config->b_counter_regs_len);
@@ -2515,6 +2522,9 @@ static int i915_perf_release(struct inode *inode, struct file *file)
i915_perf_destroy_locked(stream);
mutex_unlock(&dev_priv->perf.lock);
+ /* Release the reference the perf stream kept on the driver. */
+ drm_dev_put(&dev_priv->drm);
+
return 0;
}
@@ -2650,6 +2660,11 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
if (!(param->flags & I915_PERF_FLAG_DISABLED))
i915_perf_enable_locked(stream);
+ /* Take a reference on the driver that will be kept with stream_fd
+ * until its release.
+ */
+ drm_dev_get(&dev_priv->drm);
+
return stream_fd;
err_open:
@@ -3477,9 +3492,13 @@ void i915_perf_init(struct drm_i915_private *dev_priv)
dev_priv->perf.oa.ops.enable_metric_set = gen8_enable_metric_set;
dev_priv->perf.oa.ops.disable_metric_set = gen10_disable_metric_set;
- dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
- dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
-
+ if (IS_GEN(dev_priv, 10)) {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x128;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x3de;
+ } else {
+ dev_priv->perf.oa.ctx_oactxctrl_offset = 0x124;
+ dev_priv->perf.oa.ctx_flexeu0_offset = 0x78e;
+ }
dev_priv->perf.oa.gen8_valid_ctx_bit = (1<<16);
}
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f4ce643b3bc3..cce426b23a24 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -21,24 +21,22 @@
/* watermark/fifo updates */
TRACE_EVENT(intel_pipe_enable,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
TP_STRUCT__entry(
__array(u32, frame, 3)
__array(u32, scanline, 3)
__field(enum pipe, pipe)
),
-
TP_fast_assign(
- enum pipe _pipe;
- for_each_pipe(dev_priv, _pipe) {
- __entry->frame[_pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
- __entry->scanline[_pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = pipe;
+ __entry->pipe = crtc->pipe;
),
TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -49,8 +47,8 @@ TRACE_EVENT(intel_pipe_enable,
);
TRACE_EVENT(intel_pipe_disable,
- TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
- TP_ARGS(dev_priv, pipe),
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
TP_STRUCT__entry(
__array(u32, frame, 3)
@@ -59,14 +57,13 @@ TRACE_EVENT(intel_pipe_disable,
),
TP_fast_assign(
- enum pipe _pipe;
- for_each_pipe(dev_priv, _pipe) {
- __entry->frame[_pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
- __entry->scanline[_pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc *it__;
+ for_each_intel_crtc(&dev_priv->drm, it__) {
+ __entry->frame[it__->pipe] = intel_crtc_get_vblank_counter(it__);
+ __entry->scanline[it__->pipe] = intel_get_crtc_scanline(it__);
}
- __entry->pipe = pipe;
+ __entry->pipe = crtc->pipe;
),
TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
@@ -89,8 +86,7 @@ TRACE_EVENT(intel_pipe_crc,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
),
@@ -112,9 +108,10 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
),
TP_fast_assign(
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
__entry->pipe = pipe;
- __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pipe %c, frame=%u, scanline=%u",
@@ -134,9 +131,10 @@ TRACE_EVENT(intel_pch_fifo_underrun,
TP_fast_assign(
enum pipe pipe = pch_transcoder;
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
__entry->pipe = pipe;
- __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
),
TP_printk("pch transcoder %c, frame=%u, scanline=%u",
@@ -156,12 +154,10 @@ TRACE_EVENT(intel_memory_cxsr,
),
TP_fast_assign(
- enum pipe pipe;
- for_each_pipe(dev_priv, pipe) {
- __entry->frame[pipe] =
- dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
- __entry->scanline[pipe] =
- intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ struct intel_crtc *crtc;
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ __entry->frame[crtc->pipe] = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline[crtc->pipe] = intel_get_crtc_scanline(crtc);
}
__entry->old = old;
__entry->new = new;
@@ -198,8 +194,7 @@ TRACE_EVENT(g4x_wm,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
__entry->sprite = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
@@ -243,8 +238,7 @@ TRACE_EVENT(vlv_wm,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->level = wm->level;
__entry->cxsr = wm->cxsr;
@@ -278,8 +272,7 @@ TRACE_EVENT(vlv_fifo_size,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->sprite0_start = sprite0_start;
__entry->sprite1_start = sprite1_start;
@@ -310,8 +303,7 @@ TRACE_EVENT(intel_update_plane,
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
@@ -338,8 +330,7 @@ TRACE_EVENT(intel_disable_plane,
TP_fast_assign(
__entry->pipe = crtc->pipe;
__entry->name = plane->name;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
),
@@ -364,8 +355,7 @@ TRACE_EVENT(i915_pipe_update_start,
TP_fast_assign(
__entry->pipe = crtc->pipe;
- __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
- crtc->pipe);
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = crtc->debug.min_vbl;
__entry->max = crtc->debug.max_vbl;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 502c54428570..8d1aebc3e857 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -221,13 +221,11 @@ __untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
- struct drm_printer p;
+ if (debug->count) {
+ struct drm_printer p = drm_debug_printer("i915");
- if (!debug->count)
- return;
-
- p = drm_debug_printer("i915");
- __print_intel_runtime_pm_wakeref(&p, debug);
+ __print_intel_runtime_pm_wakeref(&p, debug);
+ }
kfree(debug->owners);
}
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 9cbb2ebf575b..38275310b196 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -66,6 +66,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm,
}
/**
+ * intel_wakeref_get_if_in_use: Acquire the wakeref
+ * @wf: the wakeref
+ *
+ * Acquire a hold on the wakeref, but only if the wakeref is already
+ * active.
+ *
+ * Returns: true if the wakeref was acquired, false otherwise.
+ */
+static inline bool
+intel_wakeref_get_if_active(struct intel_wakeref *wf)
+{
+ return atomic_inc_not_zero(&wf->count);
+}
+
+/**
* intel_wakeref_put: Release the wakeref
* @i915: the drm_i915_private device
* @wf: the wakeref
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 1671db47aa57..e9c55d1d6c04 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -59,6 +59,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
/* copy commands into RB: */
obj = submit->bos[submit->cmd[i].idx].obj;
@@ -149,6 +150,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index be39cf01e51e..dc8ec2c94301 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -115,6 +115,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 9acbbc0f3232..048c8be426f3 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -428,6 +428,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore if there has not been a ctx switch: */
if (priv->lastctx == ctx)
break;
+ /* fall-thru */
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 0e2f74163a16..0aa8a12c9952 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2221,8 +2221,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
if (ret)
goto fail;
- spin_lock_init(&dpu_enc->enc_spinlock);
-
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
@@ -2276,6 +2274,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+ spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
return &dpu_enc->base;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index ff14555372d0..78d5fa230c16 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -439,6 +439,18 @@ static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
mdp5_crtc->enabled = false;
}
+static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_interface *intf = mdp5_cstate->pipeline.intf;
+ u32 count;
+
+ count = intf->mode == MDP5_INTF_DSI_MODE_COMMAND ? 0 : 0xffffffff;
+ drm_crtc_set_max_vblank_count(crtc, count);
+
+ drm_crtc_vblank_on(crtc);
+}
+
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -475,7 +487,7 @@ static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
}
/* Restore vblank irq handling after power is enabled */
- drm_crtc_vblank_on(crtc);
+ mdp5_crtc_vblank_on(crtc);
mdp5_crtc_mode_set_nofb(crtc);
@@ -1028,6 +1040,8 @@ static void mdp5_crtc_reset(struct drm_crtc *crtc)
mdp5_crtc_destroy_state(crtc, crtc->state);
__drm_atomic_helper_crtc_reset(crtc, &mdp5_cstate->base);
+
+ drm_crtc_vblank_reset(crtc);
}
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 4a60f5fca6b0..fec6ef1ae3b9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -740,7 +740,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
dev->driver->get_scanout_position = mdp5_get_scanoutpos;
dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
- dev->max_vblank_count = 0xffffffff;
+ dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
return kms;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ab64ab470de7..c356f5ccf253 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -619,7 +619,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
msm_submitqueue_init(dev, ctx);
- ctx->aspace = priv->gpu->aspace;
+ ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
file->driver_priv = ctx;
return 0;
@@ -1279,7 +1279,8 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- drm_of_component_match_add(dev, matchptr, compare_of, np);
+ if (of_device_is_available(np))
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 8b78554cfde3..8cf6362e64bf 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -32,6 +32,46 @@ static bool use_pages(struct drm_gem_object *obj)
return !msm_obj->vram_node;
}
+/*
+ * Cache sync.. this is a bit over-complicated, to fit dma-mapping
+ * API. Really GPU cache is out of scope here (handled on cmdstream)
+ * and all we need to do is invalidate newly allocated pages before
+ * mapping to CPU as uncached/writecombine.
+ *
+ * On top of this, we have the added headache, that depending on
+ * display generation, the display's iommu may be wired up to either
+ * the toplevel drm device (mdss), or to the mdp sub-node, meaning
+ * that here we either have dma-direct or iommu ops.
+ *
+ * Let this be a cautionary tail of abstraction gone wrong.
+ */
+
+static void sync_for_device(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_map_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
+static void sync_for_cpu(struct msm_gem_object *msm_obj)
+{
+ struct device *dev = msm_obj->base.dev->dev;
+
+ if (get_dma_ops(dev)) {
+ dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ } else {
+ dma_unmap_sg(dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+}
+
/* allocate pages from VRAM carveout, used when no IOMMU: */
static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
@@ -97,8 +137,7 @@ static struct page **get_pages(struct drm_gem_object *obj)
* because display controller, GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_map_sg(dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sync_for_device(msm_obj);
}
return msm_obj->pages;
@@ -127,9 +166,7 @@ static void put_pages(struct drm_gem_object *obj)
* GPU, etc. are not coherent:
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
+ sync_for_cpu(msm_obj);
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 8497768f1b41..126703816794 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -780,7 +780,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock,
connector->display_info.bpc * 3);
- if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ if (crtc_state->mode_changed) {
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
mstc->port,
asyh->dp.pbn);
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 8c92374afcf2..a835cebb6d90 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -475,6 +475,47 @@ nouveau_svm_fault_cache(struct nouveau_svm *svm,
fault->inst, fault->addr, fault->access);
}
+static inline bool
+nouveau_range_done(struct hmm_range *range)
+{
+ bool ret = hmm_range_valid(range);
+
+ hmm_range_unregister(range);
+ return ret;
+}
+
+static int
+nouveau_range_fault(struct hmm_mirror *mirror, struct hmm_range *range)
+{
+ long ret;
+
+ range->default_flags = 0;
+ range->pfn_flags_mask = -1UL;
+
+ ret = hmm_range_register(range, mirror,
+ range->start, range->end,
+ PAGE_SHIFT);
+ if (ret) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return (int)ret;
+ }
+
+ if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
+ up_read(&range->vma->vm_mm->mmap_sem);
+ return -EAGAIN;
+ }
+
+ ret = hmm_range_fault(range, true);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -EBUSY;
+ up_read(&range->vma->vm_mm->mmap_sem);
+ hmm_range_unregister(range);
+ return ret;
+ }
+ return 0;
+}
+
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
@@ -649,10 +690,10 @@ nouveau_svm_fault(struct nvif_notify *notify)
range.values = nouveau_svm_pfn_values;
range.pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT;
again:
- ret = hmm_vma_fault(&svmm->mirror, &range, true);
+ ret = nouveau_range_fault(&svmm->mirror, &range);
if (ret == 0) {
mutex_lock(&svmm->mutex);
- if (!hmm_vma_range_done(&range)) {
+ if (!nouveau_range_done(&range)) {
mutex_unlock(&svmm->mutex);
goto again;
}
@@ -666,8 +707,8 @@ again:
NULL);
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
+ up_read(&svmm->mm->mmap_sem);
}
- up_read(&svmm->mm->mmap_sem);
/* Cancel any faults in the window whose pages didn't manage
* to keep their valid bit, or stay writeable when required.
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index d594f7520b7b..7d78e6deac89 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -285,9 +285,13 @@ static int ttm_set_pages_caching(struct dma_pool *pool,
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
+ unsigned long attrs = 0;
dma_addr_t dma = d_page->dma;
d_page->vaddr &= ~VADDR_FLAG_HUGE_POOL;
- dma_free_coherent(pool->dev, pool->size, (void *)d_page->vaddr, dma);
+ if (pool->type & IS_HUGE)
+ attrs = DMA_ATTR_NO_WARN;
+
+ dma_free_attrs(pool->dev, pool->size, (void *)d_page->vaddr, dma, attrs);
kfree(d_page);
d_page = NULL;
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 98bf694626f7..3a8c4a5971f7 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -23,12 +23,36 @@
#define A4_2WHEEL_MOUSE_HACK_7 0x01
#define A4_2WHEEL_MOUSE_HACK_B8 0x02
+#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
+
struct a4tech_sc {
unsigned long quirks;
unsigned int hw_wheel;
__s32 delayed_value;
};
+static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+
+ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
+ usage->hid == A4_WHEEL_ORIENTATION) {
+ /*
+ * We do not want to have this usage mapped to anything as it's
+ * nonstandard and doesn't really behave like an HID report.
+ * It's only selecting the orientation (vertical/horizontal) of
+ * the previous mouse wheel report. The input_events will be
+ * generated once both reports are recorded in a4_event().
+ */
+ return -1;
+ }
+
+ return 0;
+
+}
+
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -52,8 +76,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
struct input_dev *input;
- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
- !usage->type)
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
return 0;
input = field->hidinput->input;
@@ -64,7 +87,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
- if (usage->hid == 0x000100b8) {
+ if (usage->hid == A4_WHEEL_ORIENTATION) {
input_event(input, EV_REL, value ? REL_HWHEEL :
REL_WHEEL, a4->delayed_value);
input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
@@ -131,6 +154,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
static struct hid_driver a4_driver = {
.name = "a4tech",
.id_table = a4_devices,
+ .input_mapping = a4_input_mapping,
.input_mapped = a4_input_mapped,
.event = a4_event,
.probe = a4_probe,
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index b3d502421b79..0a38e8e9bc78 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -123,9 +123,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
/* Locate the boot interface, to receive the LED change events */
struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+ struct hid_device *boot_hid;
+ struct hid_input *boot_hid_input;
- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+ if (unlikely(boot_interface == NULL))
+ return -ENODEV;
+
+ boot_hid = usb_get_intfdata(boot_interface);
+ boot_hid_input = list_first_entry(&boot_hid->inputs,
struct hid_input, list);
return boot_hid_input->input->event(boot_hid_input->input, type, code,
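
The fix above comes down to one rule: usb_ifnum_to_if() may return NULL (for example when interface 0 is absent or already gone), so its result must be checked before the interface data is dereferenced. A minimal sketch, assuming a usb_dev already in scope:

struct usb_interface *boot_intf = usb_ifnum_to_if(usb_dev, 0);

if (unlikely(boot_intf == NULL))
	return -ENODEV;	/* no boot interface: bail out instead of oopsing */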
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0d695f8e1b2c..0a00be19f7a0 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -568,6 +568,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -768,7 +769,8 @@
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER 0xc52f
#define USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2 0xc532
#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2 0xc534
-#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING 0xc539
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED 0xc539
+#define USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY 0xc53a
#define USB_DEVICE_ID_SPACETRAVELLER 0xc623
#define USB_DEVICE_ID_SPACENAVIGATOR 0xc626
#define USB_DEVICE_ID_DINOVO_DESKTOP 0xc704
@@ -989,6 +991,7 @@
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
+#define USB_DEVICE_ID_SAITEK_X52 0x075c
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 6196217a7d93..cc47f948c1d0 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1125,7 +1125,7 @@ static int logi_dj_recv_query_hidpp_devices(struct dj_receiver_dev *djrcv_dev)
HID_REQ_SET_REPORT);
kfree(hidpp_report);
- return retval;
+ return (retval < 0) ? retval : 0;
}
static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
@@ -1832,13 +1832,17 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
.driver_data = recvr_type_hidpp},
- { /* Logitech gaming receiver (0xc539) */
+ { /* Logitech lightspeed receiver (0xc539) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
- USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_GAMING),
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_LIGHTSPEED),
.driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc513) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER),
.driver_data = recvr_type_27mhz},
+ { /* Logitech powerplay receiver (0xc53a) */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
+ USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_POWERPLAY),
+ .driver_data = recvr_type_gaming_hidpp},
{ /* Logitech 27 MHz HID++ 1.0 receiver (0xc517) */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_S510_RECEIVER_2),
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index e3b6245bf4b2..21268c9fa71a 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3749,15 +3749,45 @@ static const struct hid_device_id hidpp_devices[] = {
{ L27MHZ_DEVICE(HID_ANY_ID) },
- { /* Logitech G403 Gaming Mouse over USB */
+ { /* Logitech G203/Prodigy Gaming Mouse */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
+ { /* Logitech G302 Gaming Mouse */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
+ { /* Logitech G303 Gaming Mouse */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
+ { /* Logitech G400 Gaming Mouse */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
+ { /* Logitech G403 Wireless Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
+ { /* Logitech G403 Gaming Mouse */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
+ { /* Logitech G403 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
+ { /* Logitech G502 Proteus Core Gaming Mouse */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
+ { /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
+ { /* Logitech G502 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
{ /* Logitech G700 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
+ { /* Logitech G700s Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
+ { /* Logitech G703 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
+ { /* Logitech G703 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC090) },
{ /* Logitech G900 Gaming Mouse over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC081) },
+ { /* Logitech G903 Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
+ { /* Logitech G903 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
{ /* Logitech G920 Wheel over USB */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+ { /* Logitech G Pro Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC088) },
{ /* MX5000 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 185a577c46f6..166f41f3173b 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -92,6 +92,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -141,6 +142,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 93942063b51b..49dd2d905c7f 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -585,10 +585,14 @@ static void sony_set_leds(struct sony_sc *sc);
static inline void sony_schedule_work(struct sony_sc *sc,
enum sony_worker which)
{
+ unsigned long flags;
+
switch (which) {
case SONY_WORKER_STATE:
- if (!sc->defer_initialization)
+ spin_lock_irqsave(&sc->lock, flags);
+ if (!sc->defer_initialization && sc->state_worker_initialized)
schedule_work(&sc->state_worker);
+ spin_unlock_irqrestore(&sc->lock, flags);
break;
case SONY_WORKER_HOTPLUG:
if (sc->hotplug_worker_initialized)
@@ -2558,13 +2562,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
static inline void sony_cancel_work_sync(struct sony_sc *sc)
{
+ unsigned long flags;
+
if (sc->hotplug_worker_initialized)
cancel_work_sync(&sc->hotplug_worker);
- if (sc->state_worker_initialized)
+ if (sc->state_worker_initialized) {
+ spin_lock_irqsave(&sc->lock, flags);
+ sc->state_worker_initialized = 0;
+ spin_unlock_irqrestore(&sc->lock, flags);
cancel_work_sync(&sc->state_worker);
+ }
}
-
static int sony_input_configured(struct hid_device *hdev,
struct hid_input *hidinput)
{
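
The two hid-sony hunks above close a race between queueing the state worker and tearing it down: the "initialized" flag is now tested and cleared under the same spinlock, so no new work can be queued once cancel_work_sync() runs. A minimal sketch of the idiom, using a hypothetical struct ctx:

struct ctx {
	spinlock_t lock;
	bool worker_live;
	struct work_struct work;
};

static void ctx_queue(struct ctx *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (c->worker_live)			/* only queue while still live */
		schedule_work(&c->work);
	spin_unlock_irqrestore(&c->lock, flags);
}

static void ctx_teardown(struct ctx *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	c->worker_live = false;			/* no re-queueing past this point */
	spin_unlock_irqrestore(&c->lock, flags);
	cancel_work_sync(&c->work);		/* wait for a running instance */
}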
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index e12f2588ddeb..bdfc5ff3b2c5 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -22,6 +22,8 @@
#include "hid-ids.h"
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
+
static const signed short ff_rumble[] = {
FF_RUMBLE,
-1
@@ -76,6 +78,7 @@ static int tmff_play(struct input_dev *dev, void *data,
struct hid_field *ff_field = tmff->ff_field;
int x, y;
int left, right; /* Rumbling */
+ int motor_swap;
switch (effect->type) {
case FF_CONSTANT:
@@ -100,6 +103,13 @@ static int tmff_play(struct input_dev *dev, void *data,
ff_field->logical_minimum,
ff_field->logical_maximum);
+ /* 2-in-1 strong motor is left */
+ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+ motor_swap = left;
+ left = right;
+ right = motor_swap;
+ }
+
dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
ff_field->value[0] = left;
ff_field->value[1] = right;
@@ -226,6 +236,8 @@ static const struct hid_device_id tm_devices[] = {
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
.driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
+ .driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 55b72573066b..4e11cc6fc34b 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -284,6 +284,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
spin_unlock_irq(&list->hiddev->list_lock);
mutex_lock(&hiddev->existancelock);
+ /*
+ * recheck exist with existance lock held to
+ * avoid opening a disconnected device
+ */
+ if (!list->hiddev->exist) {
+ res = -ENODEV;
+ goto bail_unlock;
+ }
if (!list->hiddev->open++)
if (list->hiddev->exist) {
struct hid_device *hid = hiddev->hid;
@@ -300,6 +308,10 @@ bail_normal_power:
hid_hw_power(hid, PM_HINT_NORMAL);
bail_unlock:
mutex_unlock(&hiddev->existancelock);
+
+ spin_lock_irq(&list->hiddev->list_lock);
+ list_del(&list->node);
+ spin_unlock_irq(&list->hiddev->list_lock);
bail:
file->private_data = NULL;
vfree(list);
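
The hiddev open-path fix above follows a common pattern: register the client first, then re-check the device's "exist" flag under the lock that disconnect also takes, and unwind the registration on every error path. A rough sketch with hypothetical my_dev/my_client types:

static int my_open(struct my_dev *dev, struct my_client *client)
{
	int err;

	spin_lock_irq(&dev->list_lock);
	list_add_tail(&client->node, &dev->clients);
	spin_unlock_irq(&dev->list_lock);

	mutex_lock(&dev->existlock);
	if (!dev->exist) {			/* unplugged since the lookup */
		err = -ENODEV;
		goto bail_unlink;
	}
	dev->open_count++;
	mutex_unlock(&dev->existlock);
	return 0;

bail_unlink:
	mutex_unlock(&dev->existlock);
	spin_lock_irq(&dev->list_lock);
	list_del(&client->node);		/* undo the registration */
	spin_unlock_irq(&dev->list_lock);
	return err;
}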
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8fc36a28081b..7a8ddc999a8e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -533,14 +533,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
*/
buttons = (data[4] << 1) | (data[3] & 0x01);
} else if (features->type == CINTIQ_COMPANION_2) {
- /* d-pad right -> data[4] & 0x10
- * d-pad up -> data[4] & 0x20
- * d-pad left -> data[4] & 0x40
- * d-pad down -> data[4] & 0x80
- * d-pad center -> data[3] & 0x01
+ /* d-pad right -> data[2] & 0x10
+ * d-pad up -> data[2] & 0x20
+ * d-pad left -> data[2] & 0x40
+ * d-pad down -> data[2] & 0x80
+ * d-pad center -> data[1] & 0x01
*/
buttons = ((data[2] >> 4) << 7) |
- ((data[1] & 0x04) << 6) |
+ ((data[1] & 0x04) << 4) |
((data[2] & 0x0F) << 2) |
(data[1] & 0x03);
} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index e7dff5febe16..d42bc0883a32 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
@@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6106_REG_PWM;
data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index a7d2b16dd702..30e18eb60da7 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -408,8 +408,10 @@ static ssize_t occ_show_power_1(struct device *dev,
static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
{
- return div64_u64(get_unaligned_be64(accum) * 1000000ULL,
- get_unaligned_be32(samples));
+ u64 divisor = get_unaligned_be32(samples);
+
+ return (divisor == 0) ? 0 :
+ div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
}
static ssize_t occ_show_power_2(struct device *dev,
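
The occ change above simply refuses to divide by a sample count of zero, which the firmware can legitimately report before any samples accumulate. The same guard in isolation, as a hypothetical helper:

static u64 safe_power_avg(u64 accum, u32 samples)
{
	if (samples == 0)
		return 0;	/* no samples yet: report 0 instead of faulting */

	return div64_u64(accum * 1000000ULL, samples);
}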
diff --git a/drivers/i2c/busses/i2c-at91-core.c b/drivers/i2c/busses/i2c-at91-core.c
index 8d55cdd69ff4..435c7d7377a3 100644
--- a/drivers/i2c/busses/i2c-at91-core.c
+++ b/drivers/i2c/busses/i2c-at91-core.c
@@ -142,7 +142,7 @@ static struct at91_twi_pdata sama5d4_config = {
static struct at91_twi_pdata sama5d2_config = {
.clk_max_div = 7,
- .clk_offset = 4,
+ .clk_offset = 3,
.has_unre_flag = true,
.has_alt_cmd = true,
.has_hold_field = true,
diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c
index e87232f2e708..a3fcc35ffd3b 100644
--- a/drivers/i2c/busses/i2c-at91-master.c
+++ b/drivers/i2c/busses/i2c-at91-master.c
@@ -122,9 +122,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
/* send stop when last byte has been written */
- if (--dev->buf_len == 0)
+ if (--dev->buf_len == 0) {
if (!dev->use_alt_cmd)
at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
+ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
+ }
dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
@@ -542,9 +544,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
} else {
at91_twi_write_next_byte(dev);
at91_twi_write(dev, AT91_TWI_IER,
- AT91_TWI_TXCOMP |
- AT91_TWI_NACK |
- AT91_TWI_TXRDY);
+ AT91_TWI_TXCOMP | AT91_TWI_NACK |
+ (dev->buf_len ? AT91_TWI_TXRDY : 0));
}
}
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
index 2c7f145a036e..d7fd76baec92 100644
--- a/drivers/i2c/busses/i2c-bcm-iproc.c
+++ b/drivers/i2c/busses/i2c-bcm-iproc.c
@@ -392,16 +392,18 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
static void bcm_iproc_i2c_read_valid_bytes(struct bcm_iproc_i2c_dev *iproc_i2c)
{
struct i2c_msg *msg = iproc_i2c->msg;
+ uint32_t val;
/* Read valid data from RX FIFO */
while (iproc_i2c->rx_bytes < msg->len) {
- if (!((iproc_i2c_rd_reg(iproc_i2c, M_FIFO_CTRL_OFFSET) >> M_FIFO_RX_CNT_SHIFT)
- & M_FIFO_RX_CNT_MASK))
+ val = iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET);
+
+ /* rx fifo empty */
+ if (!((val >> M_RX_STATUS_SHIFT) & M_RX_STATUS_MASK))
break;
msg->buf[iproc_i2c->rx_bytes] =
- (iproc_i2c_rd_reg(iproc_i2c, M_RX_OFFSET) >>
- M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+ (val >> M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
iproc_i2c->rx_bytes++;
}
}
diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
index cfc76b5de726..5a1235fd86bb 100644
--- a/drivers/i2c/busses/i2c-nvidia-gpu.c
+++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
@@ -364,7 +364,7 @@ static void gpu_i2c_remove(struct pci_dev *pdev)
/*
* We need gpu_i2c_suspend() even if it is stub, for runtime pm to work
* correctly. Without it, lspci shows runtime pm status as "D0" for the card.
- * Documentation/power/pci.txt also insists for driver to provide this.
+ * Documentation/power/pci.rst also insists that the driver provide this.
*/
static __maybe_unused int gpu_i2c_suspend(struct device *dev)
{
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index d97fb857b0ea..c98ef4c4a0c9 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -435,6 +435,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
* fall through to the write state, as we will need to
* send a byte as well
*/
+ /* Fall through */
case STATE_WRITE:
/*
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 888d89ce81df..beee7b7e0d9a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -302,7 +302,9 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
struct ib_udata *udata,
struct ib_uobject *uobj)
{
+ enum ib_qp_type qp_type = attr->qp_type;
struct ib_qp *qp;
+ bool is_xrc;
if (!dev->ops.create_qp)
return ERR_PTR(-EOPNOTSUPP);
@@ -320,7 +322,8 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
* and more importantly they are created internally by the driver,
* see mlx5 create_dev_resources() as an example.
*/
- if (attr->qp_type < IB_QPT_XRC_INI) {
+ is_xrc = qp_type == IB_QPT_XRC_INI || qp_type == IB_QPT_XRC_TGT;
+ if ((qp_type < IB_QPT_MAX && !is_xrc) || qp_type == IB_QPT_DRIVER) {
qp->res.type = RDMA_RESTRACK_QP;
if (uobj)
rdma_restrack_uadd(&qp->res);
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 01faef7bc061..45d5164e9574 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -393,6 +393,9 @@ u64 rdma_counter_get_hwstat_value(struct ib_device *dev, u8 port, u32 index)
u64 sum;
port_counter = &dev->port_data[port].port_counter;
+ if (!port_counter->hstats)
+ return 0;
+
sum = get_running_counters_hwstat_sum(dev, port, index);
sum += port_counter->hstats->value[index];
@@ -594,7 +597,7 @@ void rdma_counter_init(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats || !dev->port_data)
+ if (!dev->port_data)
return;
rdma_for_each_port(dev, port) {
@@ -602,6 +605,9 @@ void rdma_counter_init(struct ib_device *dev)
port_counter->mode.mode = RDMA_COUNTER_MODE_NONE;
mutex_init(&port_counter->lock);
+ if (!dev->ops.alloc_hw_stats)
+ continue;
+
port_counter->hstats = dev->ops.alloc_hw_stats(dev, port);
if (!port_counter->hstats)
goto fail;
@@ -624,9 +630,6 @@ void rdma_counter_release(struct ib_device *dev)
struct rdma_port_counter *port_counter;
u32 port;
- if (!dev->ops.alloc_hw_stats)
- return;
-
rdma_for_each_port(dev, port) {
port_counter = &dev->port_data[port].port_counter;
kfree(port_counter->hstats);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 9773145dee09..ea8661a00651 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -94,11 +94,17 @@ static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1
-static LIST_HEAD(client_list);
+static u32 highest_client_id;
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);
+static void ib_client_put(struct ib_client *client)
+{
+ if (refcount_dec_and_test(&client->uses))
+ complete(&client->uses_zero);
+}
+
/*
* If client_data is registered then the corresponding client must also still
* be registered.
@@ -661,6 +667,14 @@ static int add_client_context(struct ib_device *device,
down_write(&device->client_data_rwsem);
/*
+ * So long as the client is registered hold both the client and device
+ * unregistration locks.
+ */
+ if (!refcount_inc_not_zero(&client->uses))
+ goto out_unlock;
+ refcount_inc(&device->refcount);
+
+ /*
* Another caller to add_client_context got here first and has already
* completely initialized context.
*/
@@ -683,6 +697,9 @@ static int add_client_context(struct ib_device *device,
return 0;
out:
+ ib_device_put(device);
+ ib_client_put(client);
+out_unlock:
up_write(&device->client_data_rwsem);
return ret;
}
@@ -702,7 +719,7 @@ static void remove_client_context(struct ib_device *device,
client_data = xa_load(&device->client_data, client_id);
xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
client = xa_load(&clients, client_id);
- downgrade_write(&device->client_data_rwsem);
+ up_write(&device->client_data_rwsem);
/*
* Notice we cannot be holding any exclusive locks when calling the
@@ -712,17 +729,13 @@ static void remove_client_context(struct ib_device *device,
*
* For this reason clients and drivers should not call the
* unregistration functions while holding any locks.
- *
- * It tempting to drop the client_data_rwsem too, but this is required
- * to ensure that unregister_client does not return until all clients
- * are completely unregistered, which is required to avoid module
- * unloading races.
*/
if (client->remove)
client->remove(device, client_data);
xa_erase(&device->client_data, client_id);
- up_read(&device->client_data_rwsem);
+ ib_device_put(device);
+ ib_client_put(client);
}
static int alloc_port_data(struct ib_device *device)
@@ -1224,7 +1237,7 @@ static int setup_device(struct ib_device *device)
static void disable_device(struct ib_device *device)
{
- struct ib_client *client;
+ u32 cid;
WARN_ON(!refcount_read(&device->refcount));
@@ -1232,10 +1245,19 @@ static void disable_device(struct ib_device *device)
xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
up_write(&devices_rwsem);
+ /*
+ * Remove clients in LIFO order, see assign_client_id. This could be
+ * more efficient if xarray learns to reverse iterate. Since no new
+ * clients can be added to this ib_device past this point we only need
+ * the maximum possible client_id value here.
+ */
down_read(&clients_rwsem);
- list_for_each_entry_reverse(client, &client_list, list)
- remove_client_context(device, client->client_id);
+ cid = highest_client_id;
up_read(&clients_rwsem);
+ while (cid) {
+ cid--;
+ remove_client_context(device, cid);
+ }
/* Pairs with refcount_set in enable_device */
ib_device_put(device);
@@ -1662,30 +1684,31 @@ static int assign_client_id(struct ib_client *client)
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
- * registration order, and retain a linked list we can reverse iterate
- * to get the LIFO order. The extra linked list can go away if xarray
- * learns to reverse iterate.
+ * registration order.
*/
- if (list_empty(&client_list)) {
- client->client_id = 0;
- } else {
- struct ib_client *last;
-
- last = list_last_entry(&client_list, struct ib_client, list);
- client->client_id = last->client_id + 1;
- }
+ client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
goto out;
+ highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
- list_add_tail(&client->list, &client_list);
out:
up_write(&clients_rwsem);
return ret;
}
+static void remove_client_id(struct ib_client *client)
+{
+ down_write(&clients_rwsem);
+ xa_erase(&clients, client->client_id);
+ for (; highest_client_id; highest_client_id--)
+ if (xa_load(&clients, highest_client_id - 1))
+ break;
+ up_write(&clients_rwsem);
+}
+
/**
* ib_register_client - Register an IB client
* @client:Client to register
@@ -1705,6 +1728,8 @@ int ib_register_client(struct ib_client *client)
unsigned long index;
int ret;
+ refcount_set(&client->uses, 1);
+ init_completion(&client->uses_zero);
ret = assign_client_id(client);
if (ret)
return ret;
@@ -1740,21 +1765,30 @@ void ib_unregister_client(struct ib_client *client)
unsigned long index;
down_write(&clients_rwsem);
+ ib_client_put(client);
xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
up_write(&clients_rwsem);
- /*
- * Every device still known must be serialized to make sure we are
- * done with the client callbacks before we return.
- */
- down_read(&devices_rwsem);
- xa_for_each (&devices, index, device)
+
+ /* We do not want to have locks while calling client->remove() */
+ rcu_read_lock();
+ xa_for_each (&devices, index, device) {
+ if (!ib_device_try_get(device))
+ continue;
+ rcu_read_unlock();
+
remove_client_context(device, client->client_id);
- up_read(&devices_rwsem);
- down_write(&clients_rwsem);
- list_del(&client->list);
- xa_erase(&clients, client->client_id);
- up_write(&clients_rwsem);
+ ib_device_put(device);
+ rcu_read_lock();
+ }
+ rcu_read_unlock();
+
+ /*
+ * remove_client_context() is not a fence, it can return even though a
+ * removal is ongoing. Wait until all removals are completed.
+ */
+ wait_for_completion(&client->uses_zero);
+ remove_client_id(client);
}
EXPORT_SYMBOL(ib_unregister_client);
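
The device.c rework above replaces the reverse-iterated client list with a per-client reference count plus completion, so ib_unregister_client() can drop its own reference and sleep until every in-flight remove has finished. The bare idiom, with a hypothetical struct tracked:

struct tracked {
	refcount_t uses;
	struct completion uses_zero;
};

static void tracked_init(struct tracked *t)
{
	refcount_set(&t->uses, 1);		/* the owner's reference */
	init_completion(&t->uses_zero);
}

static bool tracked_get(struct tracked *t)
{
	/* fails once unregister has dropped the owner's reference */
	return refcount_inc_not_zero(&t->uses);
}

static void tracked_put(struct tracked *t)
{
	if (refcount_dec_and_test(&t->uses))
		complete(&t->uses_zero);
}

static void tracked_unregister(struct tracked *t)
{
	tracked_put(t);				/* drop the owner's reference */
	wait_for_completion(&t->uses_zero);	/* wait for remaining users */
}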
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index cc99479b2c09..9947d16edef2 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3224,18 +3224,18 @@ static int ib_mad_port_open(struct ib_device *device,
if (has_smi)
cq_size *= 2;
+ port_priv->pd = ib_alloc_pd(device, 0);
+ if (IS_ERR(port_priv->pd)) {
+ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+ ret = PTR_ERR(port_priv->pd);
+ goto error3;
+ }
+
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
IB_POLL_UNBOUND_WORKQUEUE);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
- goto error3;
- }
-
- port_priv->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(port_priv->pd)) {
- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
- ret = PTR_ERR(port_priv->pd);
goto error4;
}
@@ -3278,11 +3278,11 @@ error8:
error7:
destroy_mad_qp(&port_priv->qp_info[0]);
error6:
- ib_dealloc_pd(port_priv->pd);
-error4:
ib_free_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+ ib_dealloc_pd(port_priv->pd);
error3:
kfree(port_priv);
@@ -3312,8 +3312,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
destroy_workqueue(port_priv->wq);
destroy_mad_qp(&port_priv->qp_info[1]);
destroy_mad_qp(&port_priv->qp_info[0]);
- ib_dealloc_pd(port_priv->pd);
ib_free_cq(port_priv->cq);
+ ib_dealloc_pd(port_priv->pd);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
/* XXX: Handle deallocation of MAD registration tables */
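
The mad.c hunks above reorder setup so the PD is created before the CQ and, symmetrically, destroyed after it; the error labels unwind in the reverse of allocation order. A stripped-down sketch of that convention, with hypothetical alloc_a/alloc_b helpers and a struct res context:

static int setup(struct res *r)
{
	int ret;

	ret = alloc_a(r);		/* e.g. the PD */
	if (ret)
		return ret;

	ret = alloc_b(r);		/* e.g. the CQ */
	if (ret)
		goto err_free_a;

	return 0;

err_free_a:
	free_a(r);			/* unwind only what succeeded */
	return ret;
}

static void teardown(struct res *r)
{
	free_b(r);			/* reverse of setup() */
	free_a(r);
}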
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 9f8a48016b41..ffdeaf6e0b68 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
@@ -884,11 +885,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
if (get_user(id, arg))
return -EFAULT;
+ if (id >= IB_UMAD_MAX_AGENTS)
+ return -EINVAL;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+ if (!__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
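
The user_mad.c change above is the standard Spectre-v1 hardening: bounds-check the user-supplied index first, then clamp it with array_index_nospec() before it is ever used to index kernel memory. A minimal sketch with a hypothetical agent table:

#include <linux/nospec.h>

static struct agent *agent_lookup(struct agent **table, u32 id, u32 max)
{
	if (id >= max)
		return NULL;			/* reject out-of-range ids */

	id = array_index_nospec(id, max);	/* clamp under speculation too */
	return table[id];
}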
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index a91653aabf38..098ab883733e 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -308,6 +308,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
struct bnxt_re_dev *rdev = to_bnxt_re_dev(attr->device, ibdev);
struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
struct bnxt_qplib_gid *gid_to_del;
+ u16 vlan_id = 0xFFFF;
/* Delete the entry from the hardware */
ctx = *context;
@@ -317,7 +318,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
if (sgid_tbl && sgid_tbl->active) {
if (ctx->idx >= sgid_tbl->max)
return -EINVAL;
- gid_to_del = &sgid_tbl->tbl[ctx->idx];
+ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
/* DEL_GID is called in WQ context(netdevice_event_work_handler)
* or via the ib_unregister_device path. In the former case QP1
* may not be destroyed yet, in which case just return as FW
@@ -335,7 +337,8 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
}
ctx->refcnt--;
if (!ctx->refcnt) {
- rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+ vlan_id, true);
if (rc) {
dev_err(rdev_to_dev(rdev),
"Failed to remove GID: %#x", rc);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 37928b1111df..bdbde8e22420 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -488,7 +488,7 @@ static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl,
u16 max)
{
- sgid_tbl->tbl = kcalloc(max, sizeof(struct bnxt_qplib_gid), GFP_KERNEL);
+ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
if (!sgid_tbl->tbl)
return -ENOMEM;
@@ -526,9 +526,10 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
for (i = 0; i < sgid_tbl->max; i++) {
if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero)))
- bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i], true);
+ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+ sgid_tbl->tbl[i].vlan_id, true);
}
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
sgid_tbl->active = 0;
@@ -537,7 +538,11 @@ static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct net_device *netdev)
{
- memset(sgid_tbl->tbl, 0, sizeof(struct bnxt_qplib_gid) * sgid_tbl->max);
+ u32 i;
+
+ for (i = 0; i < sgid_tbl->max; i++)
+ sgid_tbl->tbl[i].vlan_id = 0xffff;
+
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 30c42c92fac7..fbda11a7ab1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -111,7 +111,7 @@ struct bnxt_qplib_pd_tbl {
};
struct bnxt_qplib_sgid_tbl {
- struct bnxt_qplib_gid *tbl;
+ struct bnxt_qplib_gid_info *tbl;
u16 *hw_id;
u16 max;
u16 active;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 48793d3512ac..40296b97d21e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -213,12 +213,12 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
index, sgid_tbl->max);
return -EINVAL;
}
- memcpy(gid, &sgid_tbl->tbl[index], sizeof(*gid));
+ memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
return 0;
}
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update)
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -236,7 +236,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
- if (!memcmp(&sgid_tbl->tbl[index], gid, sizeof(*gid)))
+ if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+ vlan_id == sgid_tbl->tbl[index].vlan_id)
break;
}
if (index == sgid_tbl->max) {
@@ -262,8 +263,9 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
if (rc)
return rc;
}
- memcpy(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+ memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->tbl[index].vlan_id = 0xFFFF;
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
@@ -296,7 +298,8 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
- if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
+ if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+ sgid_tbl->tbl[i].vlan_id == vlan_id) {
dev_dbg(&res->pdev->dev,
"SGID entry already exist in entry %d!\n", i);
*index = i;
@@ -351,6 +354,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
/* Add GID to the sgid_tbl */
memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+ sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
sgid_tbl->active++;
if (vlan_id != 0xFFFF)
sgid_tbl->vlan[free_idx] = 1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 0ec3b12b0bcd..13d9432d5ce2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -84,6 +84,11 @@ struct bnxt_qplib_gid {
u8 data[16];
};
+struct bnxt_qplib_gid_info {
+ struct bnxt_qplib_gid gid;
+ u16 vlan_id;
+};
+
struct bnxt_qplib_ah {
struct bnxt_qplib_gid dgid;
struct bnxt_qplib_pd *pd;
@@ -221,7 +226,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, bool update);
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
bool update, u32 *index);
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d5b643a1d9fd..67052dc3100c 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14452,7 +14452,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
{
struct rsm_map_table *rmt;
u64 val;
@@ -14461,6 +14461,9 @@ static void init_rxe(struct hfi1_devdata *dd)
write_csr(dd, RCV_ERR_MASK, ~0ull);
rmt = alloc_rsm_map_table(dd);
+ if (!rmt)
+ return -ENOMEM;
+
/* set up QOS, including the QPN map table */
init_qos(dd, rmt);
init_fecn_handling(dd, rmt);
@@ -14487,6 +14490,7 @@ static void init_rxe(struct hfi1_devdata *dd)
val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
RCV_BYPASS_HDR_SIZE_SHIFT);
write_csr(dd, RCV_BYPASS, val);
+ return 0;
}
static void init_other(struct hfi1_devdata *dd)
@@ -15024,7 +15028,10 @@ int hfi1_init_dd(struct hfi1_devdata *dd)
goto bail_cleanup;
/* set initial RXE CSRs */
- init_rxe(dd);
+ ret = init_rxe(dd);
+ if (ret)
+ goto bail_cleanup;
+
/* set initial TXE CSRs */
init_txe(dd);
/* set initial non-RXE, non-TXE CSRs */
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 0477c14633ab..024a7c2b6124 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
trdma_clean_swqe(qp, wqe);
- rvt_qp_wqe_unreserve(qp, wqe);
trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
rvt_qp_complete_swqe(qp,
wqe,
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 92acccaaaa86..996fc298207e 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -1620,6 +1620,7 @@ static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
flows[i].req = req;
flows[i].npagesets = 0;
flows[i].pagesets[0].mapped = 0;
+ flows[i].resync_npkts = 0;
}
req->flows = flows;
return 0;
@@ -1673,34 +1674,6 @@ static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
return NULL;
}
-static struct tid_rdma_flow *
-__find_flow_ranged(struct tid_rdma_request *req, u16 head, u16 tail,
- u32 psn, u16 *fidx)
-{
- for ( ; CIRC_CNT(head, tail, MAX_FLOWS);
- tail = CIRC_NEXT(tail, MAX_FLOWS)) {
- struct tid_rdma_flow *flow = &req->flows[tail];
- u32 spsn, lpsn;
-
- spsn = full_flow_psn(flow, flow->flow_state.spsn);
- lpsn = full_flow_psn(flow, flow->flow_state.lpsn);
-
- if (cmp_psn(psn, spsn) >= 0 && cmp_psn(psn, lpsn) <= 0) {
- if (fidx)
- *fidx = tail;
- return flow;
- }
- }
- return NULL;
-}
-
-static struct tid_rdma_flow *find_flow(struct tid_rdma_request *req,
- u32 psn, u16 *fidx)
-{
- return __find_flow_ranged(req, req->setup_head, req->clear_tail, psn,
- fidx);
-}
-
/* TID RDMA READ functions */
u32 hfi1_build_tid_rdma_read_packet(struct rvt_swqe *wqe,
struct ib_other_headers *ohdr, u32 *bth1,
@@ -2788,19 +2761,7 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
* to prevent continuous Flow Sequence errors for any
* packets that could be still in the fabric.
*/
- flow = find_flow(req, psn, NULL);
- if (!flow) {
- /*
- * We can't find the IB PSN matching the
- * received KDETH PSN. The only thing we can
- * do at this point is report the error to
- * the QP.
- */
- hfi1_kern_read_tid_flow_free(qp);
- spin_unlock(&qp->s_lock);
- rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
- return ret;
- }
+ flow = &req->flows[req->clear_tail];
if (priv->s_flags & HFI1_R_TID_SW_PSN) {
diff = cmp_psn(psn,
flow->flow_state.r_next_psn);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index c4b243f50c76..646f61545ed6 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -54,6 +54,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <rdma/opa_addr.h>
+#include <linux/nospec.h>
#include "hfi.h"
#include "common.h"
@@ -1536,6 +1537,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
sl = rdma_ah_get_sl(ah_attr);
if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
return -EINVAL;
+ sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 8bf847bcd8d3..54782197c717 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HNS
- tristate "HNS RoCE Driver"
+ bool "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
@@ -11,7 +11,7 @@ config INFINIBAND_HNS
To compile HIP06 or HIP08 driver as module, choose M here.
config INFINIBAND_HNS_HIP06
- bool "Hisilicon Hip06 Family RoCE support"
+ tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
@@ -21,7 +21,7 @@ config INFINIBAND_HNS_HIP06
module will be called hns-roce-hw-v1
config INFINIBAND_HNS_HIP08
- bool "Hisilicon Hip08 Family RoCE support"
+ tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e105945b94a1..449a2d81319d 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,12 +9,8 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
-ifdef CONFIG_INFINIBAND_HNS_HIP06
hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
-ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
-endif
+obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 627aa46ef683..c00714c2f16a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -12,13 +12,15 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
struct ib_udata *udata, unsigned long virt,
struct hns_roce_db *db)
{
+ unsigned long page_addr = virt & PAGE_MASK;
struct hns_roce_user_db_page *page;
+ unsigned int offset;
int ret = 0;
mutex_lock(&context->page_mutex);
list_for_each_entry(page, &context->page_list, list)
- if (page->user_virt == (virt & PAGE_MASK))
+ if (page->user_virt == page_addr)
goto found;
page = kmalloc(sizeof(*page), GFP_KERNEL);
@@ -28,8 +30,8 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
}
refcount_set(&page->refcount, 1);
- page->user_virt = (virt & PAGE_MASK);
- page->umem = ib_umem_get(udata, virt & PAGE_MASK, PAGE_SIZE, 0, 0);
+ page->user_virt = page_addr;
+ page->umem = ib_umem_get(udata, page_addr, PAGE_SIZE, 0, 0);
if (IS_ERR(page->umem)) {
ret = PTR_ERR(page->umem);
kfree(page);
@@ -39,10 +41,9 @@ int hns_roce_db_map_user(struct hns_roce_ucontext *context,
list_add(&page->list, &context->page_list);
found:
- db->dma = sg_dma_address(page->umem->sg_head.sgl) +
- (virt & ~PAGE_MASK);
- page->umem->sg_head.sgl->offset = virt & ~PAGE_MASK;
- db->virt_addr = sg_virt(page->umem->sg_head.sgl);
+ offset = virt - page_addr;
+ db->dma = sg_dma_address(page->umem->sg_head.sgl) + offset;
+ db->virt_addr = sg_virt(page->umem->sg_head.sgl) + offset;
db->u.user_page = page;
refcount_inc(&page->refcount);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 81e6dedb1e02..c07e387a07a3 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -750,8 +750,10 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
- if (!pd)
+ if (!pd) {
+ ret = -ENOMEM;
goto alloc_mem_failed;
+ }
pd->device = ibdev;
ret = hns_roce_alloc_pd(pd, NULL);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index c2a5780cb394..e12a4404096b 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -5802,13 +5802,12 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
return;
}
- if (mpi->mdev_events.notifier_call)
- mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
- mpi->mdev_events.notifier_call = NULL;
-
mpi->ibdev = NULL;
spin_unlock(&port->mp.mpi_lock);
+ if (mpi->mdev_events.notifier_call)
+ mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events);
+ mpi->mdev_events.notifier_call = NULL;
mlx5_remove_netdev_notifier(ibdev, port_num);
spin_lock(&port->mp.mpi_lock);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index c482f19958b3..f6a53455bf8b 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -481,6 +481,7 @@ struct mlx5_umr_wr {
u64 length;
int access_flags;
u32 mkey;
+ u8 ignore_free_state:1;
};
static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 20ece6e0b2fc..b74fad08412f 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
- return order <= mr_cache_max_order(dev) &&
- umr_can_modify_entity_size(dev);
-}
-
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -545,13 +535,16 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return;
c = order2idx(dev, mr->order);
- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
- return;
- }
+ WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
- if (unreg_umr(dev, mr))
+ if (unreg_umr(dev, mr)) {
+ mr->allocated_from_cache = false;
+ destroy_mkey(dev, mr);
+ ent = &cache->ent[c];
+ if (ent->cur < ent->limit)
+ queue_work(cache->wq, &ent->work);
return;
+ }
ent = &cache->ent[c];
spin_lock_irq(&ent->lock);
@@ -1268,7 +1261,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
- bool populate_mtts = false;
+ bool use_umr;
struct ib_umem *umem;
int page_shift;
int npages;
@@ -1300,29 +1293,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- if (use_umr(dev, order)) {
+ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+ (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+ !MLX5_CAP_GEN(dev->mdev, atomic));
+
+ if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL;
}
- populate_mtts = false;
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL;
pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error;
}
- populate_mtts = true;
+ use_umr = false;
}
if (!mr) {
- if (!umr_can_modify_entity_size(dev))
- populate_mtts = true;
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, populate_mtts);
+ page_shift, access_flags, !use_umr);
mutex_unlock(&dev->slow_path_mutex);
}
@@ -1338,7 +1332,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
update_odp_mr(mr);
- if (!populate_mtts) {
+ if (use_umr) {
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1373,9 +1367,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
- MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
+ umrwr.pd = dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
+ umrwr.ignore_free_state = 1;
return mlx5_ib_post_send_wait(dev, &umrwr);
}
@@ -1577,10 +1573,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig = NULL;
}
- mlx5_free_priv_descs(mr);
-
- if (!allocated_from_cache)
+ if (!allocated_from_cache) {
destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
+ }
}
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5b642d81e617..81da82050d05 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -246,7 +246,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
* overwrite the same MTTs. Concurrent invalidations might race us,
* but they will write 0s as well, so no difference in the end result.
*/
-
+ mutex_lock(&umem_odp->umem_mutex);
for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
/*
@@ -278,6 +278,7 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
idx - blk_start_idx + 1, 0,
MLX5_IB_UPD_XLT_ZAP |
MLX5_IB_UPD_XLT_ATOMIC);
+ mutex_unlock(&umem_odp->umem_mutex);
/*
* We are now sure that the device will not access the
* memory. We can safely unmap it, and mark it as dirty if
@@ -1771,7 +1772,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work)
num_pending_prefetch_dec(to_mdev(w->pd->device), w->sg_list,
w->num_sge, 0);
- kfree(w);
+ kvfree(w);
}
int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
@@ -1813,7 +1814,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (valid_req)
queue_work(system_unbound_wq, &work->work);
else
- kfree(work);
+ kvfree(work);
srcu_read_unlock(&dev->mr_srcu, srcu_key);
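
The odp.c change pairs the allocation and free helpers correctly: memory that may come from kvzalloc()/kvmalloc() (and therefore possibly from vmalloc) must be released with kvfree(), not kfree(). In isolation, with an illustrative size in scope:

void *buf = kvzalloc(size, GFP_KERNEL);	/* may fall back to vmalloc */

if (!buf)
	return -ENOMEM;
/* ... use buf ... */
kvfree(buf);				/* the only correct counterpart */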
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 2a97619ed603..379328b2598f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1713,7 +1713,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len);
break;
}
@@ -4295,10 +4294,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
memset(umr, 0, sizeof(*umr));
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
- else
- umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 533157a2a3be..f97b3d65b30c 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -125,14 +125,20 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
struct qedr_dev *dev =
rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->attr.hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);
static ssize_t hca_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+ struct qedr_dev *dev =
+ rdma_device_to_drv_device(device, struct qedr_dev, ibdev);
+
+ return scnprintf(buf, PAGE_SIZE, "FastLinQ QL%x %s\n",
+ dev->pdev->device,
+ rdma_protocol_iwarp(&dev->ibdev, 1) ?
+ "iWARP" : "RoCE");
}
static DEVICE_ATTR_RO(hca_type);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index a7cde98e73e8..9ce8a1b925d2 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -220,13 +220,12 @@ static void siw_put_work(struct siw_cm_work *work)
static void siw_cep_set_inuse(struct siw_cep *cep)
{
unsigned long flags;
- int rv;
retry:
spin_lock_irqsave(&cep->lock, flags);
if (cep->in_use) {
spin_unlock_irqrestore(&cep->lock, flags);
- rv = wait_event_interruptible(cep->waitq, !cep->in_use);
+ wait_event_interruptible(cep->waitq, !cep->in_use);
if (signal_pending(current))
flush_signals(current);
goto retry;
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index f55c4e80aea4..d0f140daf659 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -612,6 +612,7 @@ static __init int siw_init_module(void)
if (!siw_create_tx_threads()) {
pr_info("siw: Could not start any TX thread\n");
+ rv = -ENOMEM;
goto out_error;
}
/*
diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
index 11383d9f95ef..e27bd5b35b96 100644
--- a/drivers/infiniband/sw/siw/siw_qp.c
+++ b/drivers/infiniband/sw/siw/siw_qp.c
@@ -220,12 +220,14 @@ static int siw_qp_enable_crc(struct siw_qp *qp)
{
struct siw_rx_stream *c_rx = &qp->rx_stream;
struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
- int size = crypto_shash_descsize(siw_crypto_shash) +
- sizeof(struct shash_desc);
+ int size;
if (siw_crypto_shash == NULL)
return -ENOENT;
+ size = crypto_shash_descsize(siw_crypto_shash) +
+ sizeof(struct shash_desc);
+
c_tx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
c_rx->mpa_crc_hd = kzalloc(size, GFP_KERNEL);
if (!c_tx->mpa_crc_hd || !c_rx->mpa_crc_hd) {
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index eb104c719629..4413aa67000e 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -23,6 +23,8 @@
#include <linux/mem_encrypt.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
+#include <asm/apic.h>
+#include <asm/msidef.h>
#include <asm/gart.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>
@@ -1920,6 +1922,90 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return 0;
}
+#define XT_INT_DEST_MODE(x) (((x) & 0x1ULL) << 2)
+#define XT_INT_DEST_LO(x) (((x) & 0xFFFFFFULL) << 8)
+#define XT_INT_VEC(x) (((x) & 0xFFULL) << 32)
+#define XT_INT_DEST_HI(x) ((((x) >> 24) & 0xFFULL) << 56)
+
+/**
+ * Setup the IntCapXT registers with interrupt routing information
+ * based on the PCI MSI capability block registers, accessed via
+ * MMIO MSI address low/hi and MSI data registers.
+ */
+static void iommu_update_intcapxt(struct amd_iommu *iommu)
+{
+ u64 val;
+ u32 addr_lo = readl(iommu->mmio_base + MMIO_MSI_ADDR_LO_OFFSET);
+ u32 addr_hi = readl(iommu->mmio_base + MMIO_MSI_ADDR_HI_OFFSET);
+ u32 data = readl(iommu->mmio_base + MMIO_MSI_DATA_OFFSET);
+ bool dm = (addr_lo >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
+ u32 dest = ((addr_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xFF);
+
+ if (x2apic_enabled())
+ dest |= MSI_ADDR_EXT_DEST_ID(addr_hi);
+
+ val = XT_INT_VEC(data & 0xFF) |
+ XT_INT_DEST_MODE(dm) |
+ XT_INT_DEST_LO(dest) |
+ XT_INT_DEST_HI(dest);
+
+ /**
+ * Current IOMMU implementation uses the same IRQ for all
+ * 3 IOMMU interrupts.
+ */
+ writeq(val, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+ writeq(val, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+ writeq(val, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+static void _irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct amd_iommu *iommu;
+
+ for_each_iommu(iommu) {
+ if (iommu->dev->irq == notify->irq) {
+ iommu_update_intcapxt(iommu);
+ break;
+ }
+ }
+}
+
+static void _irq_notifier_release(struct kref *ref)
+{
+}
+
+static int iommu_init_intcapxt(struct amd_iommu *iommu)
+{
+ int ret;
+ struct irq_affinity_notify *notify = &iommu->intcapxt_notify;
+
+ /**
+ * IntCapXT requires XTSup=1, which can be inferred
+ * from amd_iommu_xt_mode.
+ */
+ if (amd_iommu_xt_mode != IRQ_REMAP_X2APIC_MODE)
+ return 0;
+
+ /**
+ * Also, we need to set up a notifier to update the IntCapXT registers
+ * whenever the irq affinity is changed from user-space.
+ */
+ notify->irq = iommu->dev->irq;
+ notify->notify = _irq_notifier_notify;
+ notify->release = _irq_notifier_release;
+ ret = irq_set_affinity_notifier(iommu->dev->irq, notify);
+ if (ret) {
+ pr_err("Failed to register irq affinity notifier (devid=%#x, irq %d)\n",
+ iommu->devid, iommu->dev->irq);
+ return ret;
+ }
+
+ iommu_update_intcapxt(iommu);
+ iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+ return ret;
+}
+
static int iommu_init_msi(struct amd_iommu *iommu)
{
int ret;
@@ -1936,6 +2022,10 @@ static int iommu_init_msi(struct amd_iommu *iommu)
return ret;
enable_faults:
+ ret = iommu_init_intcapxt(iommu);
+ if (ret)
+ return ret;
+
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
if (iommu->ppr_log != NULL)
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 52c35d557fad..64edd5a9694c 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -60,6 +60,12 @@
#define MMIO_PPR_LOG_OFFSET 0x0038
#define MMIO_GA_LOG_BASE_OFFSET 0x00e0
#define MMIO_GA_LOG_TAIL_OFFSET 0x00e8
+#define MMIO_MSI_ADDR_LO_OFFSET 0x015C
+#define MMIO_MSI_ADDR_HI_OFFSET 0x0160
+#define MMIO_MSI_DATA_OFFSET 0x0164
+#define MMIO_INTCAPXT_EVT_OFFSET 0x0170
+#define MMIO_INTCAPXT_PPR_OFFSET 0x0178
+#define MMIO_INTCAPXT_GALOG_OFFSET 0x0180
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
@@ -150,6 +156,7 @@
#define CONTROL_GALOG_EN 0x1CULL
#define CONTROL_GAINT_EN 0x1DULL
#define CONTROL_XT_EN 0x32ULL
+#define CONTROL_INTCAPXT_EN 0x33ULL
#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE 0
@@ -592,6 +599,8 @@ struct amd_iommu {
/* DebugFS Info */
struct dentry *debugfs;
#endif
+ /* IRQ notifier for IntCapXT interrupt */
+ struct irq_affinity_notify intcapxt_notify;
};
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
index 73a552914455..2b25d9c59336 100644
--- a/drivers/iommu/intel-iommu-debugfs.c
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -162,9 +162,9 @@ static inline void print_tbl_walk(struct seq_file *m)
(u64)0, (u64)0, (u64)0);
else
seq_printf(m, "%-6d\t0x%016llx:0x%016llx:0x%016llx\n",
- tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[0],
+ tbl_wlk->pasid, tbl_wlk->pasid_tbl_entry->val[2],
tbl_wlk->pasid_tbl_entry->val[1],
- tbl_wlk->pasid_tbl_entry->val[2]);
+ tbl_wlk->pasid_tbl_entry->val[0]);
}
static void pasid_tbl_walk(struct seq_file *m, struct pasid_entry *tbl_entry,
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ac4172c02244..bdaed2da8a55 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -339,8 +339,6 @@ static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct device *dev);
static void __dmar_remove_one_dev_info(struct device_domain_info *info);
-static void domain_context_clear(struct intel_iommu *iommu,
- struct device *dev);
static int domain_detach_iommu(struct dmar_domain *domain,
struct intel_iommu *iommu);
static bool device_is_rmrr_locked(struct device *dev);
@@ -1833,9 +1831,65 @@ static inline int guestwidth_to_adjustwidth(int gaw)
return agaw;
}
+static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+ int guest_width)
+{
+ int adjust_width, agaw;
+ unsigned long sagaw;
+ int err;
+
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+
+ err = init_iova_flush_queue(&domain->iovad,
+ iommu_flush_iova, iova_entry_free);
+ if (err)
+ return err;
+
+ domain_reserve_special_ranges(domain);
+
+ /* calculate AGAW */
+ if (guest_width > cap_mgaw(iommu->cap))
+ guest_width = cap_mgaw(iommu->cap);
+ domain->gaw = guest_width;
+ adjust_width = guestwidth_to_adjustwidth(guest_width);
+ agaw = width_to_agaw(adjust_width);
+ sagaw = cap_sagaw(iommu->cap);
+ if (!test_bit(agaw, &sagaw)) {
+ /* hardware doesn't support it, choose a bigger one */
+ pr_debug("Hardware doesn't support agaw %d\n", agaw);
+ agaw = find_next_bit(&sagaw, 5, agaw);
+ if (agaw >= 5)
+ return -ENODEV;
+ }
+ domain->agaw = agaw;
+
+ if (ecap_coherent(iommu->ecap))
+ domain->iommu_coherency = 1;
+ else
+ domain->iommu_coherency = 0;
+
+ if (ecap_sc_support(iommu->ecap))
+ domain->iommu_snooping = 1;
+ else
+ domain->iommu_snooping = 0;
+
+ if (intel_iommu_superpage)
+ domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
+ else
+ domain->iommu_superpage = 0;
+
+ domain->nid = iommu->node;
+
+ /* always allocate the top pgd */
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+ if (!domain->pgd)
+ return -ENOMEM;
+ __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE);
+ return 0;
+}
+
static void domain_exit(struct dmar_domain *domain)
{
- struct page *freelist;
/* Remove associated devices and clear attached or cached domains */
domain_remove_dev_info(domain);
@@ -1843,9 +1897,12 @@ static void domain_exit(struct dmar_domain *domain)
/* destroy iovas */
put_iova_domain(&domain->iovad);
- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ if (domain->pgd) {
+ struct page *freelist;
- dma_free_pagelist(freelist);
+ freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
+ dma_free_pagelist(freelist);
+ }
free_domain_mem(domain);
}
@@ -2048,26 +2105,9 @@ out_unlock:
return ret;
}
-struct domain_context_mapping_data {
- struct dmar_domain *domain;
- struct intel_iommu *iommu;
- struct pasid_table *table;
-};
-
-static int domain_context_mapping_cb(struct pci_dev *pdev,
- u16 alias, void *opaque)
-{
- struct domain_context_mapping_data *data = opaque;
-
- return domain_context_mapping_one(data->domain, data->iommu,
- data->table, PCI_BUS_NUM(alias),
- alias & 0xff);
-}
-
static int
domain_context_mapping(struct dmar_domain *domain, struct device *dev)
{
- struct domain_context_mapping_data data;
struct pasid_table *table;
struct intel_iommu *iommu;
u8 bus, devfn;
@@ -2077,17 +2117,7 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
return -ENODEV;
table = intel_pasid_get_table(dev);
-
- if (!dev_is_pci(dev))
- return domain_context_mapping_one(domain, iommu, table,
- bus, devfn);
-
- data.domain = domain;
- data.iommu = iommu;
- data.table = table;
-
- return pci_for_each_dma_alias(to_pci_dev(dev),
- &domain_context_mapping_cb, &data);
+ return domain_context_mapping_one(domain, iommu, table, bus, devfn);
}
static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -2513,31 +2543,6 @@ static int get_last_alias(struct pci_dev *pdev, u16 alias, void *opaque)
return 0;
}
-static int domain_init(struct dmar_domain *domain, int guest_width)
-{
- int adjust_width;
-
- init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
- domain_reserve_special_ranges(domain);
-
- /* calculate AGAW */
- domain->gaw = guest_width;
- adjust_width = guestwidth_to_adjustwidth(guest_width);
- domain->agaw = width_to_agaw(adjust_width);
-
- domain->iommu_coherency = 0;
- domain->iommu_snooping = 0;
- domain->iommu_superpage = 0;
- domain->max_addr = 0;
-
- /* always allocate the top pgd */
- domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
- if (!domain->pgd)
- return -ENOMEM;
- domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
- return 0;
-}
-
static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
{
struct device_domain_info *info;
@@ -2575,19 +2580,11 @@ static struct dmar_domain *find_or_alloc_domain(struct device *dev, int gaw)
domain = alloc_domain(0);
if (!domain)
return NULL;
-
- if (domain_init(domain, gaw)) {
+ if (domain_init(domain, iommu, gaw)) {
domain_exit(domain);
return NULL;
}
- if (init_iova_flush_queue(&domain->iovad,
- iommu_flush_iova,
- iova_entry_free)) {
- pr_warn("iova flush queue initialization failed\n");
- intel_iommu_strict = 1;
- }
-
out:
return domain;
}
@@ -2692,6 +2689,8 @@ static int domain_prepare_identity_map(struct device *dev,
return iommu_domain_identity_map(domain, start, end);
}
+static int md_domain_init(struct dmar_domain *domain, int guest_width);
+
static int __init si_domain_init(int hw)
{
struct dmar_rmrr_unit *rmrr;
@@ -2702,7 +2701,7 @@ static int __init si_domain_init(int hw)
if (!si_domain)
return -EFAULT;
- if (domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
@@ -3564,7 +3563,8 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict || (pdev && pdev->untrusted)) {
+ if (intel_iommu_strict || (pdev && pdev->untrusted) ||
+ !has_iova_flush_queue(&domain->iovad)) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
nrpages, !freelist, 0);
/* free iova */
@@ -4758,28 +4758,6 @@ out_free_dmar:
return ret;
}
-static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
-{
- struct intel_iommu *iommu = opaque;
-
- domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
- return 0;
-}
-
-/*
- * NB - intel-iommu lacks any sort of reference counting for the users of
- * dependent devices. If multiple endpoints have intersecting dependent
- * devices, unbinding the driver from any one of them will possibly leave
- * the others unable to operate.
- */
-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
-{
- if (!iommu || !dev || !dev_is_pci(dev))
- return;
-
- pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
-}
-
static void __dmar_remove_one_dev_info(struct device_domain_info *info)
{
struct dmar_domain *domain;
@@ -4800,7 +4778,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
PASID_RID2PASID);
iommu_disable_dev_iotlb(info);
- domain_context_clear(iommu, info->dev);
+ domain_context_clear_one(iommu, info->bus, info->devfn);
intel_pasid_free_table(info->dev);
}
@@ -4829,6 +4807,31 @@ static void dmar_remove_one_dev_info(struct device *dev)
spin_unlock_irqrestore(&device_domain_lock, flags);
}
+static int md_domain_init(struct dmar_domain *domain, int guest_width)
+{
+ int adjust_width;
+
+ init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
+ domain_reserve_special_ranges(domain);
+
+ /* calculate AGAW */
+ domain->gaw = guest_width;
+ adjust_width = guestwidth_to_adjustwidth(guest_width);
+ domain->agaw = width_to_agaw(adjust_width);
+
+ domain->iommu_coherency = 0;
+ domain->iommu_snooping = 0;
+ domain->iommu_superpage = 0;
+ domain->max_addr = 0;
+
+ /* always allocate the top pgd */
+ domain->pgd = (struct dma_pte *)alloc_pgtable_page(domain->nid);
+ if (!domain->pgd)
+ return -ENOMEM;
+ domain_flush_cache(domain, domain->pgd, PAGE_SIZE);
+ return 0;
+}
+
static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
{
struct dmar_domain *dmar_domain;
@@ -4843,7 +4846,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
pr_err("Can't allocate dmar_domain\n");
return NULL;
}
- if (domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
pr_err("Domain initialization failed\n");
domain_exit(dmar_domain);
return NULL;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index d499b2621239..3e1a8a675572 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -54,9 +54,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
}
EXPORT_SYMBOL_GPL(init_iova_domain);
+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+ return !!iovad->fq;
+}
+
static void free_iova_flush_queue(struct iova_domain *iovad)
{
- if (!iovad->fq)
+ if (!has_iova_flush_queue(iovad))
return;
if (timer_pending(&iovad->fq_timer))
@@ -74,13 +79,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
+ struct iova_fq __percpu *queue;
int cpu;
atomic64_set(&iovad->fq_flush_start_cnt, 0);
atomic64_set(&iovad->fq_flush_finish_cnt, 0);
- iovad->fq = alloc_percpu(struct iova_fq);
- if (!iovad->fq)
+ queue = alloc_percpu(struct iova_fq);
+ if (!queue)
return -ENOMEM;
iovad->flush_cb = flush_cb;
@@ -89,13 +95,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
for_each_possible_cpu(cpu) {
struct iova_fq *fq;
- fq = per_cpu_ptr(iovad->fq, cpu);
+ fq = per_cpu_ptr(queue, cpu);
fq->head = 0;
fq->tail = 0;
spin_lock_init(&fq->lock);
}
+ smp_wmb();
+
+ iovad->fq = queue;
+
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
@@ -127,8 +137,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
struct iova *cached_iova;
cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
- if (free->pfn_hi < iovad->dma_32bit_pfn &&
- free->pfn_lo >= cached_iova->pfn_lo) {
+ if (free == cached_iova ||
+ (free->pfn_hi < iovad->dma_32bit_pfn &&
+ free->pfn_lo >= cached_iova->pfn_lo)) {
iovad->cached32_node = rb_next(&free->node);
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
}
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 433f4d2ee956..80a740df0737 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -2,7 +2,7 @@
/*
* Virtio driver for the paravirtualized IOMMU
*
- * Copyright (C) 2018 Arm Limited
+ * Copyright (C) 2019 Arm Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -47,7 +47,10 @@ struct viommu_dev {
/* Device configuration */
struct iommu_domain_geometry geometry;
u64 pgsize_bitmap;
- u8 domain_bits;
+ u32 first_domain;
+ u32 last_domain;
+ /* Supported MAP flags */
+ u32 map_flags;
u32 probe_size;
};
@@ -62,6 +65,7 @@ struct viommu_domain {
struct viommu_dev *viommu;
struct mutex mutex; /* protects viommu pointer */
unsigned int id;
+ u32 map_flags;
spinlock_t mappings_lock;
struct rb_root_cached mappings;
@@ -113,6 +117,8 @@ static int viommu_get_req_errno(void *buf, size_t len)
return -ENOENT;
case VIRTIO_IOMMU_S_FAULT:
return -EFAULT;
+ case VIRTIO_IOMMU_S_NOMEM:
+ return -ENOMEM;
case VIRTIO_IOMMU_S_IOERR:
case VIRTIO_IOMMU_S_DEVERR:
default:
@@ -607,15 +613,15 @@ static int viommu_domain_finalise(struct viommu_dev *viommu,
{
int ret;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unsigned int max_domain = viommu->domain_bits > 31 ? ~0 :
- (1U << viommu->domain_bits) - 1;
vdomain->viommu = viommu;
+ vdomain->map_flags = viommu->map_flags;
domain->pgsize_bitmap = viommu->pgsize_bitmap;
domain->geometry = viommu->geometry;
- ret = ida_alloc_max(&viommu->domain_ids, max_domain, GFP_KERNEL);
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
if (ret >= 0)
vdomain->id = (unsigned int)ret;
@@ -710,7 +716,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
int ret;
- int flags;
+ u32 flags;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -718,6 +724,9 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);
+ if (flags & ~vdomain->map_flags)
+ return -EINVAL;
+
ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
if (ret)
return ret;
@@ -1027,7 +1036,8 @@ static int viommu_probe(struct virtio_device *vdev)
goto err_free_vqs;
}
- viommu->domain_bits = 32;
+ viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+ viommu->last_domain = ~0U;
/* Optional features */
virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
@@ -1038,9 +1048,13 @@ static int viommu_probe(struct virtio_device *vdev)
struct virtio_iommu_config, input_range.end,
&input_end);
- virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_BITS,
- struct virtio_iommu_config, domain_bits,
- &viommu->domain_bits);
+ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.start,
+ &viommu->first_domain);
+
+ virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
+ struct virtio_iommu_config, domain_range.end,
+ &viommu->last_domain);
virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
struct virtio_iommu_config, probe_size,
@@ -1052,6 +1066,9 @@ static int viommu_probe(struct virtio_device *vdev)
.force_aperture = true,
};
+ if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
+ viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
+
viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;
virtio_device_ready(vdev);
@@ -1130,9 +1147,10 @@ static void viommu_config_changed(struct virtio_device *vdev)
static unsigned int features[] = {
VIRTIO_IOMMU_F_MAP_UNMAP,
- VIRTIO_IOMMU_F_DOMAIN_BITS,
VIRTIO_IOMMU_F_INPUT_RANGE,
+ VIRTIO_IOMMU_F_DOMAIN_RANGE,
VIRTIO_IOMMU_F_PROBE,
+ VIRTIO_IOMMU_F_MMIO,
};
static struct virtio_device_id id_table[] = {
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 730fbe0e2a9d..1b5c3672aea2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3010,7 +3010,7 @@ static int its_vpe_init(struct its_vpe *vpe)
if (!its_alloc_vpe_table(vpe_id)) {
its_vpe_id_free(vpe_id);
- its_free_pending_table(vpe->vpt_page);
+ its_free_pending_table(vpt_page);
return -ENOMEM;
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9bca4896fa6f..96d927f0f91a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -771,8 +771,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP0R3_EL1);
write_gicreg(0, ICC_AP0R2_EL1);
+ /* Fall through */
case 6:
write_gicreg(0, ICC_AP0R1_EL1);
+ /* Fall through */
case 5:
case 4:
write_gicreg(0, ICC_AP0R0_EL1);
@@ -786,8 +788,10 @@ static void gic_cpu_sys_reg_init(void)
case 7:
write_gicreg(0, ICC_AP1R3_EL1);
write_gicreg(0, ICC_AP1R2_EL1);
+ /* Fall through */
case 6:
write_gicreg(0, ICC_AP1R1_EL1);
+ /* Fall through */
case 5:
case 4:
write_gicreg(0, ICC_AP1R0_EL1);
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index bf2237ac5d09..4f74c15c4755 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
.irq_unmask = imx_gpcv2_irq_unmask,
.irq_set_wake = imx_gpcv2_irq_set_wake,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 3dd28382d5f5..3f09f658e8e2 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -241,12 +241,15 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
parent = platform_bus_type.dev_root;
child = of_platform_device_create(np, NULL, parent);
- if (!child)
+ if (!child) {
+ of_node_put(np);
return -ENOMEM;
+ }
if (of_property_read_u32(child->dev.of_node, "num-pins",
&num_pins) < 0) {
dev_err(&pdev->dev, "No num-pins property\n");
+ of_node_put(np);
return -EINVAL;
}
@@ -254,8 +257,10 @@ static int mbigen_of_create_domain(struct platform_device *pdev,
mbigen_write_msg,
&mbigen_domain_ops,
mgn_chip);
- if (!domain)
+ if (!domain) {
+ of_node_put(np);
return -ENOMEM;
+ }
}
return 0;
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 0e224232f746..008a74a1ed44 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1394,6 +1394,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
printk(KERN_DEBUG
"%s: %s: alloc urb for fifo %i failed",
hw->name, __func__, fifo->fifonum);
+ continue;
}
fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
fifo->iso[i].indx = i;
@@ -1692,13 +1693,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
static int
setup_hfcsusb(struct hfcsusb *hw)
{
+ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
u_char b;
+ int ret;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+ memcpy(&b, dmabuf, sizeof(u_char));
+ kfree(dmabuf);
+
/* check the chip id */
- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+ if (ret != 1) {
printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
hw->name, __func__);
return 1;
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 276065c888bc..23f1f41c8602 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -852,6 +852,7 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
break;
case SMU_I2C_TRANSFER_COMBINED:
cmd->info.devaddr &= 0xfe;
+ /* fall through */
case SMU_I2C_TRANSFER_STDSUB:
if (cmd->info.sublen > 3)
return -EINVAL;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 26e374fbf57c..20ed838e9413 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -931,6 +931,9 @@ int bch_cached_dev_run(struct cached_dev *dc)
if (dc->io_disable) {
pr_err("I/O disabled on cached dev %s",
dc->backing_dev_name);
+ kfree(env[1]);
+ kfree(env[2]);
+ kfree(buf);
return -EIO;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index caaee8032afe..7b6c3ee9e755 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -882,23 +882,23 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
/* validate the dax capability of the target device span */
int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+ sector_t start, sector_t len, void *data)
{
int blocksize = *(int *) data;
return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
- start, len);
+ start, len);
}
/* Check devices support synchronous DAX */
-static int device_synchronous(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data)
+static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- return dax_synchronous(dev->dax_dev);
+ return dev->dax_dev && dax_synchronous(dev->dax_dev);
}
bool dm_table_supports_dax(struct dm_table *t,
- iterate_devices_callout_fn iterate_fn, int *blocksize)
+ iterate_devices_callout_fn iterate_fn, int *blocksize)
{
struct dm_target *ti;
unsigned i;
@@ -911,7 +911,7 @@ bool dm_table_supports_dax(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- !ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ !ti->type->iterate_devices(ti, iterate_fn, blocksize))
return false;
}
@@ -1921,7 +1921,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_dax(t, device_supports_dax, &page_size)) {
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax(t, device_synchronous, NULL))
+ if (dm_table_supports_dax(t, device_dax_synchronous, NULL))
set_dax_synchronous(t->md->dax_dev);
}
else
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index f88094719552..f2abe27010ef 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -5,6 +5,7 @@ config EEPROM_AT24
tristate "I2C EEPROMs / RAMs / ROMs from most vendors"
depends on I2C && SYSFS
select NVMEM
+ select NVMEM_SYSFS
select REGMAP_I2C
help
Enable this driver to get read/write support to most I2C EEPROMs
@@ -34,6 +35,7 @@ config EEPROM_AT25
tristate "SPI EEPROMs from most vendors"
depends on SPI && SYSFS
select NVMEM
+ select NVMEM_SYSFS
help
Enable this driver to get read/write support to most SPI EEPROMs,
after you configure the board init code to know about each eeprom
@@ -80,6 +82,7 @@ config EEPROM_93XX46
depends on SPI && SYSFS
select REGMAP
select NVMEM
+ select NVMEM_SYSFS
help
Driver for the microwire EEPROM chipsets 93xx46x. The driver
supports both read and write commands and also the command to
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 35bf2477693d..518945b2f737 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -685,7 +685,7 @@ static int at24_probe(struct i2c_client *client)
nvmem_config.name = dev_name(dev);
nvmem_config.dev = dev;
nvmem_config.read_only = !writable;
- nvmem_config.root_only = true;
+ nvmem_config.root_only = !(flags & AT24_FLAG_IRUGO);
nvmem_config.owner = THIS_MODULE;
nvmem_config.compat = true;
nvmem_config.base_dev = dev;
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 75294ec65257..1a2c062a57d4 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -695,8 +695,8 @@ static int goya_sw_init(struct hl_device *hdev)
goto free_dma_pool;
}
- dev_dbg(hdev->dev, "cpu accessible memory at bus address 0x%llx\n",
- hdev->cpu_accessible_dma_address);
+ dev_dbg(hdev->dev, "cpu accessible memory at bus address %pad\n",
+ &hdev->cpu_accessible_dma_address);
hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1);
if (!hdev->cpu_accessible_dma_pool) {
@@ -4449,7 +4449,6 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_AXI_ECC:
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
- case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
goya_print_irq_info(hdev, event_type, false);
hl_device_reset(hdev, true, false);
break;
@@ -4485,6 +4484,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
goya_unmask_irq(hdev, event_type);
break;
+ case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT:
case GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU:
case GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU:
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index d74b182e19f3..6c0173772162 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -81,6 +81,9 @@
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
+#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 7a2b3545a7f9..57cb68f5cc64 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,6 +98,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index e327f80ebe70..7102e2ebc614 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -10,6 +10,7 @@
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/backing-dev.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -427,6 +428,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card)
goto free_tag_set;
}
+ if (mmc_host_is_spi(host) && host->use_spi_crc)
+ mq->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
+
mq->queue->queuedata = mq;
blk_queue_rq_timeout(mq->queue, 60 * HZ);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index faaaf52a46d2..eea52e2c5a0c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2012,8 +2012,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if ((err != -ETIMEDOUT) &&
- (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ if (err != -ETIMEDOUT) {
state = STATE_SENDING_DATA;
continue;
}
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2d736e416775..ba9a63db73da 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -73,7 +73,7 @@
#define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
- #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
#define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
#define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
#define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 6ee340a3fb3a..603a5d9f045a 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -624,6 +624,7 @@ err_cleanup_host:
sdhci_cleanup_host(host);
pm_runtime_disable:
+ pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
diff --git a/drivers/mtd/hyperbus/Kconfig b/drivers/mtd/hyperbus/Kconfig
index cff6bbd226f5..b4e3caf7d799 100644
--- a/drivers/mtd/hyperbus/Kconfig
+++ b/drivers/mtd/hyperbus/Kconfig
@@ -14,8 +14,9 @@ if MTD_HYPERBUS
config HBMC_AM654
tristate "HyperBus controller driver for AM65x SoC"
+ depends on ARM64 || COMPILE_TEST
select MULTIPLEXER
- select MUX_MMIO
+ imply MUX_MMIO
help
This is the driver for HyperBus controller on TI's AM65x and
other SoCs
diff --git a/drivers/mtd/nand/onenand/onenand_base.c b/drivers/mtd/nand/onenand/onenand_base.c
index a1f8fe1abb10..e082d632fb74 100644
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
@@ -3259,6 +3259,7 @@ static void onenand_check_features(struct mtd_info *mtd)
switch (density) {
case ONENAND_DEVICE_DENSITY_8Gb:
this->options |= ONENAND_HAS_NOP_1;
+ /* fall through */
case ONENAND_DEVICE_DENSITY_4Gb:
if (ONENAND_IS_DDP(this))
this->options |= ONENAND_HAS_2PLANE;
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 1622d3145587..8ca9fad6e6ad 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -390,6 +390,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
(chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
return MICRON_ON_DIE_UNSUPPORTED;
+ /*
+ * It seems that there are devices which do not support ECC officially.
+ * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
+ * enabling the ECC feature but don't reflect that in the READ_ID table.
+ * So we have to guarantee that we disable the ECC feature directly
+ * after issuing the READ_ID table command. Later we can evaluate the
+ * ECC_ENABLE support.
+ */
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
@@ -398,13 +406,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
- if (!(id[4] & MICRON_ID_ECC_ENABLED))
- return MICRON_ON_DIE_UNSUPPORTED;
-
ret = micron_nand_on_die_ecc_setup(chip, false);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
+ return MICRON_ON_DIE_UNSUPPORTED;
+
ret = nand_readid_op(chip, 0, id, sizeof(id));
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 11c5bad95226..14a5fb378145 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -363,10 +363,13 @@ static int __init arcrimi_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
+ /* Fall through */
case 3: /* Node ID */
node = ints[3];
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 28510e33924f..cd27fdc1059b 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -197,16 +197,22 @@ static int __init com20020isa_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_info("Too many arguments\n");
+ /* Fall through */
case 6: /* Timeout */
timeout = ints[6];
+ /* Fall through */
case 5: /* CKP value */
clockp = ints[5];
+ /* Fall through */
case 4: /* Backplane flag */
backplane = ints[4];
+ /* Fall through */
case 3: /* Node ID */
node = ints[3];
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 2c546013a980..186bbf87bc84 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -363,8 +363,10 @@ static int __init com90io_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index ca4a57c30bf8..bd75d06ad7df 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -693,10 +693,13 @@ static int __init com90xx_setup(char *s)
switch (ints[0]) {
default: /* ERROR */
pr_err("Too many arguments\n");
+ /* Fall through */
case 3: /* Mem address */
shmem = ints[3];
+ /* Fall through */
case 2: /* IRQ */
irq = ints[2];
+ /* Fall through */
case 1: /* IO address */
io = ints[1];
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 9b7016abca2f..02fd7822c14a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2196,6 +2196,15 @@ static void bond_miimon_commit(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
+ /* For 802.3ad mode, check current slave speed and
+ * duplex again in case its port was disabled after
+ * invalid speed/duplex reporting but recovered before
+ * link monitoring could make a decision on the actual
+ * link status
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ slave->link == BOND_LINK_UP)
+ bond_3ad_adapter_speed_duplex_changed(slave);
continue;
case BOND_LINK_UP:
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index b6b93a2d93a5..483d270664cc 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -1249,6 +1249,8 @@ int register_candev(struct net_device *dev)
return -EINVAL;
dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
return register_netdev(dev);
}
EXPORT_SYMBOL_GPL(register_candev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 09d8e623dcf6..dc5695dffc2e 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -404,9 +404,10 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
priv->write(reg_mcr, &regs->mcr);
}
-static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
+static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
+ unsigned int ackval;
u32 reg_mcr;
reg_mcr = priv->read(&regs->mcr);
@@ -416,20 +417,37 @@ static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
/* enable stop request */
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+
+ /* get stop acknowledgment */
+ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
+ ackval, ackval & (1 << priv->stm.ack_bit),
+ 0, FLEXCAN_TIMEOUT_US))
+ return -ETIMEDOUT;
+
+ return 0;
}
-static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv)
+static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->regs;
+ unsigned int ackval;
u32 reg_mcr;
/* remove stop request */
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
+ /* get stop acknowledgment */
+ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
+ ackval, !(ackval & (1 << priv->stm.ack_bit)),
+ 0, FLEXCAN_TIMEOUT_US))
+ return -ETIMEDOUT;
+
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
+
+ return 0;
}
static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
@@ -1455,10 +1473,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
priv = netdev_priv(dev);
priv->stm.gpr = syscon_node_to_regmap(gpr_np);
- of_node_put(gpr_np);
if (IS_ERR(priv->stm.gpr)) {
dev_dbg(&pdev->dev, "could not find gpr regmap\n");
- return PTR_ERR(priv->stm.gpr);
+ ret = PTR_ERR(priv->stm.gpr);
+ goto out_put_node;
}
priv->stm.req_gpr = out_val[1];
@@ -1477,6 +1495,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
device_set_wakeup_enable(&pdev->dev, true);
return 0;
+
+out_put_node:
+ of_node_put(gpr_np);
+ return ret;
}
static const struct of_device_id flexcan_of_match[] = {
@@ -1644,7 +1666,9 @@ static int __maybe_unused flexcan_suspend(struct device *device)
*/
if (device_may_wakeup(device)) {
enable_irq_wake(dev->irq);
- flexcan_enter_stop_mode(priv);
+ err = flexcan_enter_stop_mode(priv);
+ if (err)
+ return err;
} else {
err = flexcan_chip_disable(priv);
if (err)
@@ -1717,10 +1741,13 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
if (netif_running(dev) && device_may_wakeup(device)) {
flexcan_enable_wakeup_irq(priv, false);
- flexcan_exit_stop_mode(priv);
+ err = flexcan_exit_stop_mode(priv);
+ if (err)
+ return err;
}
return 0;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 51eecc7cdcdd..edaa1ca972c1 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1508,10 +1508,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* All packets processed */
if (num_pkts < quota) {
- napi_complete_done(napi, num_pkts);
- /* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
- RCANFD_RFCC_RFIE);
+ if (napi_complete_done(napi, num_pkts)) {
+ /* Enable Rx FIFO interrupts */
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ }
}
return num_pkts;
}
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 185c7f7d38a4..5e0d5e8101c8 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -479,7 +479,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 234cf1042df6..12358f06d194 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -664,17 +664,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
return regulator_disable(reg);
}
-static void mcp251x_open_clean(struct net_device *net)
-{
- struct mcp251x_priv *priv = netdev_priv(net);
- struct spi_device *spi = priv->spi;
-
- free_irq(spi->irq, priv);
- mcp251x_hw_sleep(spi);
- mcp251x_power_enable(priv->transceiver, 0);
- close_candev(net);
-}
-
static int mcp251x_stop(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
@@ -941,37 +930,43 @@ static int mcp251x_open(struct net_device *net)
flags | IRQF_ONESHOT, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- mcp251x_power_enable(priv->transceiver, 0);
- close_candev(net);
- goto open_unlock;
+ goto out_close;
}
priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clean;
+ }
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
ret = mcp251x_hw_reset(spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
ret = mcp251x_setup(net, spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
ret = mcp251x_set_normal_mode(spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
can_led_event(net, CAN_LED_EVENT_OPEN);
netif_wake_queue(net);
+ mutex_unlock(&priv->mcp_lock);
-open_unlock:
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+out_clean:
+ free_irq(spi->irq, priv);
+ mcp251x_hw_sleep(spi);
+out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
mutex_unlock(&priv->mcp_lock);
return ret;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 458154c9b482..65dce642b86b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -568,16 +568,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
dev->state &= ~PCAN_USB_STATE_STARTED;
netif_stop_queue(netdev);
+ close_candev(netdev);
+
+ dev->can.state = CAN_STATE_STOPPED;
+
/* unlink all pending urbs and free used memory */
peak_usb_unlink_all_urbs(dev);
if (dev->adapter->dev_stop)
dev->adapter->dev_stop(dev);
- close_candev(netdev);
-
- dev->can.state = CAN_STATE_STOPPED;
-
/* can set bus off now */
if (dev->adapter->dev_set_bus) {
int err = dev->adapter->dev_set_bus(dev, 0);
@@ -855,7 +855,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 34761c3a6286..47cc1ff5b88e 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -841,7 +841,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
goto err_out;
/* allocate command buffer once for all for the interface */
- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
GFP_KERNEL);
if (!pdev->cmd_buffer_addr)
goto err_out_1;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 178bb7cff0c1..53cb2f72bdd0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -494,7 +494,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
u8 *buffer;
int err;
- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 467e61ced82c..d3804ffd3d2a 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -27,7 +27,6 @@
#include <linux/platform_data/mv88e6xxx.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
-#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/dsa.h>
@@ -430,7 +429,7 @@ int mv88e6xxx_port_setup_mac(struct mv88e6xxx_chip *chip, int port, int link,
return 0;
/* Port's MAC control must not be changed unless the link is down */
- err = chip->info->ops->port_set_link(chip, port, 0);
+ err = chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
if (err)
return err;
@@ -482,30 +481,6 @@ static int mv88e6xxx_phy_is_internal(struct dsa_switch *ds, int port)
return port < chip->info->num_internal_phys;
}
-/* We expect the switch to perform auto negotiation if there is a real
- * phy. However, in the case of a fixed link phy, we force the port
- * settings from the fixed link settings.
- */
-static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- if (!phy_is_pseudo_fixed_link(phydev) &&
- mv88e6xxx_phy_is_internal(ds, port))
- return;
-
- mv88e6xxx_reg_lock(chip);
- err = mv88e6xxx_port_setup_mac(chip, port, phydev->link, phydev->speed,
- phydev->duplex, phydev->pause,
- phydev->interface);
- mv88e6xxx_reg_unlock(chip);
-
- if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC\n", port);
-}
-
static void mv88e6065_phylink_validate(struct mv88e6xxx_chip *chip, int port,
unsigned long *mask,
struct phylink_link_state *state)
@@ -2747,6 +2722,7 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
err = mv88e6xxx_mdio_register(chip, child, true);
if (err) {
mv88e6xxx_mdios_unregister(chip);
+ of_node_put(child);
return err;
}
}
@@ -4720,7 +4696,6 @@ static int mv88e6xxx_port_egress_floods(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.setup = mv88e6xxx_setup,
- .adjust_link = mv88e6xxx_adjust_link,
.phylink_validate = mv88e6xxx_validate,
.phylink_mac_link_state = mv88e6xxx_link_state,
.phylink_mac_config = mv88e6xxx_mac_config,
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 232e8cc96f6d..16f15c93a102 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2009 Felix Fietkau <nbd@nbd.name>
* Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2016 John Crispin <john@phrozen.org>
*/
@@ -583,8 +583,11 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
for_each_available_child_of_node(ports, port) {
err = of_property_read_u32(port, "reg", &reg);
- if (err)
+ if (err) {
+ of_node_put(port);
+ of_node_put(ports);
return err;
+ }
if (!dsa_is_user_port(priv->ds, reg))
continue;
@@ -595,6 +598,7 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
internal_mdio_mask |= BIT(reg);
}
+ of_node_put(ports);
if (!external_mdio_mask && !internal_mdio_mask) {
dev_err(priv->dev, "no PHYs are defined.\n");
return -EINVAL;
@@ -935,6 +939,8 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
qca8k_port_set_status(priv, port, 1);
priv->port_sts[port].enabled = 1;
+ phy_support_asym_pause(phy);
+
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 6bfb1696a6f2..9988c9d18567 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -277,6 +277,18 @@ sja1105et_l2_lookup_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
SJA1105ET_SIZE_L2_LOOKUP_ENTRY, op);
}
+static size_t sja1105et_dyn_l2_lookup_entry_packing(void *buf, void *entry_ptr,
+ enum packing_op op)
+{
+ struct sja1105_l2_lookup_entry *entry = entry_ptr;
+ u8 *cmd = buf + SJA1105ET_SIZE_L2_LOOKUP_ENTRY;
+ const int size = SJA1105_SIZE_DYN_CMD;
+
+ sja1105_packing(cmd, &entry->lockeds, 28, 28, size, op);
+
+ return sja1105et_l2_lookup_entry_packing(buf, entry_ptr, op);
+}
+
static void
sja1105et_mgmt_route_cmd_packing(void *buf, struct sja1105_dyn_cmd *cmd,
enum packing_op op)
@@ -477,7 +489,7 @@ sja1105et_general_params_entry_packing(void *buf, void *entry_ptr,
/* SJA1105E/T: First generation */
struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
[BLK_IDX_L2_LOOKUP] = {
- .entry_packing = sja1105et_l2_lookup_entry_packing,
+ .entry_packing = sja1105et_dyn_l2_lookup_entry_packing,
.cmd_packing = sja1105et_l2_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE | OP_DEL),
.max_entry_count = SJA1105_MAX_L2_LOOKUP_COUNT,
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 32bf3a7cc3b6..d073baffc20b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -218,7 +218,7 @@ static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
/* This selects between Independent VLAN Learning (IVL) and
* Shared VLAN Learning (SVL)
*/
- .shared_learn = false,
+ .shared_learn = true,
/* Don't discard management traffic based on ENFPORT -
* we don't perform SMAC port enforcement anyway, so
* what we are setting here doesn't matter.
@@ -625,6 +625,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (of_property_read_u32(child, "reg", &index) < 0) {
dev_err(dev, "Port number not defined in device tree "
"(property \"reg\")\n");
+ of_node_put(child);
return -ENODEV;
}
@@ -634,6 +635,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
dev_err(dev, "Failed to read phy-mode or "
"phy-interface-type property for port %d\n",
index);
+ of_node_put(child);
return -ENODEV;
}
ports[index].phy_mode = phy_mode;
@@ -643,6 +645,7 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
if (!of_phy_is_fixed_link(child)) {
dev_err(dev, "phy-handle or fixed-link "
"properties missing!\n");
+ of_node_put(child);
return -ENODEV;
}
/* phy-handle is missing, but fixed-link isn't.
@@ -1089,8 +1092,13 @@ int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- l2_lookup.mask_vlanid = VLAN_VID_MASK;
- l2_lookup.mask_iotag = BIT(0);
+ if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ } else {
+ l2_lookup.mask_vlanid = 0;
+ l2_lookup.mask_iotag = 0;
+ }
l2_lookup.destports = BIT(port);
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1147,8 +1155,13 @@ int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
l2_lookup.vlanid = vid;
l2_lookup.iotag = SJA1105_S_TAG;
l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
- l2_lookup.mask_vlanid = VLAN_VID_MASK;
- l2_lookup.mask_iotag = BIT(0);
+ if (dsa_port_is_vlan_filtering(&ds->ports[port])) {
+ l2_lookup.mask_vlanid = VLAN_VID_MASK;
+ l2_lookup.mask_iotag = BIT(0);
+ } else {
+ l2_lookup.mask_vlanid = 0;
+ l2_lookup.mask_iotag = 0;
+ }
l2_lookup.destports = BIT(port);
rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
@@ -1178,60 +1191,31 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- u16 rx_vid, tx_vid;
- int rc, i;
- if (dsa_port_is_vlan_filtering(&ds->ports[port]))
- return priv->info->fdb_add_cmd(ds, port, addr, vid);
-
- /* Since we make use of VLANs even when the bridge core doesn't tell us
- * to, translate these FDB entries into the correct dsa_8021q ones.
- * The basic idea (also repeats for removal below) is:
- * - Each of the other front-panel ports needs to be able to forward a
- * pvid-tagged (aka tagged with their rx_vid) frame that matches this
- * DMAC.
- * - The CPU port (aka the tx_vid of this port) needs to be able to
- * send a frame matching this DMAC to the specified port.
- * For a better picture see net/dsa/tag_8021q.c.
+ /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
+ * so the switch still does some VLAN processing internally.
+ * But Shared VLAN Learning (SVL) is also active, and it will take
+ * care of autonomous forwarding between the unique pvid's of each
+ * port. Here we just make sure that users can't add duplicate FDB
+ * entries when in this mode - the actual VID doesn't matter except
+ * for what gets printed in 'bridge fdb show'. In the case of zero,
+ * no VID gets printed at all.
*/
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (i == port)
- continue;
- if (i == dsa_upstream_port(priv->ds, port))
- continue;
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ vid = 0;
- rx_vid = dsa_8021q_rx_vid(ds, i);
- rc = priv->info->fdb_add_cmd(ds, port, addr, rx_vid);
- if (rc < 0)
- return rc;
- }
- tx_vid = dsa_8021q_tx_vid(ds, port);
- return priv->info->fdb_add_cmd(ds, port, addr, tx_vid);
+ return priv->info->fdb_add_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- u16 rx_vid, tx_vid;
- int rc, i;
-
- if (dsa_port_is_vlan_filtering(&ds->ports[port]))
- return priv->info->fdb_del_cmd(ds, port, addr, vid);
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- if (i == port)
- continue;
- if (i == dsa_upstream_port(priv->ds, port))
- continue;
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ vid = 0;
- rx_vid = dsa_8021q_rx_vid(ds, i);
- rc = priv->info->fdb_del_cmd(ds, port, addr, rx_vid);
- if (rc < 0)
- return rc;
- }
- tx_vid = dsa_8021q_tx_vid(ds, port);
- return priv->info->fdb_del_cmd(ds, port, addr, tx_vid);
+ return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
@@ -1270,39 +1254,9 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
continue;
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- /* On SJA1105 E/T, the switch doesn't implement the LOCKEDS
- * bit, so it doesn't tell us whether a FDB entry is static
- * or not.
- * But, of course, we can find out - we're the ones who added
- * it in the first place.
- */
- if (priv->info->device_id == SJA1105E_DEVICE_ID ||
- priv->info->device_id == SJA1105T_DEVICE_ID) {
- int match;
-
- match = sja1105_find_static_fdb_entry(priv, port,
- &l2_lookup);
- l2_lookup.lockeds = (match >= 0);
- }
-
- /* We need to hide the dsa_8021q VLANs from the user. This
- * basically means hiding the duplicates and only showing
- * the pvid that is supposed to be active in standalone and
- * non-vlan_filtering modes (aka 1).
- * - For statically added FDB entries (bridge fdb add), we
- * can convert the TX VID (coming from the CPU port) into the
- * pvid and ignore the RX VIDs of the other ports.
- * - For dynamically learned FDB entries, a single entry with
- * no duplicates is learned - that which has the real port's
- * pvid, aka RX VID.
- */
- if (!dsa_port_is_vlan_filtering(&ds->ports[port])) {
- if (l2_lookup.vlanid == tx_vid ||
- l2_lookup.vlanid == rx_vid)
- l2_lookup.vlanid = 1;
- else
- continue;
- }
+ /* We need to hide the dsa_8021q VLANs from the user. */
+ if (!dsa_port_is_vlan_filtering(&ds->ports[port]))
+ l2_lookup.vlanid = 0;
cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
}
return 0;
@@ -1594,6 +1548,7 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
*/
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
+ struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
struct sja1105_private *priv = ds->priv;
struct sja1105_table *table;
@@ -1622,6 +1577,28 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
general_params->incl_srcpt1 = enabled;
general_params->incl_srcpt0 = enabled;
+ /* VLAN filtering => independent VLAN learning.
+ * No VLAN filtering => shared VLAN learning.
+ *
+ * In shared VLAN learning mode, untagged traffic still gets
+ * pvid-tagged, and the FDB table gets populated with entries
+ * containing the "real" (pvid or from VLAN tag) VLAN ID.
+ * However the switch performs a masked L2 lookup in the FDB,
+ * effectively only looking up a frame's DMAC (and not VID) for the
+ * forwarding decision.
+ *
+ * This is extremely convenient for us, because in modes with
+ * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
+ * each front panel port. This is good for identification but breaks
+ * learning badly - the VID of the learnt FDB entry is unique, aka
+ * no frames coming from any other port are going to have it. So
+ * for forwarding purposes, this is as though learning was broken
+ * (all frames get flooded).
+ */
+ table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
+ l2_lookup_params = table->entries;
+ l2_lookup_params->shared_learn = !enabled;
+
rc = sja1105_static_config_reload(priv);
if (rc)
dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
@@ -1751,6 +1728,8 @@ static void sja1105_teardown(struct dsa_switch *ds)
cancel_work_sync(&priv->tagger_data.rxtstamp_work);
skb_queue_purge(&priv->tagger_data.skb_rxtstamp_queue);
+ sja1105_ptp_clock_unregister(priv);
+ sja1105_static_config_free(&priv->static_config);
}
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
@@ -2208,9 +2187,7 @@ static int sja1105_remove(struct spi_device *spi)
{
struct sja1105_private *priv = spi_get_drvdata(spi);
- sja1105_ptp_clock_unregister(priv);
dsa_unregister_switch(priv->ds);
- sja1105_static_config_free(&priv->static_config);
return 0;
}
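
The sja1105_vlan_filtering() comment added above explains the trade-off this hunk implements: independent VLAN learning when VLAN filtering is on, shared VLAN learning when it is off, because the per-port dsa_8021q pvids would otherwise make every learned FDB entry unique and defeat forwarding. The standalone sketch below is illustrative only and not part of the patch; it models a masked (DMAC-only) lookup to show why shared learning keeps an entry usable even when a frame carries a different pvid than the one the entry was learned under.

	/* Illustrative sketch only -- not part of the patch. Models how a
	 * shared-learning (SVL) FDB lookup ignores the VID, so an entry
	 * learned under one dsa_8021q pvid still matches frames tagged
	 * with another.
	 */
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	struct fdb_entry {
		unsigned char mac[6];
		unsigned short vid;
		int port;
	};

	/* Two entries learned while each front-panel port used its own pvid. */
	static const struct fdb_entry fdb[] = {
		{ { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 }, 1, 0 },
		{ { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 }, 2, 1 },
	};

	/* Return the destination port, or -1 to flood. With shared_learn the
	 * VID is left out of the key, mirroring the switch's masked lookup.
	 */
	static int fdb_lookup(const unsigned char *dmac, unsigned short vid,
			      bool shared_learn)
	{
		size_t i;

		for (i = 0; i < sizeof(fdb) / sizeof(fdb[0]); i++) {
			if (memcmp(fdb[i].mac, dmac, 6))
				continue;
			if (!shared_learn && fdb[i].vid != vid)
				continue;
			return fdb[i].port;
		}
		return -1;
	}

	int main(void)
	{
		const unsigned char dmac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

		/* Frame arrives with pvid 2; the entry was learned under pvid 1. */
		printf("independent learning: port %d\n", fdb_lookup(dmac, 2, false));
		printf("shared learning:      port %d\n", fdb_lookup(dmac, 2, true));
		return 0;
	}
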
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index d19cfdf681af..d8e8dd59f3d1 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -369,16 +369,15 @@ int sja1105_ptp_clock_register(struct sja1105_private *priv)
.mult = SJA1105_CC_MULT,
};
mutex_init(&priv->ptp_lock);
- INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
-
- schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
-
priv->ptp_caps = sja1105_ptp_caps;
priv->clock = ptp_clock_register(&priv->ptp_caps, ds->dev);
if (IS_ERR_OR_NULL(priv->clock))
return PTR_ERR(priv->clock);
+ INIT_DELAYED_WORK(&priv->refresh_work, sja1105_ptp_overflow_check);
+ schedule_delayed_work(&priv->refresh_work, SJA1105_REFRESH_INTERVAL);
+
return sja1105_ptp_reset(priv);
}
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 2a3e2450968e..a9478577b495 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -12,8 +12,8 @@ config NET_VENDOR_8390
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about Western Digital cards. If you say Y, you will be
- asked for your specific card in the following questions.
+ the questions about National Semiconductor 8390 cards. If you say Y,
+ you will be asked for your specific card in the following questions.
if NET_VENDOR_8390
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index e43d922f043e..174344c450af 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2362,7 +2362,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
/* Allocate memory for the TCB's (Transmit Control Block) */
tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
- GFP_ATOMIC | GFP_DMA);
+ GFP_KERNEL | GFP_DMA);
if (!tx_ring->tcb_ring)
return -ENOMEM;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 3434730a7699..0537df06a9b5 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -860,7 +860,9 @@ static int emac_probe(struct platform_device *pdev)
goto out_clk_disable_unprepare;
}
- db->phy_node = of_parse_phandle(np, "phy", 0);
+ db->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!db->phy_node)
+ db->phy_node = of_parse_phandle(np, "phy", 0);
if (!db->phy_node) {
dev_err(&pdev->dev, "no associated PHY\n");
ret = -ENODEV;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index de4950d2022e..9f965cdfff5c 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -14,7 +14,7 @@ config NET_VENDOR_AMD
say Y.
Note that the answer to this question does not directly affect
- the kernel: saying N will just case the configurator to skip all
+ the kernel: saying N will just cause the configurator to skip all
the questions regarding AMD chipsets. If you say Y, you will be asked
for your specific chipset/driver in the following questions.
diff --git a/drivers/net/ethernet/apple/Kconfig b/drivers/net/ethernet/apple/Kconfig
index fde7ae33e302..f78b9c841296 100644
--- a/drivers/net/ethernet/apple/Kconfig
+++ b/drivers/net/ethernet/apple/Kconfig
@@ -11,8 +11,8 @@ config NET_VENDOR_APPLE
If you have a network (Ethernet) card belonging to this class, say Y.
Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about IBM devices. If you say Y, you will be asked for
+ kernel: saying N will just cause the configurator to skip all the
+ questions about Apple devices. If you say Y, you will be asked for
your specific card in the following questions.
if NET_VENDOR_APPLE
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 40a8717f51b1..7548247455d7 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1141,7 +1141,7 @@ static int ag71xx_rings_init(struct ag71xx *ag)
tx->descs_cpu = dma_alloc_coherent(&ag->pdev->dev,
ring_size * AG71XX_DESC_SIZE,
- &tx->descs_dma, GFP_ATOMIC);
+ &tx->descs_dma, GFP_KERNEL);
if (!tx->descs_cpu) {
kfree(tx->buf);
tx->buf = NULL;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index e9017caf024d..e24f5d2b6afe 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -14,9 +14,9 @@ config NET_VENDOR_BROADCOM
say Y.
Note that the answer to this question does not directly affect
- the kernel: saying N will just case the configurator to skip all
- the questions regarding AMD chipsets. If you say Y, you will be asked
- for your specific chipset/driver in the following questions.
+ the kernel: saying N will just cause the configurator to skip all
+ the questions regarding Broadcom chipsets. If you say Y, you will
+ be asked for your specific chipset/driver in the following questions.
if NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9c5cea8db16..9483553ce444 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -992,7 +992,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_priv *priv =
container_of(napi, struct bcm_sysport_priv, napi);
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
unsigned int work_done = 0;
work_done = bcm_sysport_desc_rx(priv, budget);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e2be5a685130..e47ea92e2ae3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1934,8 +1934,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return netdev_pick_tx(dev, skb, NULL) %
- (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return netdev_pick_tx(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c23fbb34f0e9..94be97b7952c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2267,7 +2267,7 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
}
}
if (bp->flags & BNXT_FLAG_DIM) {
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
dim_update_sample(cpr->event_ctr,
cpr->rx_packets,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index a2b57807453b..d3a0b614dbfa 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1895,7 +1895,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
{
struct bcmgenet_rx_ring *ring = container_of(napi,
struct bcmgenet_rx_ring, napi);
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
unsigned int work_done;
work_done = bcmgenet_desc_rx(ring, budget);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index ad22554857bf..acb016834f04 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1381,24 +1381,18 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
u8 *dst)
{
u8 mac[ETH_ALEN];
- int ret;
+ u8 *addr;
- ret = fwnode_property_read_u8_array(acpi_fwnode_handle(adev),
- "mac-address", mac, ETH_ALEN);
- if (ret)
- goto out;
-
- if (!is_valid_ether_addr(mac)) {
+ addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
+ if (!addr) {
dev_err(dev, "MAC address invalid: %pM\n", mac);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
dev_info(dev, "MAC address set to: %pM\n", mac);
- memcpy(dst, mac, ETH_ALEN);
-out:
- return ret;
+ ether_addr_copy(dst, mac);
+ return 0;
}
/* Currently only sets the MAC address. */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 1e82b9efe447..58f89f6a040f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3269,7 +3269,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
- goto out_free_adapter;
+ goto out_free_adapter_nofail;
}
adapter->pdev = pdev;
@@ -3397,6 +3397,9 @@ out_free_dev:
if (adapter->port[i])
free_netdev(adapter->port[i]);
+out_free_adapter_nofail:
+ kfree_skb(adapter->nofail_skb);
+
out_free_adapter:
kfree(adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ef5d61d57597..323976c811e9 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -550,7 +550,7 @@ int be_process_mcc(struct be_adapter *adapter)
int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- spin_lock(&adapter->mcc_cq_lock);
+ spin_lock_bh(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
@@ -566,7 +566,7 @@ int be_process_mcc(struct be_adapter *adapter)
if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
- spin_unlock(&adapter->mcc_cq_lock);
+ spin_unlock_bh(&adapter->mcc_cq_lock);
return status;
}
@@ -581,9 +581,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (be_check_error(adapter, BE_ERROR_ANY))
return -EIO;
- local_bh_disable();
status = be_process_mcc(adapter);
- local_bh_enable();
if (atomic_read(&mcc_obj->q.used) == 0)
break;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1c9883019767..314e9868b861 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5630,9 +5630,7 @@ static void be_worker(struct work_struct *work)
* mcc completions
*/
if (!netif_running(adapter->netdev)) {
- local_bh_disable();
be_process_mcc(adapter);
- local_bh_enable();
goto reschedule;
}
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 9c530f75134f..c219587bd334 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -2,6 +2,7 @@
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
physical function (PF) devices, managing ENETC Ports at a privileged
@@ -12,6 +13,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI && (ARCH_LAYERSCAPE || COMPILE_TEST)
+ select PHYLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
virtual function (VF) devices enabled by the ENETC PF driver.
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index e80fedb27cee..210749bf1eac 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2439,9 +2439,6 @@ MODULE_PARM_DESC(fsl_fm_rx_extra_headroom, "Extra headroom for Rx buffers");
* buffers when not using jumbo frames.
* Must be large enough to accommodate the network MTU, but small enough
* to avoid wasting skb memory.
- *
- * Could be overridden once, at boot-time, via the
- * fm_set_max_frm() callback.
*/
static int fsl_fm_max_frm = FSL_FM_MAX_FRAME_SIZE;
module_param(fsl_fm_max_frm, int, 0);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 92372dc43be8..ebc37e256922 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -31,9 +31,6 @@
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
dma_addr_t bus; /* the bus for the desc_ring */
- u32 cnt; /* free-running total number of completed packets */
- u32 fill_cnt; /* free-running total number of descriptors posted */
- u32 mask; /* masks the cnt to the size of the ring */
u8 seqno; /* the next expected seqno for this desc*/
};
@@ -60,8 +57,6 @@ struct gve_rx_data_queue {
dma_addr_t data_bus; /* dma mapping of the slots */
struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
- u32 mask; /* masks the cnt to the size of the ring */
- u32 cnt; /* free-running total number of completed packets */
};
struct gve_priv;
@@ -73,6 +68,9 @@ struct gve_rx_ring {
struct gve_rx_data_queue data;
u64 rbytes; /* free-running bytes received */
u64 rpackets; /* free-running packets received */
+ u32 cnt; /* free-running total number of completed packets */
+ u32 fill_cnt; /* free-running total number of descs and buffs posted */
+ u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 26540b856541..d8fa816f4473 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -138,8 +138,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
struct gve_rx_ring *rx = &priv->rx[ring];
- data[i++] = rx->desc.cnt;
- data[i++] = rx->desc.fill_cnt;
+ data[i++] = rx->cnt;
+ data[i++] = rx->fill_cnt;
}
} else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 1914b8350da7..59564ac99d2a 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -37,7 +37,7 @@ static void gve_rx_free_ring(struct gve_priv *priv, int idx)
rx->data.qpl = NULL;
kvfree(rx->data.page_info);
- slots = rx->data.mask + 1;
+ slots = rx->mask + 1;
bytes = sizeof(*rx->data.data_ring) * slots;
dma_free_coherent(dev, bytes, rx->data.data_ring,
rx->data.data_bus);
@@ -64,7 +64,7 @@ static int gve_prefill_rx_pages(struct gve_rx_ring *rx)
/* Allocate one page per Rx queue slot. Each page is split into two
* packet buffers, when possible we "page flip" between the two.
*/
- slots = rx->data.mask + 1;
+ slots = rx->mask + 1;
rx->data.page_info = kvzalloc(slots *
sizeof(*rx->data.page_info), GFP_KERNEL);
@@ -111,7 +111,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->q_num = idx;
slots = priv->rx_pages_per_qpl;
- rx->data.mask = slots - 1;
+ rx->mask = slots - 1;
/* alloc rx data ring */
bytes = sizeof(*rx->data.data_ring) * slots;
@@ -125,7 +125,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
err = -ENOMEM;
goto abort_with_slots;
}
- rx->desc.fill_cnt = filled_pages;
+ rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
dma_wmb();
@@ -156,8 +156,8 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
err = -ENOMEM;
goto abort_with_q_resources;
}
- rx->desc.mask = slots - 1;
- rx->desc.cnt = 0;
+ rx->mask = slots - 1;
+ rx->cnt = 0;
rx->desc.seqno = 1;
gve_rx_add_to_block(priv, idx);
@@ -213,7 +213,7 @@ void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx)
{
u32 db_idx = be32_to_cpu(rx->q_resources->db_index);
- iowrite32be(rx->desc.fill_cnt, &priv->db_bar2[db_idx]);
+ iowrite32be(rx->fill_cnt, &priv->db_bar2[db_idx]);
}
static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
@@ -273,7 +273,7 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info,
}
static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
- netdev_features_t feat)
+ netdev_features_t feat, u32 idx)
{
struct gve_rx_slot_page_info *page_info;
struct gve_priv *priv = rx->gve;
@@ -282,14 +282,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
struct sk_buff *skb;
int pagecount;
u16 len;
- u32 idx;
/* drop this packet */
if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
return true;
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
- idx = rx->data.cnt & rx->data.mask;
page_info = &rx->data.page_info[idx];
/* gvnic can only receive into registered segments. If the buffer
@@ -340,8 +338,6 @@ have_skb:
if (!skb)
return true;
- rx->data.cnt++;
-
if (likely(feat & NETIF_F_RXCSUM)) {
/* NIC passes up the partial sum */
if (rx_desc->csum)
@@ -370,7 +366,7 @@ static bool gve_rx_work_pending(struct gve_rx_ring *rx)
__be16 flags_seq;
u32 next_idx;
- next_idx = rx->desc.cnt & rx->desc.mask;
+ next_idx = rx->cnt & rx->mask;
desc = rx->desc.desc_ring + next_idx;
flags_seq = desc->flags_seq;
@@ -385,8 +381,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
{
struct gve_priv *priv = rx->gve;
struct gve_rx_desc *desc;
- u32 cnt = rx->desc.cnt;
- u32 idx = cnt & rx->desc.mask;
+ u32 cnt = rx->cnt;
+ u32 idx = cnt & rx->mask;
u32 work_done = 0;
u64 bytes = 0;
@@ -401,10 +397,10 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
rx->q_num, GVE_SEQNO(desc->flags_seq),
rx->desc.seqno);
bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
- if (!gve_rx(rx, desc, feat))
+ if (!gve_rx(rx, desc, feat, idx))
gve_schedule_reset(priv);
cnt++;
- idx = cnt & rx->desc.mask;
+ idx = cnt & rx->mask;
desc = rx->desc.desc_ring + idx;
rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
work_done++;
@@ -417,8 +413,8 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
rx->rpackets += work_done;
rx->rbytes += bytes;
u64_stats_update_end(&rx->statss);
- rx->desc.cnt = cnt;
- rx->desc.fill_cnt += work_done;
+ rx->cnt = cnt;
+ rx->fill_cnt += work_done;
/* restock desc ring slots */
dma_wmb(); /* Ensure descs are visible before ringing doorbell */
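
The gve hunks above move cnt, fill_cnt and mask out of the desc/data sub-queues into struct gve_rx_ring, so a single mask and one pair of free-running counters describe the whole ring. The sketch below is illustrative only (not driver code) and shows the idiom those fields rely on: with a power-of-two ring size, counters are never wrapped by hand, "counter & mask" selects the slot, and "fill_cnt - cnt" stays correct across 32-bit wraparound.

	/* Illustrative sketch only -- not part of the patch. Demonstrates
	 * free-running-counter ring indexing: the counters wrap naturally
	 * and a power-of-two mask turns them into slot indices.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 8u			/* must be a power of two */
	#define RING_MASK (RING_SIZE - 1)

	int main(void)
	{
		uint32_t cnt = 0xfffffffd;	/* completions, about to wrap */
		uint32_t fill_cnt = 0x00000001;	/* posts, already wrapped */

		/* Outstanding descriptors survive the wrap: 1 - 0xfffffffd == 4. */
		printf("outstanding = %u\n", (unsigned)(fill_cnt - cnt));

		/* Each counter maps to a slot with a simple mask. */
		printf("next completion slot = %u\n", (unsigned)(cnt & RING_MASK));
		printf("next fill slot       = %u\n", (unsigned)(fill_cnt & RING_MASK));

		/* Consuming a descriptor is just an increment; no modulo needed. */
		cnt++;
		printf("slot after consume   = %u\n", (unsigned)(cnt & RING_MASK));
		return 0;
	}
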
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index d60452845539..c84167447abe 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -220,6 +220,7 @@ struct hip04_priv {
unsigned int reg_inten;
struct napi_struct napi;
+ struct device *dev;
struct net_device *ndev;
struct tx_desc *tx_desc;
@@ -248,7 +249,7 @@ struct hip04_priv {
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
- return (head - tail) % (TX_DESC_NUM - 1);
+ return (head - tail) % TX_DESC_NUM;
}
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -465,7 +466,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
}
if (priv->tx_phys[tx_tail]) {
- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
priv->tx_skb[tx_tail]->len,
DMA_TO_DEVICE);
priv->tx_phys[tx_tail] = 0;
@@ -516,8 +517,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys)) {
+ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, phys)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -585,6 +586,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
u16 len;
u32 err;
+ /* clean up tx descriptors */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+
while (cnt && !last) {
buf = priv->rx_buf[priv->rx_head];
skb = build_skb(buf, priv->rx_buf_size);
@@ -593,7 +597,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
goto refill;
}
- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[priv->rx_head] = 0;
@@ -622,9 +626,9 @@ refill:
buf = netdev_alloc_frag(priv->rx_buf_size);
if (!buf)
goto done;
- phys = dma_map_single(&ndev->dev, buf,
+ phys = dma_map_single(priv->dev, buf,
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
goto done;
priv->rx_buf[priv->rx_head] = buf;
priv->rx_phys[priv->rx_head] = phys;
@@ -645,8 +649,7 @@ refill:
}
napi_complete_done(napi, rx);
done:
- /* clean up tx descriptors and start a new timer if necessary */
- tx_remaining = hip04_tx_reclaim(ndev, false);
+ /* start a new timer if necessary */
if (rx < budget && tx_remaining)
hip04_start_tx_timer(priv);
@@ -728,9 +731,9 @@ static int hip04_mac_open(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
dma_addr_t phys;
- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+ phys = dma_map_single(priv->dev, priv->rx_buf[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
return -EIO;
priv->rx_phys[i] = phys;
@@ -764,7 +767,7 @@ static int hip04_mac_stop(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
if (priv->rx_phys[i]) {
- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+ dma_unmap_single(priv->dev, priv->rx_phys[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[i] = 0;
}
@@ -907,6 +910,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->dev = d;
priv->ndev = ndev;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 4138a8480347..cca71ba7a74a 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3251,7 +3251,7 @@ static int ehea_mem_notifier(struct notifier_block *nb,
switch (action) {
case MEM_CANCEL_OFFLINE:
pr_info("memory offlining canceled");
- /* Fall through: re-add canceled memory block */
+ /* Fall through - re-add canceled memory block */
case MEM_ONLINE:
pr_info("memory is going online");
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index f660cc2b8258..0b9e851f3da4 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -319,20 +319,33 @@ static int orion_mdio_probe(struct platform_device *pdev)
init_waitqueue_head(&dev->smi_busy_wait);
- for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
- dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
- if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ if (pdev->dev.of_node) {
+ for (i = 0; i < ARRAY_SIZE(dev->clk); i++) {
+ dev->clk[i] = of_clk_get(pdev->dev.of_node, i);
+ if (PTR_ERR(dev->clk[i]) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto out_clk;
+ }
+ if (IS_ERR(dev->clk[i]))
+ break;
+ clk_prepare_enable(dev->clk[i]);
+ }
+
+ if (!IS_ERR(of_clk_get(pdev->dev.of_node,
+ ARRAY_SIZE(dev->clk))))
+ dev_warn(&pdev->dev,
+ "unsupported number of clocks, limiting to the first "
+ __stringify(ARRAY_SIZE(dev->clk)) "\n");
+ } else {
+ dev->clk[0] = clk_get(&pdev->dev, NULL);
+ if (PTR_ERR(dev->clk[0]) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto out_clk;
}
- if (IS_ERR(dev->clk[i]))
- break;
- clk_prepare_enable(dev->clk[i]);
+ if (!IS_ERR(dev->clk[0]))
+ clk_prepare_enable(dev->clk[0]);
}
- if (!IS_ERR(of_clk_get(pdev->dev.of_node, ARRAY_SIZE(dev->clk))))
- dev_warn(&pdev->dev, "unsupported number of clocks, limiting to the first "
- __stringify(ARRAY_SIZE(dev->clk)) "\n");
dev->err_interrupt = platform_get_irq(pdev, 0);
if (dev->err_interrupt > 0 &&
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index e9d8ffe897e9..74fd9e171865 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -811,6 +811,26 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
return 0;
}
+static void mvpp2_set_hw_csum(struct mvpp2_port *port,
+ enum mvpp2_bm_pool_log_num new_long_pool)
+{
+ const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Update L4 checksum when jumbo enable/disable on port.
+ * Only port 0 supports hardware checksum offload due to
+ * the Tx FIFO size limitation.
+ * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
+ * has 7 bits, so the maximum L3 offset is 128.
+ */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ port->dev->features &= ~csums;
+ port->dev->hw_features &= ~csums;
+ } else {
+ port->dev->features |= csums;
+ port->dev->hw_features |= csums;
+ }
+}
+
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -843,15 +863,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
/* Add port to new short & long pool */
mvpp2_swf_bm_pool_init(port);
- /* Update L4 checksum when jumbo enable/disable on port */
- if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- dev->hw_features &= ~(NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM);
- } else {
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- }
+ mvpp2_set_hw_csum(port, new_long_pool);
}
dev->mtu = mtu;
@@ -3701,6 +3713,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
+ bool running = netif_running(dev);
int err;
if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3709,40 +3722,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
- if (!netif_running(dev)) {
- err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- return 0;
- }
-
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
- }
-
- mvpp2_stop_dev(port);
+ if (running)
+ mvpp2_stop_dev(port);
err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
+ if (err) {
+ netdev_err(dev, "failed to change MTU\n");
+ /* Reconfigure BM to the original MTU */
+ mvpp2_bm_update_mtu(dev, dev->mtu);
+ } else {
port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- goto out_start;
}
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
-
-out_start:
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
+ if (running) {
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ }
- return 0;
-log_error:
- netdev_err(dev, "failed to change MTU\n");
return err;
}
@@ -4740,9 +4737,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
else
ctrl0 &= ~MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
- ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
- MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+ ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+ MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
+ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
if (old_ctrl0 != ctrl0)
writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
@@ -5207,10 +5204,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
dev->features |= NETIF_F_NTUPLE;
}
- if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
- dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
- }
+ mvpp2_set_hw_csum(port, port->pool_long->id);
dev->vlan_features |= features;
dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
@@ -5756,9 +5750,6 @@ static int mvpp2_remove(struct platform_device *pdev)
mvpp2_dbgfs_cleanup(priv);
- flush_workqueue(priv->stats_queue);
- destroy_workqueue(priv->stats_queue);
-
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -5767,6 +5758,8 @@ static int mvpp2_remove(struct platform_device *pdev)
i++;
}
+ destroy_workqueue(priv->stats_queue);
+
for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
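
The new mvpp2_set_hw_csum() helper above centralizes the rule spelled out in its comment: only port 0 keeps IP/IPv6 checksum offload when the jumbo pool is in use, and generic NETIF_F_HW_CSUM is never advertised because the TX descriptor's L3 offset field is only 7 bits wide. The sketch below is illustrative only; the constant names and helper are assumptions, not driver API. It shows the bound such a field imposes and why protocol-specific offload (fixed, shallow L3 offsets) fits it while arbitrary-offset generic checksumming may not.

	/* Illustrative sketch only -- not driver code. Shows the limit a
	 * 7-bit L3-offset descriptor field imposes, which is why the port
	 * advertises NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM rather than generic
	 * NETIF_F_HW_CSUM.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define L3_OFFSET_BITS	7
	#define L3_OFFSET_MAX	((1u << L3_OFFSET_BITS) - 1)	/* 127 */

	static bool l3_offset_fits(unsigned int l3_offset)
	{
		return l3_offset <= L3_OFFSET_MAX;
	}

	int main(void)
	{
		/* Plain Ethernet and VLAN-tagged frames fit comfortably... */
		printf("eth (14):        %s\n", l3_offset_fits(14) ? "ok" : "too deep");
		printf("eth+vlan (18):   %s\n", l3_offset_fits(18) ? "ok" : "too deep");
		/* ...but the arbitrary offsets generic HW_CSUM would have to
		 * handle (e.g. deeply encapsulated inner headers) may not.
		 */
		printf("deep encap (200): %s\n", l3_offset_fits(200) ? "ok" : "too deep");
		return 0;
	}
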
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 762fe0821923..c2e00bb587cd 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4924,6 +4924,13 @@ static const struct dmi_system_id msi_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
},
},
+ {
+ .ident = "ASUS P6T",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+ },
+ },
{}
};
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index 263cd0909fe0..1f7fff81f24d 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -9,7 +9,6 @@ if NET_VENDOR_MEDIATEK
config NET_MEDIATEK_SOC
tristate "MediaTek SoC Gigabit Ethernet support"
- depends on NET_VENDOR_MEDIATEK
select PHYLIB
---help---
This driver supports the gigabit ethernet MACs in the
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 5bb6a26ea267..50862275544e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -213,7 +213,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
struct mlx5_interface *intf;
mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
+ list_for_each_entry_reverse(intf, &intf_list, list)
mlx5_remove_device(intf, priv);
list_del(&priv->dev_list);
mutex_unlock(&mlx5_intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 30f13f81c965..0807992090b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -159,7 +159,7 @@ do { \
enum mlx5e_rq_group {
MLX5E_RQ_GROUP_REGULAR,
MLX5E_RQ_GROUP_XSK,
- MLX5E_NUM_RQ_GROUPS /* Keep last. */
+#define MLX5E_NUM_RQ_GROUPS(g) (1 + MLX5E_RQ_GROUP_##g)
};
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
@@ -182,14 +182,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS);
}
-/* Use this function to get max num channels after netdev was created */
-static inline int mlx5e_get_netdev_max_channels(struct net_device *netdev)
-{
- return min_t(unsigned int,
- netdev->num_rx_queues / MLX5E_NUM_RQ_GROUPS,
- netdev->num_tx_queues);
-}
-
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
@@ -829,6 +821,7 @@ struct mlx5e_priv {
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
+ u16 max_nch;
u8 max_opened_tc;
struct hwtstamp_config tstamp;
u16 q_counter;
@@ -870,6 +863,7 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
int max_tc;
+ u8 rq_groups;
};
void mlx5e_build_ptys2ethtool_map(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index bd882b5ee9a7..3a615d663d84 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -66,9 +66,10 @@ static inline void mlx5e_qid_get_ch_and_group(struct mlx5e_params *params,
*group = qid / nch;
}
-static inline bool mlx5e_qid_validate(struct mlx5e_params *params, u64 qid)
+static inline bool mlx5e_qid_validate(const struct mlx5e_profile *profile,
+ struct mlx5e_params *params, u64 qid)
{
- return qid < params->num_channels * MLX5E_NUM_RQ_GROUPS;
+ return qid < params->num_channels * profile->rq_groups;
}
/* Parameter calculations */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index d5e5afbdca6d..f777994f3005 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -78,9 +78,10 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
};
static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
- const u32 **arr, u32 *size)
+ const u32 **arr, u32 *size,
+ bool force_legacy)
{
- bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
ARRAY_SIZE(mlx5e_link_speed);
@@ -152,7 +153,8 @@ int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
+u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+ bool force_legacy)
{
unsigned long temp = eth_proto_oper;
const u32 *table;
@@ -160,7 +162,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
i = find_first_bit(&temp, max_size);
if (i < max_size)
speed = table[i];
@@ -170,6 +172,7 @@ u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper)
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
{
struct mlx5e_port_eth_proto eproto;
+ bool force_legacy = false;
bool ext;
int err;
@@ -177,8 +180,13 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
goto out;
-
- *speed = mlx5e_port_ptys2speed(mdev, eproto.oper);
+ if (ext && !eproto.admin) {
+ force_legacy = true;
+ err = mlx5_port_query_eth_proto(mdev, 1, false, &eproto);
+ if (err)
+ goto out;
+ }
+ *speed = mlx5e_port_ptys2speed(mdev, eproto.oper, force_legacy);
if (!(*speed))
err = -EINVAL;
@@ -201,7 +209,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
if (err)
return err;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, false);
for (i = 0; i < max_size; ++i)
if (eproto.cap & MLX5E_PROT_MASK(i))
max_speed = max(max_speed, table[i]);
@@ -210,14 +218,15 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
return 0;
}
-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed)
+u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+ bool force_legacy)
{
u32 link_modes = 0;
const u32 *table;
u32 max_size;
int i;
- mlx5e_port_get_speed_arr(mdev, &table, &max_size);
+ mlx5e_port_get_speed_arr(mdev, &table, &max_size, force_legacy);
for (i = 0; i < max_size; ++i) {
if (table[i] == speed)
link_modes |= MLX5E_PROT_MASK(i);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index 70f536ec51c4..4a7f4497692b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -48,10 +48,12 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
u8 *an_disable_cap, u8 *an_disable_admin);
int mlx5_port_set_eth_ptys(struct mlx5_core_dev *dev, bool an_disable,
u32 proto_admin, bool ext);
-u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper);
+u32 mlx5e_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+ bool force_legacy);
int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
-u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed);
+u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+ bool force_legacy);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index ea032f54197e..3766545ce259 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -412,7 +412,7 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
goto out;
tls_ctx = tls_get_ctx(skb->sk);
- if (unlikely(tls_ctx->netdev != netdev))
+ if (unlikely(WARN_ON_ONCE(tls_ctx->netdev != netdev)))
goto err_out;
priv_tx = mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a6b0eda0bd1a..02530b50609c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -391,7 +391,7 @@ void mlx5e_ethtool_get_channels(struct mlx5e_priv *priv,
{
mutex_lock(&priv->state_lock);
- ch->max_combined = mlx5e_get_netdev_max_channels(priv->netdev);
+ ch->max_combined = priv->max_nch;
ch->combined_count = priv->channels.params.num_channels;
if (priv->xsk.refcnt) {
/* The upper half are XSK queues. */
@@ -785,7 +785,7 @@ static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings
}
static void get_speed_duplex(struct net_device *netdev,
- u32 eth_proto_oper,
+ u32 eth_proto_oper, bool force_legacy,
struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -795,7 +795,7 @@ static void get_speed_duplex(struct net_device *netdev,
if (!netif_carrier_ok(netdev))
goto out;
- speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper);
+ speed = mlx5e_port_ptys2speed(priv->mdev, eth_proto_oper, force_legacy);
if (!speed) {
speed = SPEED_UNKNOWN;
goto out;
@@ -914,8 +914,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
/* Fields: eth_proto_admin and ext_eth_proto_admin are
* mutually exclusive. Hence try reading legacy advertising
* when extended advertising is zero.
- * admin_ext indicates how eth_proto_admin should be
- * interpreted
+ * admin_ext indicates which proto_admin (ext vs. legacy)
+ * should be read and interpreted
*/
admin_ext = ext;
if (ext && !eth_proto_admin) {
@@ -924,7 +924,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
admin_ext = false;
}
- eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
+ eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, admin_ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
@@ -939,7 +939,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
get_supported(mdev, eth_proto_cap, link_ksettings);
get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
admin_ext);
- get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
+ get_speed_duplex(priv->netdev, eth_proto_oper, !admin_ext,
+ link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -1016,45 +1017,69 @@ static u32 mlx5e_ethtool2ptys_ext_adver_link(const unsigned long *link_modes)
return ptys_modes;
}
+static bool ext_link_mode_requested(const unsigned long *adver)
+{
+#define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
+ int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+
+ bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
+ return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
+static bool ext_speed_requested(u32 speed)
+{
+#define MLX5E_MAX_PTYS_LEGACY_SPEED 100000
+ return !!(speed > MLX5E_MAX_PTYS_LEGACY_SPEED);
+}
+
+static bool ext_requested(u8 autoneg, const unsigned long *adver, u32 speed)
+{
+ bool ext_link_mode = ext_link_mode_requested(adver);
+ bool ext_speed = ext_speed_requested(speed);
+
+ return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_speed;
+}
+
int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_port_eth_proto eproto;
+ const unsigned long *adver;
bool an_changes = false;
u8 an_disable_admin;
bool ext_supported;
- bool ext_requested;
u8 an_disable_cap;
bool an_disable;
u32 link_modes;
u8 an_status;
+ u8 autoneg;
u32 speed;
+ bool ext;
int err;
u32 (*ethtool2ptys_adver_func)(const unsigned long *adver);
-#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
+ adver = link_ksettings->link_modes.advertising;
+ autoneg = link_ksettings->base.autoneg;
+ speed = link_ksettings->base.speed;
- ext_requested = !!(link_ksettings->link_modes.advertising[0] >
- MLX5E_PTYS_EXT ||
- link_ksettings->link_modes.advertising[1]);
+ ext = ext_requested(autoneg, adver, speed);
ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
- ext_requested &= ext_supported;
+ if (!ext_supported && ext)
+ return -EOPNOTSUPP;
- speed = link_ksettings->base.speed;
- ethtool2ptys_adver_func = ext_requested ?
- mlx5e_ethtool2ptys_ext_adver_link :
+ ethtool2ptys_adver_func = ext ? mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
- err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
goto out;
}
- link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
- ethtool2ptys_adver_func(link_ksettings->link_modes.advertising) :
- mlx5e_port_speed2linkmodes(mdev, speed);
+ link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
+ mlx5e_port_speed2linkmodes(mdev, speed, !ext);
link_modes = link_modes & eproto.cap;
if (!link_modes) {
@@ -1067,14 +1092,14 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
mlx5_port_query_eth_autoneg(mdev, &an_status, &an_disable_cap,
&an_disable_admin);
- an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
+ an_disable = autoneg == AUTONEG_DISABLE;
an_changes = ((!an_disable && an_disable_admin) ||
(an_disable && !an_disable_admin));
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
mlx5_toggle_port_link(mdev);
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index a66589816e21..eed7101e8bb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -611,7 +611,8 @@ static int validate_flow(struct mlx5e_priv *priv,
return -ENOSPC;
if (fs->ring_cookie != RX_CLS_FLOW_DISC)
- if (!mlx5e_qid_validate(&priv->channels.params, fs->ring_cookie))
+ if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
+ fs->ring_cookie))
return -EINVAL;
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 4db595a7eb03..9a2fcef6e7f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -331,12 +331,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
- struct mlx5e_wqe_frag_info next_frag, *prev;
+ struct mlx5e_wqe_frag_info next_frag = {};
+ struct mlx5e_wqe_frag_info *prev = NULL;
int i;
next_frag.di = &rq->wqe.di[0];
- next_frag.offset = 0;
- prev = NULL;
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -1679,10 +1678,10 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_channel_param *cparam)
{
struct mlx5e_priv *priv = c->priv;
- int err, tc, max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ int err, tc;
for (tc = 0; tc < params->num_tc; tc++) {
- int txq_ix = c->ix + tc * max_nch;
+ int txq_ix = c->ix + tc * priv->max_nch;
err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
params, &cparam->sq, &c->sq[tc], tc);
@@ -2440,11 +2439,10 @@ int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int err;
int ix;
- for (ix = 0; ix < max_nch; ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt);
if (unlikely(err))
goto err_destroy_rqts;
@@ -2462,10 +2460,9 @@ err_destroy_rqts:
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < priv->max_nch; i++)
mlx5e_destroy_rqt(priv, &tirs[i].rqt);
}
@@ -2559,7 +2556,7 @@ static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
}
- for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
struct mlx5e_redirect_rqt_param direct_rrp = {
.is_rss = false,
{
@@ -2760,7 +2757,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
goto free_in;
}
- for (ix = 0; ix < mlx5e_get_netdev_max_channels(priv->netdev); ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
in, inlen);
if (err)
@@ -2860,12 +2857,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i, tc;
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < priv->max_nch; i++)
for (tc = 0; tc < priv->profile->max_tc; tc++)
- priv->channel_tc2txq[i][tc] = i + tc * max_nch;
+ priv->channel_tc2txq[i][tc] = i + tc * priv->max_nch;
}
static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
@@ -2886,7 +2882,7 @@ static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
int num_txqs = priv->channels.num * priv->channels.params.num_tc;
- int num_rxqs = priv->channels.num * MLX5E_NUM_RQ_GROUPS;
+ int num_rxqs = priv->channels.num * priv->profile->rq_groups;
struct net_device *netdev = priv->netdev;
mlx5e_netdev_set_tcs(netdev);
@@ -3308,7 +3304,6 @@ err_destroy_inner_tirs:
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_tir *tir;
void *tirc;
int inlen;
@@ -3321,7 +3316,7 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
if (!in)
return -ENOMEM;
- for (ix = 0; ix < max_nch; ix++) {
+ for (ix = 0; ix < priv->max_nch; ix++) {
memset(in, 0, inlen);
tir = &tirs[ix];
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
@@ -3360,10 +3355,9 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
{
- const int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
int i;
- for (i = 0; i < max_nch; i++)
+ for (i = 0; i < priv->max_nch; i++)
mlx5e_destroy_tir(priv->mdev, &tirs[i]);
}
@@ -3489,7 +3483,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s)
{
int i;
- for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+ for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats = &priv->channel_stats[i];
struct mlx5e_rq_stats *xskrq_stats = &channel_stats->xskrq;
struct mlx5e_rq_stats *rq_stats = &channel_stats->rq;
@@ -4964,8 +4958,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
return err;
mlx5e_build_nic_params(mdev, &priv->xsk, rss, &priv->channels.params,
- mlx5e_get_netdev_max_channels(netdev),
- netdev->mtu);
+ priv->max_nch, netdev->mtu);
mlx5e_timestamp_init(priv);
@@ -5168,6 +5161,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(XSK),
};
/* mlx5e generic netdev management API (move to en_common.c) */
@@ -5185,6 +5179,7 @@ int mlx5e_netdev_init(struct net_device *netdev,
priv->profile = profile;
priv->ppriv = ppriv;
priv->msglevel = MLX5E_MSG_LEVEL;
+ priv->max_nch = netdev->num_rx_queues / max_t(u8, profile->rq_groups, 1);
priv->max_opened_tc = 1;
mutex_init(&priv->state_lock);
@@ -5222,7 +5217,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
nch * profile->max_tc,
- nch * MLX5E_NUM_RQ_GROUPS);
+ nch * profile->rq_groups);
if (!netdev) {
mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index bf6f4835457e..fbb9de633578 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1718,6 +1718,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
@@ -1735,6 +1736,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = MLX5E_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
static bool
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 6eee3c7d4b06..94a32c76c182 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -174,7 +174,7 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
memset(s, 0, sizeof(*s));
- for (i = 0; i < mlx5e_get_netdev_max_channels(priv->netdev); i++) {
+ for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats =
&priv->channel_stats[i];
struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq;
@@ -1401,7 +1401,7 @@ static const struct counter_desc ch_stats_desc[] = {
static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
+ int max_nch = priv->max_nch;
return (NUM_RQ_STATS * max_nch) +
(NUM_CH_STATS * max_nch) +
@@ -1415,8 +1415,8 @@ static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
int idx)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
bool is_xsk = priv->xsk.ever_used;
+ int max_nch = priv->max_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
@@ -1458,8 +1458,8 @@ static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
int idx)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
bool is_xsk = priv->xsk.ever_used;
+ int max_nch = priv->max_nch;
int i, j, tc;
for (i = 0; i < max_nch; i++)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index c5d75e2ecf54..4d97cc47835f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1320,13 +1320,13 @@ static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
- u64 bytes, packets, lastuse = 0;
struct mlx5e_tc_flow *flow;
struct mlx5e_encap_entry *e;
struct mlx5_fc *counter;
struct neigh_table *tbl;
bool neigh_used = false;
struct neighbour *n;
+ u64 lastuse;
if (m_neigh->family == AF_INET)
tbl = &arp_tbl;
@@ -1349,7 +1349,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
if (mlx5e_is_offloaded_flow(flow)) {
counter = mlx5e_tc_get_counter(flow);
- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ lastuse = mlx5_fc_query_lastuse(counter);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
mlx5e_flow_put(netdev_priv(e->out_dev), flow);
neigh_used = true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index c50b6f0769c8..49b06b256c92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,7 +49,7 @@ static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
{
struct mlx5e_sq_stats *stats = sq->stats;
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
return;
@@ -61,7 +61,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
{
struct mlx5e_rq_stats *stats = rq->stats;
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = {};
if (unlikely(!test_bit(MLX5E_RQ_STATE_AM, &rq->state)))
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index c48c382f926f..c1252d6be0ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -68,7 +68,7 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_RX = 0X5,
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
- FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
+ FS_FT_MAX_TYPE = FS_FT_RDMA_RX,
};
enum fs_flow_table_op_mod {
@@ -275,7 +275,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_RDMA_RX != FS_FT_MAX_TYPE))\
)
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 51f1736c455d..1804cf3c3814 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -426,6 +426,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
}
EXPORT_SYMBOL(mlx5_fc_query);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
+{
+ return counter->cache.lastuse;
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 6bfaaab362dc..1a2560e3bf7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -88,8 +88,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev,
netdev->mtu = netdev->max_mtu;
mlx5e_build_nic_params(mdev, NULL, &priv->rss_params, &priv->channels.params,
- mlx5e_get_netdev_max_channels(netdev),
- netdev->mtu);
+ priv->max_nch, netdev->mtu);
mlx5i_build_nic_params(mdev, &priv->channels.params);
mlx5e_timestamp_init(priv);
@@ -118,11 +117,10 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
- int max_nch = mlx5e_get_netdev_max_channels(priv->netdev);
struct mlx5e_sw_stats s = { 0 };
int i, j;
- for (i = 0; i < max_nch; i++) {
+ for (i = 0; i < priv->max_nch; i++) {
struct mlx5e_channel_stats *channel_stats;
struct mlx5e_rq_stats *rq_stats;
@@ -436,6 +434,7 @@ static const struct mlx5e_profile mlx5i_nic_profile = {
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
/* mlx5i netdev NDos */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index 6e56fa769d2e..c5a491e22e55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -355,6 +355,7 @@ static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
.rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
.rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
.max_tc = MLX5I_MAX_NUM_TC,
+ .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};
const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 9277b3f125e8..5a8e94c0a95a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -6350,7 +6350,7 @@ static int __init mlxsw_sp_module_init(void)
return 0;
err_sp2_pci_driver_register:
- mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index c78d93afbb9d..db17ba35ec84 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -954,4 +954,8 @@ void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp);
+/* spectrum_nve_vxlan.c */
+int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 1537f70bc26d..888ba4300bcc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -437,8 +437,8 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
-#define MLXSW_SP2_SB_PR_INGRESS_SIZE 38128752
-#define MLXSW_SP2_SB_PR_EGRESS_SIZE 38128752
+#define MLXSW_SP2_SB_PR_INGRESS_SIZE 35297568
+#define MLXSW_SP2_SB_PR_EGRESS_SIZE 35297568
#define MLXSW_SP2_SB_PR_CPU_SIZE (256 * 1000)
/* Order according to mlxsw_sp2_sb_pool_dess */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
index 1df164a4b06d..17f334b46c40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c
@@ -775,6 +775,7 @@ static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
ops->fini(nve);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
nve->tunnel_index);
+ memset(&nve->config, 0, sizeof(nve->config));
}
nve->num_nve_tunnels--;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
index 0035640156a1..12f664f42f21 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h
@@ -29,6 +29,7 @@ struct mlxsw_sp_nve {
unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX];
u32 tunnel_index;
u16 ul_rif_index; /* Reserved for Spectrum */
+ unsigned int inc_parsing_depth_refs;
};
struct mlxsw_sp_nve_ops {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index 93ccd9fc2266..05517c7feaa5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -103,9 +103,9 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve,
config->udp_dport = cfg->dst_port;
}
-static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
- unsigned int parsing_depth,
- __be16 udp_dport)
+static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
+ unsigned int parsing_depth,
+ __be16 udp_dport)
{
char mprs_pl[MLXSW_REG_MPRS_LEN];
@@ -113,6 +113,56 @@ static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
}
+static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport)
+{
+ int parsing_depth = mlxsw_sp->nve->inc_parsing_depth_refs ?
+ MLXSW_SP_NVE_VXLAN_PARSING_DEPTH :
+ MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH;
+
+ return __mlxsw_sp_nve_parsing_set(mlxsw_sp, parsing_depth, udp_dport);
+}
+
+static int
+__mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport)
+{
+ int err;
+
+ mlxsw_sp->nve->inc_parsing_depth_refs++;
+
+ err = mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
+ if (err)
+ goto err_nve_parsing_set;
+ return 0;
+
+err_nve_parsing_set:
+ mlxsw_sp->nve->inc_parsing_depth_refs--;
+ return err;
+}
+
+static void
+__mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp,
+ __be16 udp_dport)
+{
+ mlxsw_sp->nve->inc_parsing_depth_refs--;
+ mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport);
+}
+
+int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp)
+{
+ __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
+
+ return __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, udp_dport);
+}
+
+void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp)
+{
+ __be16 udp_dport = mlxsw_sp->nve->config.udp_dport;
+
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, udp_dport);
+}
+
static void
mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl,
const struct mlxsw_sp_nve_config *config)
@@ -176,9 +226,7 @@ static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve,
struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
int err;
- err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
- MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
- config->udp_dport);
+ err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
if (err)
return err;
@@ -203,8 +251,7 @@ err_promote_decap:
err_rtdp_set:
mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
return err;
}
@@ -216,8 +263,7 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
config->ul_proto, &config->ul_sip);
mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp);
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
}
static int
@@ -320,9 +366,7 @@ static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve,
struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp;
int err;
- err = mlxsw_sp_nve_parsing_set(mlxsw_sp,
- MLXSW_SP_NVE_VXLAN_PARSING_DEPTH,
- config->udp_dport);
+ err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport);
if (err)
return err;
@@ -348,8 +392,7 @@ err_promote_decap:
err_rtdp_set:
mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
err_config_set:
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
return err;
}
@@ -361,8 +404,7 @@ static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve)
mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id,
config->ul_proto, &config->ul_sip);
mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp);
- mlxsw_sp_nve_parsing_set(mlxsw_sp, MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH,
- config->udp_dport);
+ __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0);
}
const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index bd9c2bc2d5d6..63b07edd9d81 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -979,6 +979,9 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *tmp;
+ u16 orig_ing_types = 0;
+ u16 orig_egr_types = 0;
+ int err;
int i;
/* MTPPPC configures timestamping globally, not per port. Find the
@@ -986,12 +989,26 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port,
*/
for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
tmp = mlxsw_sp->ports[i];
+ if (tmp) {
+ orig_ing_types |= tmp->ptp.ing_types;
+ orig_egr_types |= tmp->ptp.egr_types;
+ }
if (tmp && tmp != mlxsw_sp_port) {
ing_types |= tmp->ptp.ing_types;
egr_types |= tmp->ptp.egr_types;
}
}
+ if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) {
+ err = mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp);
+ if (err) {
+ netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth");
+ return err;
+ }
+ }
+ if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types))
+ mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp);
+
return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp,
ing_types, egr_types);
}
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index b71e4ecbe469..6932e615d4b0 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1818,6 +1818,7 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
+ cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
ocelot_ace_deinit();
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index d9cbe84ac6ad..1b840ee47339 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -444,12 +444,12 @@ static u8 *nfp_vnic_get_sw_stats_strings(struct net_device *netdev, u8 *data)
data = nfp_pr_et(data, "hw_rx_csum_complete");
data = nfp_pr_et(data, "hw_rx_csum_err");
data = nfp_pr_et(data, "rx_replace_buf_alloc_fail");
- data = nfp_pr_et(data, "rx_tls_decrypted");
+ data = nfp_pr_et(data, "rx_tls_decrypted_packets");
data = nfp_pr_et(data, "hw_tx_csum");
data = nfp_pr_et(data, "hw_tx_inner_csum");
data = nfp_pr_et(data, "tx_gather");
data = nfp_pr_et(data, "tx_lso");
- data = nfp_pr_et(data, "tx_tls_encrypted");
+ data = nfp_pr_et(data, "tx_tls_encrypted_packets");
data = nfp_pr_et(data, "tx_tls_ooo");
data = nfp_pr_et(data, "tx_tls_drop_no_sync_data");
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
index 70b1a03c0953..01229190132d 100644
--- a/drivers/net/ethernet/ni/Kconfig
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -11,7 +11,7 @@ config NET_VENDOR_NI
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about National Instrument devices.
+ the questions about National Instruments devices.
If you say Y, you will be asked for your specific device in the
following questions.
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index 8161e308e64b..ead3750b4489 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Packet engine device configuration
+# Packet Engines device configuration
#
config NET_VENDOR_PACKET_ENGINES
- bool "Packet Engine devices"
+ bool "Packet Engines devices"
default y
depends on PCI
---help---
@@ -12,7 +12,7 @@ config NET_VENDOR_PACKET_ENGINES
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about packet engine devices. If you say Y, you will
+ the questions about Packet Engines devices. If you say Y, you will
be asked for your specific card in the following questions.
if NET_VENDOR_PACKET_ENGINES
diff --git a/drivers/net/ethernet/packetengines/Makefile b/drivers/net/ethernet/packetengines/Makefile
index 1553c9cfc254..cf054b796d11 100644
--- a/drivers/net/ethernet/packetengines/Makefile
+++ b/drivers/net/ethernet/packetengines/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Makefile for the Packet Engine network device drivers.
+# Makefile for the Packet Engines network device drivers.
#
obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 4e8118a08654..9f5113639eaf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1093,7 +1093,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
snprintf(bit_name, 30,
p_aeu->bit_name, num);
else
- strncpy(bit_name,
+ strlcpy(bit_name,
p_aeu->bit_name, 30);
/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 17c64e43d6c3..158ac0738911 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
/* Vendor specific information */
dev->vendor_id = cdev->vendor_id;
dev->vendor_part_id = cdev->device_id;
- dev->hw_ver = 0;
+ dev->hw_ver = cdev->chip_rev;
dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
(FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 60189923737a..21d38167f961 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -206,9 +206,9 @@ rmnet_map_ipv4_ul_csum_header(void *iphdr,
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
if (ip4h->protocol == IPPROTO_UDP)
- ul_header->udp_ip4_ind = 1;
+ ul_header->udp_ind = 1;
else
- ul_header->udp_ip4_ind = 0;
+ ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
@@ -239,6 +239,7 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
struct rmnet_map_ul_csum_header *ul_header,
struct sk_buff *skb)
{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
__be16 *hdr = (__be16 *)ul_header, offset;
offset = htons((__force u16)(skb_transport_header(skb) -
@@ -246,7 +247,11 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
ul_header->csum_start_offset = offset;
ul_header->csum_insert_offset = skb->csum_offset;
ul_header->csum_enabled = 1;
- ul_header->udp_ip4_ind = 0;
+
+ if (ip6h->nexthdr == IPPROTO_UDP)
+ ul_header->udp_ind = 1;
+ else
+ ul_header->udp_ind = 0;
/* Changing remaining fields to network order */
hdr++;
@@ -419,7 +424,7 @@ sw_csum:
ul_header->csum_start_offset = 0;
ul_header->csum_insert_offset = 0;
ul_header->csum_enabled = 0;
- ul_header->udp_ip4_ind = 0;
+ ul_header->udp_ind = 0;
priv->stats.csum_sw++;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index a10ff9e1efec..fa6eae2e7ed8 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -6098,10 +6098,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
if (ret)
return ret;
- if (tp->supports_gmii)
- phy_remove_link_mode(phydev,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- else
+ if (!tp->supports_gmii)
phy_set_max_speed(phydev, SPEED_100);
phy_support_asym_pause(phydev);
@@ -6552,13 +6549,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
rtl_unlock_config_regs(tp);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
rtl_lock_config_regs(tp);
+ /* fall through */
+ case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
flags = PCI_IRQ_LEGACY;
- } else {
+ break;
+ default:
flags = PCI_IRQ_ALL_TYPES;
+ break;
}
return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 079f459c73a5..2c5d3f5b84dd 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2208,10 +2208,12 @@ static int rocker_router_fib_event(struct notifier_block *nb,
if (fen_info->fi->fib_nh_is_v6) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
+ kfree(fib_work);
return notifier_from_errno(-EINVAL);
}
if (fen_info->fi->nh) {
NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ kfree(fib_work);
return notifier_from_errno(-EINVAL);
}
}
diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig
index 027938017579..e92a178a76df 100644
--- a/drivers/net/ethernet/samsung/Kconfig
+++ b/drivers/net/ethernet/samsung/Kconfig
@@ -11,7 +11,7 @@ config NET_VENDOR_SAMSUNG
say Y.
Note that the answer to this question does not directly affect
- the kernel: saying N will just case the configurator to skip all
+ the kernel: saying N will just cause the configurator to skip all
the questions about Samsung chipsets. If you say Y, you will be asked
for your specific chipset/driver in the following questions.
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index bd14803545de..8d88e4083456 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -712,6 +712,7 @@ static void smc911x_phy_detect(struct net_device *dev)
/* Found an external PHY */
break;
}
+ /* Else, fall through */
default:
/* Internal media only */
SMC_GET_PHY_ID1(lp, 1, id1);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 01c2e2d83e76..fc9954e4a772 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -85,6 +85,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
u32 value;
base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + base_register);
@@ -102,6 +104,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
u32 value;
base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + base_register);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
index 7f86dffb264d..3174b701aa90 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -44,11 +44,13 @@
#define XGMAC_CORE_INIT_RX 0
#define XGMAC_PACKET_FILTER 0x00000008
#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_HPF BIT(10)
#define XGMAC_FILTER_PCF BIT(7)
#define XGMAC_FILTER_PM BIT(4)
#define XGMAC_FILTER_HMC BIT(2)
#define XGMAC_FILTER_PR BIT(0)
#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
+#define XGMAC_MAX_HASH_TABLE 8
#define XGMAC_RXQ_CTRL0 0x000000a0
#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
@@ -99,11 +101,12 @@
#define XGMAC_MDIO_ADDR 0x00000200
#define XGMAC_MDIO_DATA 0x00000204
#define XGMAC_MDIO_C22P 0x00000220
-#define XGMAC_ADDR0_HIGH 0x00000300
+#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
+#define XGMAC_ADDR_MAX 32
#define XGMAC_AE BIT(31)
#define XGMAC_DCS GENMASK(19, 16)
#define XGMAC_DCS_SHIFT 16
-#define XGMAC_ADDR0_LOW 0x00000304
+#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
#define XGMAC_ARP_ADDR 0x00000c10
#define XGMAC_TIMESTAMP_STATUS 0x00000d20
#define XGMAC_TXTSC BIT(15)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 0a32c96a7854..85c68b7ee8c6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -4,6 +4,8 @@
* stmmac XGMAC support.
*/
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
#include "stmmac.h"
#include "dwxgmac2.h"
@@ -106,6 +108,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
u32 value, reg;
reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + reg);
value &= ~XGMAC_PSRQ(queue);
@@ -169,6 +173,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
u32 value, reg;
reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + reg);
value &= ~XGMAC_QxMDMACH(queue);
@@ -278,10 +284,10 @@ static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
u32 value;
value = (addr[5] << 8) | addr[4];
- writel(value | XGMAC_AE, ioaddr + XGMAC_ADDR0_HIGH);
+ writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(value, ioaddr + XGMAC_ADDR0_LOW);
+ writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
}
static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
@@ -291,8 +297,8 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
u32 hi_addr, lo_addr;
/* Read the MAC address from the hardware */
- hi_addr = readl(ioaddr + XGMAC_ADDR0_HIGH);
- lo_addr = readl(ioaddr + XGMAC_ADDR0_LOW);
+ hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
+ lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
/* Extract the MAC address from the high and low words */
addr[0] = lo_addr & 0xff;
@@ -303,19 +309,82 @@ static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
addr[5] = (hi_addr >> 8) & 0xff;
}
+static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
+ int mcbitslog2)
+{
+ int numhashregs, regs;
+
+ switch (mcbitslog2) {
+ case 6:
+ numhashregs = 2;
+ break;
+ case 7:
+ numhashregs = 4;
+ break;
+ case 8:
+ numhashregs = 8;
+ break;
+ default:
+ return;
+ }
+
+ for (regs = 0; regs < numhashregs; regs++)
+ writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
+}
+
static void dwxgmac2_set_filter(struct mac_device_info *hw,
struct net_device *dev)
{
void __iomem *ioaddr = (void __iomem *)dev->base_addr;
- u32 value = XGMAC_FILTER_RA;
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+ int mcbitslog2 = hw->mcast_bits_log2;
+ u32 mc_filter[8];
+ int i;
+
+ value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
+ value |= XGMAC_FILTER_HPF;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
if (dev->flags & IFF_PROMISC) {
- value |= XGMAC_FILTER_PR | XGMAC_FILTER_PCF;
+ value |= XGMAC_FILTER_PR;
+ value |= XGMAC_FILTER_PCF;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
value |= XGMAC_FILTER_PM;
- writel(~0x0, ioaddr + XGMAC_HASH_TABLE(0));
- writel(~0x0, ioaddr + XGMAC_HASH_TABLE(1));
+
+ for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+ value |= XGMAC_FILTER_HMC;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
+ (32 - mcbitslog2));
+ mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
+ }
+ }
+
+ dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
+
+ /* Handle multiple unicast addresses */
+ if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
+ value |= XGMAC_FILTER_PR;
+ } else {
+ struct netdev_hw_addr *ha;
+ int reg = 1;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwxgmac2_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
+
+ for ( ; reg < XGMAC_ADDR_MAX; reg++) {
+ writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
+ writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
+ }
}
writel(value, ioaddr + XGMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index c7c9e5f162e6..fd54c7c87485 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -814,20 +814,15 @@ static void stmmac_validate(struct phylink_config *config,
phylink_set(mac_supported, 10baseT_Full);
phylink_set(mac_supported, 100baseT_Half);
phylink_set(mac_supported, 100baseT_Full);
+ phylink_set(mac_supported, 1000baseT_Half);
+ phylink_set(mac_supported, 1000baseT_Full);
+ phylink_set(mac_supported, 1000baseKX_Full);
phylink_set(mac_supported, Autoneg);
phylink_set(mac_supported, Pause);
phylink_set(mac_supported, Asym_Pause);
phylink_set_port_modes(mac_supported);
- if (priv->plat->has_gmac ||
- priv->plat->has_gmac4 ||
- priv->plat->has_xgmac) {
- phylink_set(mac_supported, 1000baseT_Half);
- phylink_set(mac_supported, 1000baseT_Full);
- phylink_set(mac_supported, 1000baseKX_Full);
- }
-
/* Cut down 1G if asked to */
if ((max_speed > 0) && (max_speed < 1000)) {
phylink_set(mask, 1000baseT_Full);
@@ -1295,6 +1290,8 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
"(%s) dma_rx_phy=0x%08x\n", __func__,
(u32)rx_q->dma_rx_phy);
+ stmmac_clear_rx_descriptors(priv, queue);
+
for (i = 0; i < DMA_RX_SIZE; i++) {
struct dma_desc *p;
@@ -1312,8 +1309,6 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
rx_q->cur_rx = 0;
rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
- stmmac_clear_rx_descriptors(priv, queue);
-
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
@@ -1555,9 +1550,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
}
- rx_q->buf_pool = kmalloc_array(DMA_RX_SIZE,
- sizeof(*rx_q->buf_pool),
- GFP_KERNEL);
+ rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
if (!rx_q->buf_pool)
goto err_dma;
@@ -1608,15 +1602,15 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
- sizeof(*tx_q->tx_skbuff_dma),
- GFP_KERNEL);
+ tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
goto err_dma;
- tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
- sizeof(struct sk_buff *),
- GFP_KERNEL);
+ tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
if (!tx_q->tx_skbuff)
goto err_dma;
@@ -3277,9 +3271,11 @@ static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
- int dirty = stmmac_rx_dirty(priv, queue);
+ int len, dirty = stmmac_rx_dirty(priv, queue);
unsigned int entry = rx_q->dirty_rx;
+ len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+
while (dirty-- > 0) {
struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
struct dma_desc *p;
@@ -3297,6 +3293,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
}
buf->addr = page_pool_get_dma_addr(buf->page);
+
+ /* Sync whole allocation to device. This will invalidate old
+ * data.
+ */
+ dma_sync_single_for_device(priv->device, buf->addr, len,
+ DMA_FROM_DEVICE);
+
stmmac_set_desc_addr(priv, p, buf->addr);
stmmac_refill_desc3(priv, rx_q, p);
@@ -3431,8 +3434,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
skb_copy_to_linear_data(skb, page_address(buf->page),
frame_len);
skb_put(skb, frame_len);
- dma_sync_single_for_device(priv->device, buf->addr,
- frame_len, DMA_FROM_DEVICE);
if (netif_msg_pktdata(priv)) {
netdev_dbg(priv->dev, "frame received (%dbytes)",
@@ -4319,8 +4320,9 @@ int stmmac_dvr_probe(struct device *device,
NAPI_POLL_WEIGHT);
}
if (queue < priv->plat->tx_queues_to_use) {
- netif_napi_add(ndev, &ch->tx_napi, stmmac_napi_poll_tx,
- NAPI_POLL_WEIGHT);
+ netif_tx_napi_add(ndev, &ch->tx_napi,
+ stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 0f3e6ce7f6ec..eaf8f08f2e91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -376,6 +376,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
return ERR_PTR(-ENOMEM);
*mac = of_get_mac_address(np);
+ if (IS_ERR(*mac)) {
+ if (PTR_ERR(*mac) == -EPROBE_DEFER)
+ return ERR_CAST(*mac);
+
+ *mac = NULL;
+ }
+
plat->interface = of_get_phy_mode(np);
/* Some wrapper drivers still rely on phy_node. Let's save it while
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 58ea18af9813..37c0bc699cd9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
entry = &priv->tc_entries[i];
if (!entry->in_use && !first && free)
first = entry;
- if (entry->handle == loc && !free)
+ if ((entry->handle == loc) && !free && !entry->is_frag)
dup = entry;
}
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 5b196ebfed49..0f346761a2b2 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -788,6 +788,7 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
/* fallthrough, if we release the descriptors
* brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */
+ /* Fall through */
case SPIDER_NET_DESCR_RESPONSE_ERROR:
case SPIDER_NET_DESCR_PROTECTION_ERROR:
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index 2f354ba029a6..cd0a8f46e7c6 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -13,7 +13,7 @@ config NET_VENDOR_XSCALE
Note that the answer to this question does not directly affect the
kernel: saying N will just cause the configurator to skip all
- the questions about XSacle IXP devices. If you say Y, you will be
+ the questions about XScale IXP devices. If you say Y, you will be
asked for your specific card in the following questions.
if NET_VENDOR_XSCALE
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index daab2c07d891..9303aeb2595f 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -500,8 +500,9 @@ static int transmit(struct baycom_state *bc, int cnt, unsigned char stat)
}
break;
}
+ /* fall through */
- default: /* fall through */
+ default:
if (bc->hdlctx.calibrate <= 0)
return 0;
i = min_t(int, cnt, bc->hdlctx.calibrate);
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index 3ffe46df249e..7c5265fd2b94 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -216,8 +216,10 @@ static struct gpio_desc *fixed_phy_get_gpiod(struct device_node *np)
if (IS_ERR(gpiod)) {
if (PTR_ERR(gpiod) == -EPROBE_DEFER)
return gpiod;
- pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
- fixed_link_node);
+
+ if (PTR_ERR(gpiod) != -ENOENT)
+ pr_err("error getting GPIO for fixed link %pOF, proceed without\n",
+ fixed_link_node);
gpiod = NULL;
}
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 28676af97b42..645d354ffb48 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -2226,8 +2226,8 @@ static int vsc8514_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
@@ -2251,8 +2251,8 @@ static int vsc8574_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
@@ -2281,8 +2281,8 @@ static int vsc8584_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC8584_SUPP_LED_MODES;
vsc8531->hw_stats = vsc8584_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc8584_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
@@ -2311,8 +2311,8 @@ static int vsc85xx_probe(struct phy_device *phydev)
vsc8531->supp_led_modes = VSC85XX_SUPP_LED_MODES;
vsc8531->hw_stats = vsc85xx_hw_stats;
vsc8531->nstats = ARRAY_SIZE(vsc85xx_hw_stats);
- vsc8531->stats = devm_kmalloc_array(&phydev->mdio.dev, vsc8531->nstats,
- sizeof(u64), GFP_KERNEL);
+ vsc8531->stats = devm_kcalloc(&phydev->mdio.dev, vsc8531->nstats,
+ sizeof(u64), GFP_KERNEL);
if (!vsc8531->stats)
return -ENOMEM;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6b5cb87f3866..7ddd91df99e3 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1774,6 +1774,12 @@ done:
phydev->link = status & BMSR_LSTATUS ? 1 : 0;
phydev->autoneg_complete = status & BMSR_ANEGCOMPLETE ? 1 : 0;
+ /* Consider the case that autoneg was started and "aneg complete"
+ * bit has been reset, but "link up" bit not yet.
+ */
+ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
+ phydev->link = 0;
+
return 0;
}
EXPORT_SYMBOL(genphy_update_link);
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index b86a4b2116f8..59a94e07e7c5 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -48,8 +48,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
if (!phy->last_triggered)
led_trigger_event(&phy->led_link_trigger->trigger,
LED_FULL);
+ else
+ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
- led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
led_trigger_event(&plt->trigger, LED_FULL);
phy->last_triggered = plt;
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 5d0af041b8f9..a45c5de96ab1 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -216,6 +216,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
pl->supported, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
+ phylink_set(pl->supported, Pause);
+ phylink_set(pl->supported, Asym_Pause);
if (s) {
__set_bit(s->bit, pl->supported);
} else {
@@ -990,10 +992,10 @@ void phylink_start(struct phylink *pl)
}
if (pl->link_an_mode == MLO_AN_FIXED && pl->get_fixed_state)
mod_timer(&pl->link_poll, jiffies + HZ);
- if (pl->sfp_bus)
- sfp_upstream_start(pl->sfp_bus);
if (pl->phydev)
phy_start(pl->phydev);
+ if (pl->sfp_bus)
+ sfp_upstream_start(pl->sfp_bus);
}
EXPORT_SYMBOL_GPL(phylink_start);
@@ -1010,10 +1012,10 @@ void phylink_stop(struct phylink *pl)
{
ASSERT_RTNL();
- if (pl->phydev)
- phy_stop(pl->phydev);
if (pl->sfp_bus)
sfp_upstream_stop(pl->sfp_bus);
+ if (pl->phydev)
+ phy_stop(pl->phydev);
del_timer_sync(&pl->link_poll);
if (pl->link_irq) {
free_irq(pl->link_irq, pl);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 1d902ecb4aa8..a44dd3c8af63 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1115,6 +1115,9 @@ static const struct proto_ops pppoe_ops = {
.recvmsg = pppoe_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppoe_proto = {
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index 5ef422a43d70..08364f10a43f 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -17,6 +17,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
@@ -98,6 +99,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(pppox_ioctl);
+#ifdef CONFIG_COMPAT
+int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ if (cmd == PPPOEIOCSFWD32)
+ cmd = PPPOEIOCSFWD;
+
+ return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+
+EXPORT_SYMBOL(pppox_compat_ioctl);
+#endif
+
static int pppox_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index a8e52c8e4128..734de7de03f7 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -623,6 +623,9 @@ static const struct proto_ops pptp_ops = {
.recvmsg = sock_no_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppox_pptp_proto = {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3d443597bd04..db16d7a13e00 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1599,7 +1599,8 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
return true;
}
-static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
+static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
+ struct page_frag *alloc_frag, char *buf,
int buflen, int len, int pad)
{
struct sk_buff *skb = build_skb(buf, buflen);
@@ -1609,6 +1610,7 @@ static struct sk_buff *__tun_build_skb(struct page_frag *alloc_frag, char *buf,
skb_reserve(skb, pad);
skb_put(skb, len);
+ skb_set_owner_w(skb, tfile->socket.sk);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
@@ -1686,7 +1688,8 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
*/
if (hdr->gso_type || !xdp_prog) {
*skb_xdp = 1;
- return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
+ pad);
}
*skb_xdp = 0;
@@ -1723,7 +1726,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
rcu_read_unlock();
local_bh_enable();
- return __tun_build_skb(alloc_frag, buf, buflen, len, pad);
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
err_xdp:
put_page(alloc_frag->page);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6d25dea5ad4b..f7d117d80cfb 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -282,7 +282,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
- __u8 tmp;
+ __u8 tmp = 0;
__le16 retdatai;
int ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 69e0a2acfcb0..b6dc5d714b5e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1295,6 +1295,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e3d, 4)}, /* D-Link DWM-222 A2 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
+ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 39e0768d734d..0cc03a9ff545 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -50,7 +50,7 @@
#define PLA_TEREDO_WAKE_BASE 0xc0c4
#define PLA_MAR 0xcd00
#define PLA_BACKUP 0xd000
-#define PAL_BDC_CR 0xd1a0
+#define PLA_BDC_CR 0xd1a0
#define PLA_TEREDO_TIMER 0xd2cc
#define PLA_REALWOW_TIMER 0xd2e8
#define PLA_SUSPEND_FLAG 0xd38a
@@ -274,7 +274,7 @@
#define TEREDO_RS_EVENT_MASK 0x00fe
#define OOB_TEREDO_EN 0x0001
-/* PAL_BDC_CR */
+/* PLA_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
/* PLA_EFUSE_CMD */
@@ -3191,9 +3191,9 @@ static void r8152b_enter_oob(struct r8152 *tp)
rtl_rx_vlan_en(tp, true);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR);
ocp_data |= ALDPS_PROXY_MODE;
- ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
@@ -3577,9 +3577,9 @@ static void r8153_enter_oob(struct r8152 *tp)
rtl_rx_vlan_en(tp, true);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PAL_BDC_CR);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_BDC_CR);
ocp_data |= ALDPS_PROXY_MODE;
- ocp_write_word(tp, MCU_TYPE_PLA, PAL_BDC_CR, ocp_data);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BDC_CR, ocp_data);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index a9ac3f37b904..e2e679a01b65 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -413,6 +413,7 @@ static void sdla_errors(struct net_device *dev, int cmd, int dlci, int ret, int
case SDLA_RET_NO_BUFS:
if (cmd == SDLA_INFORMATION_WRITE)
break;
+ /* Else, fall through */
default:
netdev_dbg(dev, "Cmd 0x%02X generated return code 0x%02X\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index d55312ef58c9..9b0bb89599fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -776,7 +776,6 @@ struct iwl_rss_config_cmd {
u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
-#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
@@ -812,10 +811,12 @@ struct iwl_rxq_sync_notification {
*
* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ * @IWL_MVM_RXQ_NSSN_SYNC: notify all the RSS queues with the new NSSN
*/
enum iwl_mvm_rxq_notif_type {
IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
+ IWL_MVM_RXQ_NSSN_SYNC,
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index e411ac98290d..4d81776f576d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -2438,17 +2438,19 @@ static void iwl_fw_dbg_info_apply(struct iwl_fw_runtime *fwrt,
{
u32 img_name_len = le32_to_cpu(dbg_info->img_name_len);
u32 dbg_cfg_name_len = le32_to_cpu(dbg_info->dbg_cfg_name_len);
- const char err_str[] =
- "WRT: ext=%d. Invalid %s name length %d, expected %d\n";
if (img_name_len != IWL_FW_INI_MAX_IMG_NAME_LEN) {
- IWL_WARN(fwrt, err_str, ext, "image", img_name_len,
+ IWL_WARN(fwrt,
+ "WRT: ext=%d. Invalid image name length %d, expected %d\n",
+ ext, img_name_len,
IWL_FW_INI_MAX_IMG_NAME_LEN);
return;
}
if (dbg_cfg_name_len != IWL_FW_INI_MAX_DBG_CFG_NAME_LEN) {
- IWL_WARN(fwrt, err_str, ext, "debug cfg", dbg_cfg_name_len,
+ IWL_WARN(fwrt,
+ "WRT: ext=%d. Invalid debug cfg name length %d, expected %d\n",
+ ext, dbg_cfg_name_len,
IWL_FW_INI_MAX_DBG_CFG_NAME_LEN);
return;
}
@@ -2775,8 +2777,6 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
struct iwl_ucode_tlv *tlv = iter;
void *ini_tlv = (void *)tlv->data;
u32 type = le32_to_cpu(tlv->type);
- const char invalid_ap_str[] =
- "WRT: ext=%d. Invalid apply point %d for %s\n";
switch (type) {
case IWL_UCODE_TLV_TYPE_DEBUG_INFO:
@@ -2786,8 +2786,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
struct iwl_fw_ini_allocation_data *buf_alloc = ini_tlv;
if (pnt != IWL_FW_INI_APPLY_EARLY) {
- IWL_ERR(fwrt, invalid_ap_str, ext, pnt,
- "buffer allocation");
+ IWL_ERR(fwrt,
+ "WRT: ext=%d. Invalid apply point %d for buffer allocation\n",
+ ext, pnt);
goto next;
}
@@ -2797,8 +2798,9 @@ static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt,
}
case IWL_UCODE_TLV_TYPE_HCMD:
if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) {
- IWL_ERR(fwrt, invalid_ap_str, ext, pnt,
- "host command");
+ IWL_ERR(fwrt,
+ "WRT: ext=%d. Invalid apply point %d for host command\n",
+ ext, pnt);
goto next;
}
iwl_fw_dbg_send_hcmd(fwrt, tlv, ext);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 57d09049e615..38672dd5aae9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1640,6 +1640,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
init_completion(&drv->request_firmware_complete);
INIT_LIST_HEAD(&drv->list);
+ iwl_load_fw_dbg_tlv(drv->trans->dev, drv->trans);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* Create the device debugfs entries. */
drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
@@ -1660,8 +1662,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_drv);
- iwl_fw_dbg_free(drv->trans);
#endif
+ iwl_fw_dbg_free(drv->trans);
kfree(drv);
err:
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1d608e9e9101..5de54d1559dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -755,7 +755,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
for (i = 0; i < n_profiles; i++) {
/* the tables start at element 3 */
- static int pos = 3;
+ int pos = 3;
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
@@ -880,6 +880,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+ /*
+ * The GEO_TX_POWER_LIMIT command is not supported on earlier
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 36, 29 and
+ * 17.
+ */
+ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+}
+
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
struct iwl_geo_tx_power_profiles_resp *resp;
@@ -909,6 +925,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
.data = { data },
};
+ if (!iwl_mvm_sar_geo_support(mvm))
+ return -EOPNOTSUPP;
+
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -934,13 +953,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
- /*
- * This command is not supported on earlier firmware versions.
- * Unfortunately, we don't have a TLV API flag to rely on, so
- * rely on the major version which is in the first byte of
- * ucode_ver.
- */
- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+ if (!iwl_mvm_sar_geo_support(mvm))
return 0;
ret = iwl_mvm_sar_get_wgds_table(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index e63623251d61..b74bd58f3f45 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -207,11 +207,11 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
},
};
-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
- enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
@@ -474,7 +474,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
ieee80211_hw_set(hw, STA_MMPDU_TXQ);
- ieee80211_hw_set(hw, TX_AMSDU);
+ /*
+ * On older devices, enabling TX A-MSDU occasionally leads to
+ * something getting messed up, the command read from the FIFO
+ * gets out of sync and isn't a TX command, so that we have an
+ * assert EDC.
+ *
+ * It's not clear where the bug is, but since we didn't used to
+ * support A-MSDU until moving the mac80211 iTXQs, just leave it
+ * for older devices. We also don't see this issue on any newer
+ * devices.
+ */
+ if (mvm->cfg->device_family >= IWL_DEVICE_FAMILY_9000)
+ ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
if (iwl_mvm_has_tlc_offload(mvm)) {
@@ -2726,7 +2738,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
mvmvif->ap_early_keys[i] = NULL;
- ret = iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
+ ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key);
if (ret)
goto out_quota_failed;
}
@@ -3494,11 +3506,11 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
return ret;
}
-static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
- enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
@@ -3553,8 +3565,6 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
- mutex_lock(&mvm->mutex);
-
switch (cmd) {
case SET_KEY:
if ((vif->type == NL80211_IFTYPE_ADHOC ||
@@ -3700,7 +3710,22 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
ret = -EINVAL;
}
+ return ret;
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+ enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
mutex_unlock(&mvm->mutex);
+
return ret;
}
@@ -5042,7 +5067,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
int ret;
- lockdep_assert_held(&mvm->mutex);
if (!iwl_mvm_has_new_rx_api(mvm))
return;
@@ -5053,13 +5077,15 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
atomic_set(&mvm->queue_sync_counter,
mvm->trans->num_rx_queues);
- ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+ ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
+ size, !notif->sync);
if (ret) {
IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
goto out;
}
if (notif->sync) {
+ lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
atomic_read(&mvm->queue_sync_counter) == 0 ||
iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 48c77af54e99..a263cc629d75 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1664,9 +1664,9 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
- const u8 *data, u32 count);
-void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- int queue);
+ const u8 *data, u32 count, bool async);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue);
void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
@@ -1813,7 +1813,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif /* CONFIG_IWLWIFI_DEBUGFS */
/* rate scaling */
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync);
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index 719f793b3487..a9bb43a2f27b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -620,7 +620,7 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
enum iwl_mcc_source src;
char mcc[3];
struct ieee80211_regdomain *regd;
- u32 wgds_tbl_idx;
+ int wgds_tbl_idx;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d7d6f3398f86..4888054dc3d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1088,7 +1088,7 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
RX_QUEUES_NOTIFICATION)))
- iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+ iwl_mvm_rx_queue_notif(mvm, napi, rxb, 0);
else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF))
@@ -1812,7 +1812,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
RX_QUEUES_NOTIFICATION)))
- iwl_mvm_rx_queue_notif(mvm, rxb, queue);
+ iwl_mvm_rx_queue_notif(mvm, napi, rxb, queue);
else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 009e72abcd51..e4415e58fa78 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -1197,239 +1197,6 @@ static u8 rs_get_tid(struct ieee80211_hdr *hdr)
return tid;
}
-void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, struct ieee80211_tx_info *info, bool ndp)
-{
- int legacy_success;
- int retries;
- int i;
- struct iwl_lq_cmd *table;
- u32 lq_hwrate;
- struct rs_rate lq_rate, tx_resp_rate;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
- u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
- u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
- u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!lq_sta) {
- IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
- return;
- } else if (!lq_sta->pers.drv) {
- IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
- return;
- }
-
- /* This packet was aggregated but doesn't carry status info */
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
- &tx_resp_rate)) {
- WARN_ON_ONCE(1);
- return;
- }
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- /* Disable last tx check if we are debugging with fixed rate but
- * update tx stats */
- if (lq_sta->pers.dbg_fixed_rate) {
- int index = tx_resp_rate.index;
- enum rs_column column;
- int attempts, success;
-
- column = rs_get_column_from_rate(&tx_resp_rate);
- if (WARN_ONCE(column == RS_COLUMN_INVALID,
- "Can't map rate 0x%x to column",
- tx_resp_hwrate))
- return;
-
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- attempts = info->status.ampdu_len;
- success = info->status.ampdu_ack_len;
- } else {
- attempts = info->status.rates[0].count;
- success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- }
-
- lq_sta->pers.tx_stats[column][index].total += attempts;
- lq_sta->pers.tx_stats[column][index].success += success;
-
- IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
- tx_resp_hwrate, success, attempts);
- return;
- }
-#endif
-
- if (time_after(jiffies,
- (unsigned long)(lq_sta->last_tx +
- (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
- IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
- iwl_mvm_rs_rate_init(mvm, sta, info->band, true);
- return;
- }
- lq_sta->last_tx = jiffies;
-
- /* Ignore this Tx frame response if its initial rate doesn't match
- * that of latest Link Quality command. There may be stragglers
- * from a previous Link Quality command, but we're no longer interested
- * in those; they're either from the "active" mode while we're trying
- * to check "search" mode, or a prior "search" mode after we've moved
- * to a new "search" mode (which might become the new "active" mode).
- */
- table = &lq_sta->lq;
- lq_hwrate = le32_to_cpu(table->rs_table[0]);
- if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
- WARN_ON_ONCE(1);
- return;
- }
-
- /* Here we actually compare this rate to the latest LQ command */
- if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
- IWL_DEBUG_RATE(mvm,
- "tx resp color 0x%x does not match 0x%x\n",
- lq_color, LQ_FLAG_COLOR_GET(table->flags));
-
- /*
- * Since rates mis-match, the last LQ command may have failed.
- * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
- * ... driver.
- */
- lq_sta->missed_rate_counter++;
- if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
- lq_sta->missed_rate_counter = 0;
- IWL_DEBUG_RATE(mvm,
- "Too many rates mismatch. Send sync LQ. rs_state %d\n",
- lq_sta->rs_state);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
- }
- /* Regardless, ignore this status info for outdated rate */
- return;
- } else
- /* Rate did match, so reset the missed_rate_counter */
- lq_sta->missed_rate_counter = 0;
-
- if (!lq_sta->search_better_tbl) {
- curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else {
- curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- }
-
- if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
- IWL_DEBUG_RATE(mvm,
- "Neither active nor search matches tx rate\n");
- tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
- tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
- rs_dump_rate(mvm, &lq_rate, "ACTUAL");
-
- /*
- * no matching table found, let's by-pass the data collection
- * and continue to perform rate scale to find the rate table
- */
- rs_stay_in_table(lq_sta, true);
- goto done;
- }
-
- /*
- * Updating the frame history depends on whether packets were
- * aggregated.
- *
- * For aggregation, all packets were transmitted at the same rate, the
- * first index into rate scale table.
- */
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len,
- reduced_txp);
-
- /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
- * it as a single frame loss as we don't want the success ratio
- * to dip too quickly because a BA wasn't received.
- * For TPC, there's no need for this optimisation since we want
- * to recover very quickly from a bad power reduction and,
- * therefore we'd like the success ratio to get an immediate hit
- * when failing to get a BA, so we'd switch back to a lower or
- * zero power reduction. When FW transmits agg with a rate
- * different from the initial rate, it will not use reduced txp
- * and will send BA notification twice (one empty with reduced
- * txp equal to the value from LQ and one with reduced txp 0).
- * We need to update counters for each txp level accordingly.
- */
- if (info->status.ampdu_ack_len == 0)
- info->status.ampdu_len = 1;
-
- rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len);
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
- lq_sta->total_success += info->status.ampdu_ack_len;
- lq_sta->total_failed += (info->status.ampdu_len -
- info->status.ampdu_ack_len);
- }
- } else {
- /* For legacy, update frame history with for each Tx retry. */
- retries = info->status.rates[0].count - 1;
- /* HW doesn't send more than 15 retries */
- retries = min(retries, 15);
-
- /* The last transmission may have been successful */
- legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- /* Collect data for each rate used during failed TX attempts */
- for (i = 0; i <= retries; ++i) {
- lq_hwrate = le32_to_cpu(table->rs_table[i]);
- if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
- &lq_rate)) {
- WARN_ON_ONCE(1);
- return;
- }
-
- /*
- * Only collect stats if retried rate is in the same RS
- * table as active/search.
- */
- if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
- tmp_tbl = curr_tbl;
- else if (rs_rate_column_match(&lq_rate,
- &other_tbl->rate))
- tmp_tbl = other_tbl;
- else
- continue;
-
- rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
- tx_resp_rate.index, 1,
- i < retries ? 0 : legacy_success,
- reduced_txp);
- rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
- tx_resp_rate.index, 1,
- i < retries ? 0 : legacy_success);
- }
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
- lq_sta->total_success += legacy_success;
- lq_sta->total_failed += retries + (1 - legacy_success);
- }
- }
- /* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = lq_hwrate;
- IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
-done:
- /* See if there's a better rate or modulation mode to try. */
- if (sta->supp_rates[info->band])
- rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
-}
-
/*
* mac80211 sends us Tx status
*/
@@ -1442,8 +1209,9 @@ static void rs_drv_mac80211_tx_status(void *mvm_r,
struct iwl_op_mode *op_mode = mvm_r;
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+ if (!mvmsta->vif)
return;
if (!ieee80211_is_data(hdr->frame_control) ||
@@ -1584,6 +1352,18 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
}
+/* rs uses two tables: one is active and the other is used to search for a
+ * better configuration. Given the index of the currently active table, this
+ * function returns the search table, which sits at the complementary index
+ * (active = 1, search = 0 or active = 0, search = 1).
+ * Since lq_info is an array of size 2, make sure the index cannot be out of
+ * bounds.
+ */
+static inline u8 rs_search_tbl(u8 active_tbl)
+{
+ return (active_tbl ^ 1) & 1;
+}
+
static s32 rs_get_best_rate(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl, /* "search" */
@@ -1794,7 +1574,7 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
struct iwl_scale_tbl_info *tbl)
{
rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
}
static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
@@ -1931,9 +1711,9 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum rs_column col_id)
{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
struct rs_rate *rate = &search_tbl->rate;
const struct rs_tx_column *column = &rs_tx_columns[col_id];
const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
@@ -2341,7 +2121,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
else
- active_tbl = 1 - lq_sta->active_tbl;
+ active_tbl = rs_search_tbl(lq_sta->active_tbl);
tbl = &(lq_sta->lq_info[active_tbl]);
rate = &tbl->rate;
@@ -2565,7 +2345,7 @@ lq_update:
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
/* Access the "search" table, clear its history. */
- tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
rs_rate_scale_clear_tbl_windows(mvm, tbl);
/* Use new "search" start rate */
@@ -2896,7 +2676,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum nl80211_band band, bool update)
+ enum nl80211_band band)
{
struct iwl_scale_tbl_info *tbl;
struct rs_rate *rate;
@@ -2908,7 +2688,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
if (!lq_sta->search_better_tbl)
active_tbl = lq_sta->active_tbl;
else
- active_tbl = 1 - lq_sta->active_tbl;
+ active_tbl = rs_search_tbl(lq_sta->active_tbl);
tbl = &(lq_sta->lq_info[active_tbl]);
rate = &tbl->rate;
@@ -2926,7 +2706,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
rs_set_expected_tpt_table(lq_sta, tbl);
rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
/* TODO restore station should remember the lq cmd */
- iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, !update);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
}
static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
@@ -3175,7 +2955,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
* Called after adding a new station to initialize rate scaling
*/
static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum nl80211_band band, bool update)
+ enum nl80211_band band)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -3186,6 +2966,8 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband;
unsigned long supp; /* must be unsigned long for for_each_set_bit */
+ lockdep_assert_held(&mvmsta->lq_sta.rs_drv.pers.lock);
+
/* clear all non-persistent lq data */
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
@@ -3255,7 +3037,7 @@ static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_reset_frame_stats(mvm);
#endif
- rs_initialize_lq(mvm, sta, lq_sta, band, update);
+ rs_initialize_lq(mvm, sta, lq_sta, band);
}
static void rs_drv_rate_update(void *mvm_r,
@@ -3278,6 +3060,258 @@ static void rs_drv_rate_update(void *mvm_r,
iwl_mvm_rs_rate_init(mvm, sta, sband->band, true);
}
+static void __iwl_mvm_rs_tx_status(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ int tid, struct ieee80211_tx_info *info,
+ bool ndp)
+{
+ int legacy_success;
+ int retries;
+ int i;
+ struct iwl_lq_cmd *table;
+ u32 lq_hwrate;
+ struct rs_rate lq_rate, tx_resp_rate;
+ struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+ u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+ u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
+ u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->pers.drv) {
+ IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+ &tx_resp_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* Disable last tx check if we are debugging with fixed rate but
+ * update tx stats
+ */
+ if (lq_sta->pers.dbg_fixed_rate) {
+ int index = tx_resp_rate.index;
+ enum rs_column column;
+ int attempts, success;
+
+ column = rs_get_column_from_rate(&tx_resp_rate);
+ if (WARN_ONCE(column == RS_COLUMN_INVALID,
+ "Can't map rate 0x%x to column",
+ tx_resp_hwrate))
+ return;
+
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ attempts = info->status.ampdu_len;
+ success = info->status.ampdu_ack_len;
+ } else {
+ attempts = info->status.rates[0].count;
+ success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ }
+
+ lq_sta->pers.tx_stats[column][index].total += attempts;
+ lq_sta->pers.tx_stats[column][index].success += success;
+
+ IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+ tx_resp_hwrate, success, attempts);
+ return;
+ }
+#endif
+
+ if (time_after(jiffies,
+ (unsigned long)(lq_sta->last_tx +
+ (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
+ IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+ /* reach here only in case of driver RS, call directly
+ * the unlocked version
+ */
+ rs_drv_rate_init(mvm, sta, info->band);
+ return;
+ }
+ lq_sta->last_tx = jiffies;
+
+ /* Ignore this Tx frame response if its initial rate doesn't match
+ * that of latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ lq_hwrate = le32_to_cpu(table->rs_table[0]);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ /* Here we actually compare this rate to the latest LQ command */
+ if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
+ IWL_DEBUG_RATE(mvm,
+ "tx resp color 0x%x does not match 0x%x\n",
+ lq_color, LQ_FLAG_COLOR_GET(table->flags));
+
+ /* Since rates mis-match, the last LQ command may have failed.
+ * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
+ * ... driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ IWL_DEBUG_RATE(mvm,
+ "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+ lq_sta->rs_state);
+ iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ }
+
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ if (!lq_sta->search_better_tbl) {
+ curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ other_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+ } else {
+ curr_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+ other_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ }
+
+ if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
+ IWL_DEBUG_RATE(mvm,
+ "Neither active nor search matches tx rate\n");
+ tmp_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+ rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
+ tmp_tbl = &lq_sta->lq_info[rs_search_tbl(lq_sta->active_tbl)];
+ rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+ rs_dump_rate(mvm, &lq_rate, "ACTUAL");
+
+ /* no matching table found, let's by-pass the data collection
+ * and continue to perform rate scale to find the rate table
+ */
+ rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /* Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first index into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len,
+ reduced_txp);
+
+ /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+ * it as a single frame loss as we don't want the success ratio
+ * to dip too quickly because a BA wasn't received.
+ * For TPC, there's no need for this optimisation since we want
+ * to recover very quickly from a bad power reduction and,
+ * therefore we'd like the success ratio to get an immediate hit
+ * when failing to get a BA, so we'd switch back to a lower or
+ * zero power reduction. When FW transmits agg with a rate
+ * different from the initial rate, it will not use reduced txp
+ * and will send BA notification twice (one empty with reduced
+ * txp equal to the value from LQ and one with reduced txp 0).
+ * We need to update counters for each txp level accordingly.
+ */
+ if (info->status.ampdu_ack_len == 0)
+ info->status.ampdu_len = 1;
+
+ rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl,
+ tx_resp_rate.index,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed += (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+		/* For legacy, update frame history for each Tx retry. */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ lq_hwrate = le32_to_cpu(table->rs_table[i]);
+ if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+ &lq_rate)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ /* Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
+ tmp_tbl = curr_tbl;
+ else if (rs_rate_column_match(&lq_rate,
+ &other_tbl->rate))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+
+ rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+ tx_resp_rate.index, 1,
+ i < retries ? 0 : legacy_success,
+ reduced_txp);
+ rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
+ tx_resp_rate.index, 1,
+ i < retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = lq_hwrate;
+ IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta->supp_rates[info->band])
+ rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
+}
+
+void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+ int tid, struct ieee80211_tx_info *info, bool ndp)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	/* If it's locked we are in the middle of the init flow;
+	 * just wait for the next tx status to update the lq_sta data.
+	 */
+ if (!spin_trylock(&mvmsta->lq_sta.rs_drv.pers.lock))
+ return;
+
+ __iwl_mvm_rs_tx_status(mvm, sta, tid, info, ndp);
+ spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
+}
+
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd,
@@ -3569,7 +3603,7 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
- iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
+ iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd);
ss_params |= LQ_SS_BFER_ALLOWED;
IWL_DEBUG_RATE(mvm,
@@ -3735,7 +3769,7 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
if (lq_sta->pers.dbg_fixed_rate) {
rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
- iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
+ iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq);
}
}
@@ -4127,10 +4161,15 @@ static const struct rate_control_ops rs_mvm_ops_drv = {
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
enum nl80211_band band, bool update)
{
- if (iwl_mvm_has_tlc_offload(mvm))
+ if (iwl_mvm_has_tlc_offload(mvm)) {
rs_fw_rate_init(mvm, sta, band, update);
- else
- rs_drv_rate_init(mvm, sta, band, update);
+ } else {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+ spin_lock(&mvmsta->lq_sta.rs_drv.pers.lock);
+ rs_drv_rate_init(mvm, sta, band);
+ spin_unlock(&mvmsta->lq_sta.rs_drv.pers.lock);
+ }
}
int iwl_mvm_rate_control_register(void)
@@ -4160,7 +4199,7 @@ static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
}
- return iwl_mvm_send_lq_cmd(mvm, lq, false);
+ return iwl_mvm_send_lq_cmd(mvm, lq);
}
/**
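
The rs.c rework above moves the body of iwl_mvm_rs_tx_status() into __iwl_mvm_rs_tx_status() and guards it with spin_trylock() on the new pers.lock, so a Tx status that races with station (re)initialization is simply dropped and the next status report picks up the update. It also introduces rs_search_tbl() in place of the open-coded "1 - active_tbl" index flip, and iwl_mvm_send_lq_cmd() loses its bool sync argument (per the utils.c hunk further down, the command is now always sent with CMD_ASYNC). A minimal userspace analogue of the trylock-and-skip pattern and the complementary-index helper, using pthreads instead of kernel spinlocks; the names and the sample value are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pers_lock = PTHREAD_MUTEX_INITIALIZER;
static int lq_info[2];			/* stands in for the two rate tables   */
static unsigned int active_tbl;		/* index of the currently active table */

/* Complementary index into a two-entry array, kept in the 0..1 range. */
static inline unsigned int search_tbl(unsigned int active)
{
	return (active ^ 1) & 1;
}

/* Tx-status-style update: drop the sample if init currently owns the lock. */
static void tx_status_update(int sample)
{
	if (pthread_mutex_trylock(&pers_lock) != 0)
		return;	/* init flow in progress; the next status will do */

	lq_info[search_tbl(active_tbl)] = sample;
	pthread_mutex_unlock(&pers_lock);
}

int main(void)
{
	tx_status_update(42);
	printf("active=%u search=%u stored=%d\n", active_tbl,
	       search_tbl(active_tbl), lq_info[search_tbl(active_tbl)]);
	return 0;
}
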
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index f7eb60dbaf20..428642e66658 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -4,7 +4,7 @@
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
@@ -390,6 +390,7 @@ struct iwl_lq_sta {
s8 last_rssi;
struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
struct iwl_mvm *drv;
+ spinlock_t lock; /* for races in reinit/update table */
} pers;
};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 64f950501287..854edd7d7103 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -463,20 +463,22 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
}
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
- const u8 *data, u32 count)
+ const u8 *data, u32 count, bool async)
{
- struct iwl_rxq_sync_cmd *cmd;
+ u8 buf[sizeof(struct iwl_rxq_sync_cmd) +
+ sizeof(struct iwl_mvm_rss_sync_notif)];
+ struct iwl_rxq_sync_cmd *cmd = (void *)buf;
u32 data_size = sizeof(*cmd) + count;
int ret;
- /* should be DWORD aligned */
- if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
+ /*
+ * size must be a multiple of DWORD
+ * Ensure we don't overflow buf
+ */
+ if (WARN_ON(count & 3 ||
+ count > sizeof(struct iwl_mvm_rss_sync_notif)))
return -EINVAL;
- cmd = kzalloc(data_size, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
cmd->rxq_mask = cpu_to_le32(rxq_mask);
cmd->count = cpu_to_le32(count);
cmd->flags = 0;
@@ -485,9 +487,8 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(DATA_PATH_GROUP,
TRIGGER_RX_QUEUES_NOTIF_CMD),
- 0, data_size, cmd);
+ async ? CMD_ASYNC : 0, data_size, cmd);
- kfree(cmd);
return ret;
}
@@ -503,14 +504,31 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
!ieee80211_sn_less(sn1, sn2 - buffer_size);
}
+static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn)
+{
+ struct iwl_mvm_rss_sync_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NSSN_SYNC,
+ .metadata.sync = 0,
+ .nssn_sync.baid = baid,
+ .nssn_sync.nssn = nssn,
+ };
+
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+}
+
#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+enum iwl_mvm_release_flags {
+ IWL_MVM_RELEASE_SEND_RSS_SYNC = BIT(0),
+ IWL_MVM_RELEASE_FROM_RSS_SYNC = BIT(1),
+};
+
static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct napi_struct *napi,
struct iwl_mvm_baid_data *baid_data,
struct iwl_mvm_reorder_buffer *reorder_buf,
- u16 nssn)
+ u16 nssn, u32 flags)
{
struct iwl_mvm_reorder_buf_entry *entries =
&baid_data->entries[reorder_buf->queue *
@@ -519,6 +537,18 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
lockdep_assert_held(&reorder_buf->lock);
+ /*
+ * We keep the NSSN not too far behind, if we are sync'ing it and it
+ * is more than 2048 ahead of us, it must be behind us. Discard it.
+ * This can happen if the queue that hit the 0 / 2048 seqno was lagging
+ * behind and this queue already processed packets. The next if
+ * would have caught cases where this queue would have processed less
+ * than 64 packets, but it may have processed more than 64 packets.
+ */
+ if ((flags & IWL_MVM_RELEASE_FROM_RSS_SYNC) &&
+ ieee80211_sn_less(nssn, ssn))
+ goto set_timer;
+
/* ignore nssn smaller than head sn - this can happen due to timeout */
if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
goto set_timer;
@@ -529,6 +559,9 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
struct sk_buff *skb;
ssn = ieee80211_sn_inc(ssn);
+ if ((flags & IWL_MVM_RELEASE_SEND_RSS_SYNC) &&
+ (ssn == 2048 || ssn == 0))
+ iwl_mvm_sync_nssn(mvm, baid_data->baid, ssn);
/*
* Empty the list. Will have more than one frame for A-MSDU.
@@ -615,7 +648,8 @@ void iwl_mvm_reorder_timer_expired(struct timer_list *t)
sta_id, sn);
iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
sta, baid_data->tid);
- iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data,
+ buf, sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
rcu_read_unlock();
} else {
/*
@@ -657,7 +691,8 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
- reorder_buf->buf_size));
+ reorder_buf->buf_size),
+ 0);
spin_unlock_bh(&reorder_buf->lock);
del_timer_sync(&reorder_buf->reorder_timer);
@@ -665,8 +700,54 @@ out:
rcu_read_unlock();
}
-void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
- int queue)
+static void iwl_mvm_release_frames_from_notif(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ u8 baid, u16 nssn, int queue,
+ u32 flags)
+{
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ struct iwl_mvm_baid_data *ba_data;
+
+ IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
+ baid, nssn);
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
+ baid >= ARRAY_SIZE(mvm->baid_map)))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, napi, ba_data,
+ reorder_buf, nssn, flags);
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
+}
+
+static void iwl_mvm_nssn_sync(struct iwl_mvm *mvm,
+ struct napi_struct *napi, int queue,
+ const struct iwl_mvm_nssn_sync_data *data)
+{
+ iwl_mvm_release_frames_from_notif(mvm, napi, data->baid,
+ data->nssn, queue,
+ IWL_MVM_RELEASE_FROM_RSS_SYNC);
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct napi_struct *napi,
+ struct iwl_rx_cmd_buffer *rxb, int queue)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rxq_sync_notification *notif;
@@ -687,6 +768,10 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
case IWL_MVM_RXQ_NOTIF_DEL_BA:
iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
+ case IWL_MVM_RXQ_NSSN_SYNC:
+ iwl_mvm_nssn_sync(mvm, napi, queue,
+ (void *)internal_notif->data);
+ break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
@@ -785,7 +870,8 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
}
if (ieee80211_is_back_req(hdr->frame_control)) {
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+ buffer, nssn, 0);
goto drop;
}
@@ -794,7 +880,10 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* If the SN is smaller than the NSSN it might need to first go into
* the reorder buffer, in which case we just release up to it and the
* rest of the function will take care of storing it and releasing up to
- * the nssn
+ * the nssn.
+ * This should not happen. This queue has been lagging and it should
+	 * have been updated by an IWL_MVM_RXQ_NSSN_SYNC notification. Be nice
+ * and update the other queues.
*/
if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
buffer->buf_size) ||
@@ -802,7 +891,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
- min_sn);
+ min_sn, IWL_MVM_RELEASE_SEND_RSS_SYNC);
}
/* drop any oudated packets */
@@ -813,8 +902,23 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
buffer->buf_size) &&
- (!amsdu || last_subframe))
+ (!amsdu || last_subframe)) {
+ /*
+ * If we crossed the 2048 or 0 SN, notify all the
+ * queues. This is done in order to avoid having a
+ * head_sn that lags behind for too long. When that
+ * happens, we can get to a situation where the head_sn
+ * is within the interval [nssn - buf_size : nssn]
+ * which will make us think that the nssn is a packet
+ * that we already freed because of the reordering
+ * buffer and we will ignore it. So maintain the
+ * head_sn somewhat updated across all the queues:
+ * when it crosses 0 and 2048.
+ */
+ if (sn == 2048 || sn == 0)
+ iwl_mvm_sync_nssn(mvm, baid, sn);
buffer->head_sn = nssn;
+ }
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
@@ -829,8 +933,11 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* while technically there is no hole and we can move forward.
*/
if (!buffer->num_stored && sn == buffer->head_sn) {
- if (!amsdu || last_subframe)
+ if (!amsdu || last_subframe) {
+ if (sn == 2048 || sn == 0)
+ iwl_mvm_sync_nssn(mvm, baid, sn);
buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+ }
/* No need to update AMSDU last SN - we are moving the head */
spin_unlock_bh(&buffer->lock);
return false;
@@ -875,7 +982,9 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
* release notification with up to date NSSN.
*/
if (!amsdu || last_subframe)
- iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+ iwl_mvm_release_frames(mvm, sta, napi, baid_data,
+ buffer, nssn,
+ IWL_MVM_RELEASE_SEND_RSS_SYNC);
spin_unlock_bh(&buffer->lock);
return true;
@@ -1840,40 +1949,14 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
out:
rcu_read_unlock();
}
+
void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_frame_release *release = (void *)pkt->data;
- struct ieee80211_sta *sta;
- struct iwl_mvm_reorder_buffer *reorder_buf;
- struct iwl_mvm_baid_data *ba_data;
-
- int baid = release->baid;
-
- IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
- release->baid, le16_to_cpu(release->nssn));
- if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
- return;
-
- rcu_read_lock();
-
- ba_data = rcu_dereference(mvm->baid_map[baid]);
- if (WARN_ON_ONCE(!ba_data))
- goto out;
-
- sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
- goto out;
-
- reorder_buf = &ba_data->reorder_buf[queue];
-
- spin_lock_bh(&reorder_buf->lock);
- iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
- le16_to_cpu(release->nssn));
- spin_unlock_bh(&reorder_buf->lock);
-
-out:
- rcu_read_unlock();
+ iwl_mvm_release_frames_from_notif(mvm, napi, release->baid,
+ le16_to_cpu(release->nssn),
+ queue, 0);
}
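
The rxmq.c changes above add a cross-queue NSSN sync: whenever one RX queue releases frames across sequence number 0 or 2048 it sends an IWL_MVM_RXQ_NSSN_SYNC notification so the other queues advance their head_sn as well, and iwl_mvm_release_frames_from_notif() factors out the BAID lookup shared by firmware frame-release notifications and these internal syncs. All of it rests on modular 12-bit 802.11 sequence-number arithmetic. A small self-contained sketch of the "is a before b" comparison and the wrap-crossing points; this mirrors the idea only, not mac80211's exact helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SN_MASK   0x0fff		/* 802.11 sequence numbers are 12 bits */
#define SN_MODULO (SN_MASK + 1)

/* true if sn1 comes before sn2 in modular 12-bit arithmetic */
static bool sn_less(uint16_t sn1, uint16_t sn2)
{
	return sn1 != sn2 &&
	       (((uint16_t)(sn2 - sn1)) & SN_MASK) < (SN_MODULO / 2);
}

/* Walk head_sn forward to nssn; report where a cross-queue sync would fire. */
static uint16_t release_up_to(uint16_t head_sn, uint16_t nssn)
{
	while (sn_less(head_sn, nssn)) {
		head_sn = (head_sn + 1) & SN_MASK;
		if (head_sn == 0 || head_sn == 2048)
			printf("  crossed %u: would sync NSSN to other queues\n",
			       (unsigned)head_sn);
	}
	return head_sn;
}

int main(void)
{
	uint16_t head = 2040;

	head = release_up_to(head, 2060);	/* crosses 2048 */
	head = release_up_to(head, 5);		/* wraps across 0 */
	printf("final head_sn = %u\n", (unsigned)head);
	return 0;
}
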
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index f545a737a92d..10f18536dd0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1684,6 +1684,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
*/
if (iwl_mvm_has_tlc_offload(mvm))
iwl_mvm_rs_add_sta(mvm, mvm_sta);
+ else
+ spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
@@ -2421,7 +2423,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
- struct iwl_mvm_delba_notif notif = {
+ struct iwl_mvm_rss_sync_notif notif = {
.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
.metadata.sync = 1,
.delba.baid = baid,
@@ -2972,7 +2974,7 @@ out:
IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
sta->addr, tid);
- return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
+ return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
}
static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 4487cc3e07c1..8d70093847cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -343,9 +343,17 @@ struct iwl_mvm_delba_data {
u32 baid;
} __packed;
-struct iwl_mvm_delba_notif {
+struct iwl_mvm_nssn_sync_data {
+ u32 baid;
+ u32 nssn;
+} __packed;
+
+struct iwl_mvm_rss_sync_notif {
struct iwl_mvm_internal_rxq_notif metadata;
- struct iwl_mvm_delba_data delba;
+ union {
+ struct iwl_mvm_delba_data delba;
+ struct iwl_mvm_nssn_sync_data nssn_sync;
+ };
} __packed;
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index a3e5d88f1c07..6ac114a393cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
unsigned int tcp_payload_len;
unsigned int mss = skb_shinfo(skb)->gso_size;
bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+ bool qos = ieee80211_is_data_qos(hdr->frame_control);
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
skb_shinfo(skb)->gso_size = num_subframes * mss;
@@ -864,7 +865,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
if (tcp_payload_len > mss) {
skb_shinfo(tmp)->gso_size = mss;
} else {
- if (ieee80211_is_data_qos(hdr->frame_control)) {
+ if (qos) {
u8 *qc;
if (ipv4)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 9ecd5f09615a..b8e20a01c192 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -653,12 +653,12 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
* this case to clear the state indicating that station creation is in
* progress.
*/
-int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool sync)
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
{
struct iwl_host_cmd cmd = {
.id = LQ_CMD,
.len = { sizeof(struct iwl_lq_cmd), },
- .flags = sync ? 0 : CMD_ASYNC,
+ .flags = CMD_ASYNC,
.data = { lq, },
};
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index ea2a03d4bf55..de711c1160d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -604,10 +604,13 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x6014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x8010, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xE010, iwl9260_2ac_160_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0xE014, iwl9260_2ac_160_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fa4245d0d4a8..2f0ba7ef53b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
DMA_TO_DEVICE);
}
+ meta->tbs = 0;
+
if (trans->cfg->use_tfh) {
struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3aeff7a3c3d8..f86c2891310a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3619,10 +3619,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &hwsim_genl_family,
NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
- if (!hdr)
+ if (hdr) {
+ genl_dump_check_consistent(cb, hdr);
+ genlmsg_end(skb, hdr);
+ } else {
res = -EMSGSIZE;
- genl_dump_check_consistent(cb, hdr);
- genlmsg_end(skb, hdr);
+ }
}
done:
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 3e442c7f7882..095837fba300 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -124,6 +124,7 @@ enum {
#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+#define WPA_GTK_OUI_OFFSET 2
#define RSN_GTK_OUI_OFFSET 2
#define MWIFIEX_OUI_NOT_PRESENT 0
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 0d6d41727037..21dda385f6c6 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+ WPA_GTK_OUI_OFFSET);
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
if (ret)
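
The scan.c fix starts the cipher-OUI search WPA_GTK_OUI_OFFSET (2) bytes into the vendor IE body instead of at byte 0. In the usual WPA information-element layout the body begins with a 2-byte little-endian version field, and only after it comes the 4-byte group cipher suite (OUI plus suite type), so matching from offset 0 compared the OUI against the version field. A hedged sketch of reading that layout from a raw byte buffer; the layout shown is the common WPA IE body, not mwifiex's structures, and the sample bytes are illustrative:

#include <stdint.h>
#include <stdio.h>

#define WPA_GTK_OUI_OFFSET 2	/* skip the 2-byte version field */

int main(void)
{
	/* WPA IE body after the vendor header: version 1, group cipher TKIP */
	const uint8_t body[] = { 0x01, 0x00,			/* version (LE)     */
				 0x00, 0x50, 0xf2, 0x02 };	/* OUI + suite type */

	unsigned version = body[0] | (body[1] << 8);
	const uint8_t *group = body + WPA_GTK_OUI_OFFSET;

	printf("version=%u group OUI=%02x:%02x:%02x suite=%u\n",
	       version, group[0], group[1], group[2], (unsigned)group[3]);
	return 0;
}
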
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index e65d027b91fa..529be35ac178 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
/* Reset possible fault of previous session */
clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
- if (priv->config.reset_n_io) {
+ if (gpio_is_valid(priv->config.reset_n_io)) {
nfc_info(priv->dev, "reset the chip\n");
gpio_set_value(priv->config.reset_n_io, 0);
usleep_range(5000, 10000);
@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
{
- if (priv->config.reset_n_io)
+ if (gpio_is_valid(priv->config.reset_n_io))
gpio_set_value(priv->config.reset_n_io, 0);
}
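
These nfcmrvl changes stop treating GPIO 0 as "no reset line": reset_n_io becomes a signed value defaulting to -EINVAL (see the uart.c and usb.c hunks below), and the checks use gpio_is_valid() instead of plain truthiness, since 0 is a perfectly valid GPIO number while a negative value means "not configured". A small sketch of why the truthiness test goes wrong; valid_gpio() is a stand-in for the kernel's gpio_is_valid(), and the helper names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define NO_GPIO (-22)	/* mirrors using -EINVAL as "not configured" */

/* Stand-in for gpio_is_valid(): only non-negative numbers are usable. */
static bool valid_gpio(int gpio)
{
	return gpio >= 0;
}

static void reset_chip(int reset_gpio)
{
	/* old check: "if (reset_n_io)" — fails for GPIO 0, passes for -EINVAL */
	if (reset_gpio)
		printf("old check: would toggle GPIO %d\n", reset_gpio);

	/* new check: only a real (non-negative) GPIO number is toggled */
	if (valid_gpio(reset_gpio))
		printf("new check: would toggle GPIO %d\n", reset_gpio);
}

int main(void)
{
	reset_chip(0);		/* a real GPIO the old test silently skipped */
	reset_chip(NO_GPIO);	/* not configured: only the new test skips it */
	return 0;
}
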
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 9a22056e8d9e..e5a622ce4b95 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -26,7 +26,7 @@
static unsigned int hci_muxed;
static unsigned int flow_control;
static unsigned int break_control;
-static unsigned int reset_n_io;
+static int reset_n_io = -EINVAL;
/*
** NFCMRVL NCI OPS
@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
module_param(hci_muxed, uint, 0);
MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
-module_param(reset_n_io, uint, 0);
+module_param(reset_n_io, int, 0);
MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 945cc903d8f1..888e298f610b 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
/* No configuration for USB */
memset(&config, 0, sizeof(config));
+ config.reset_n_io = -EINVAL;
nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index c3e10b6ab3a4..f25f1ec5f9e9 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -333,6 +333,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 06fc542fd198..6586378cacb0 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -317,6 +317,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2],
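
Both NFC hunks add the same missing NULL check after devm_kzalloc(): the allocation result was dereferenced unconditionally, so an allocation failure meant a NULL pointer dereference instead of a clean -ENOMEM. The pattern is simply "check before use"; a minimal generic sketch where plain calloc stands in for devm_kzalloc and the extra length checks are for the sketch only:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct transaction {
	size_t aid_len;
	unsigned char aid[16];
};

/* Parse a hypothetical event buffer; returns 0 or a negative errno. */
static int handle_event(const unsigned char *data, size_t len)
{
	struct transaction *t = calloc(1, sizeof(*t));

	if (!t)			/* the added check: fail cleanly, don't deref */
		return -ENOMEM;

	t->aid_len = data[1];
	if (t->aid_len > sizeof(t->aid) || t->aid_len + 2 > len) {
		free(t);
		return -EINVAL;
	}
	memcpy(t->aid, &data[2], t->aid_len);
	printf("aid_len=%zu\n", t->aid_len);
	free(t);
	return 0;
}

int main(void)
{
	const unsigned char evt[] = { 0x00, 0x04, 0xde, 0xad, 0xbe, 0xef };

	return handle_event(evt, sizeof(evt)) ? 1 : 0;
}
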
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 62d00fffa4af..3508a79110c7 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -62,14 +62,14 @@ static ssize_t sector_size_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -91,11 +91,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -120,13 +120,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -138,14 +138,14 @@ static ssize_t size_show(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver)
rc = sprintf(buf, "%llu\n", nd_btt->size);
else {
/* no size to convey if the btt instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 2dca3034fee0..798c5c4aea9c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -26,7 +26,7 @@
int nvdimm_major;
static int nvdimm_bus_major;
-static struct class *nd_class;
+struct class *nd_class;
static DEFINE_IDA(nd_ida);
static int to_nd_device_type(struct device *dev)
@@ -73,7 +73,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
nvdimm_bus_lock(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
- wake_up(&nvdimm_bus->probe_wait);
+ wake_up(&nvdimm_bus->wait);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
@@ -91,7 +91,10 @@ static int nvdimm_bus_probe(struct device *dev)
dev->driver->name, dev_name(dev));
nvdimm_bus_probe_start(nvdimm_bus);
+ debug_nvdimm_lock(dev);
rc = nd_drv->probe(dev);
+ debug_nvdimm_unlock(dev);
+
if (rc == 0)
nd_region_probe_success(nvdimm_bus, dev);
else
@@ -113,8 +116,11 @@ static int nvdimm_bus_remove(struct device *dev)
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
int rc = 0;
- if (nd_drv->remove)
+ if (nd_drv->remove) {
+ debug_nvdimm_lock(dev);
rc = nd_drv->remove(dev);
+ debug_nvdimm_unlock(dev);
+ }
nd_region_disable(nvdimm_bus, dev);
dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
@@ -140,7 +146,7 @@ static void nvdimm_bus_shutdown(struct device *dev)
void nd_device_notify(struct device *dev, enum nvdimm_event event)
{
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_device_driver *nd_drv;
@@ -148,7 +154,7 @@ void nd_device_notify(struct device *dev, enum nvdimm_event event)
if (nd_drv->notify)
nd_drv->notify(dev, event);
}
- device_unlock(dev);
+ nd_device_unlock(dev);
}
EXPORT_SYMBOL(nd_device_notify);
@@ -296,7 +302,7 @@ static void nvdimm_bus_release(struct device *dev)
kfree(nvdimm_bus);
}
-static bool is_nvdimm_bus(struct device *dev)
+bool is_nvdimm_bus(struct device *dev)
{
return dev->release == nvdimm_bus_release;
}
@@ -341,7 +347,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
return NULL;
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
- init_waitqueue_head(&nvdimm_bus->probe_wait);
+ init_waitqueue_head(&nvdimm_bus->wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
if (nvdimm_bus->id < 0) {
kfree(nvdimm_bus);
@@ -426,6 +432,9 @@ static int nd_bus_remove(struct device *dev)
list_del_init(&nvdimm_bus->list);
mutex_unlock(&nvdimm_bus_list_mutex);
+ wait_event(nvdimm_bus->wait,
+ atomic_read(&nvdimm_bus->ioctl_active) == 0);
+
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
@@ -547,13 +556,38 @@ EXPORT_SYMBOL(nd_device_register);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
+ bool killed;
+
switch (mode) {
case ND_ASYNC:
+ /*
+ * In the async case this is being triggered with the
+ * device lock held and the unregistration work needs to
+	 * be moved out of line iff this thread has won the
+ * race to schedule the deletion.
+ */
+ if (!kill_device(dev))
+ return;
+
get_device(dev);
async_schedule_domain(nd_async_device_unregister, dev,
&nd_async_domain);
break;
case ND_SYNC:
+ /*
+ * In the sync case the device is being unregistered due
+ * to a state change of the parent. Claim the kill state
+ * to synchronize against other unregistration requests,
+ * or otherwise let the async path handle it if the
+ * unregistration was already queued.
+ */
+ nd_device_lock(dev);
+ killed = kill_device(dev);
+ nd_device_unlock(dev);
+
+ if (!killed)
+ return;
+
nd_synchronize();
device_unregister(dev);
break;
@@ -859,10 +893,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
do {
if (nvdimm_bus->probe_active == 0)
break;
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- wait_event(nvdimm_bus->probe_wait,
+ nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
+ wait_event(nvdimm_bus->wait,
nvdimm_bus->probe_active == 0);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ nd_device_lock(dev);
+ nvdimm_bus_lock(dev);
} while (true);
}
@@ -945,20 +981,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
- static char out_env[ND_CMD_MAX_ENVELOPE];
- static char in_env[ND_CMD_MAX_ENVELOPE];
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
void __user *p = (void __user *) arg;
+ char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
unsigned int func = cmd;
unsigned long cmd_mask;
struct nd_cmd_pkg pkg;
int rc, i, cmd_rc;
+ void *buf = NULL;
u64 buf_len = 0;
- void *buf;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
@@ -989,7 +1024,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
case ND_CMD_ARS_START:
case ND_CMD_CLEAR_ERROR:
case ND_CMD_CALL:
- dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+ dev_dbg(dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
return -EPERM;
@@ -998,6 +1033,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an input envelope */
+ in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!in_env)
+ return -ENOMEM;
for (i = 0; i < desc->in_num; i++) {
u32 in_size, copy;
@@ -1005,14 +1043,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i);
- return -ENXIO;
+ rc = -ENXIO;
+ goto out;
}
- if (in_len < sizeof(in_env))
- copy = min_t(u32, sizeof(in_env) - in_len, in_size);
+ if (in_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
else
copy = 0;
- if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
- return -EFAULT;
+ if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
in_len += in_size;
}
@@ -1024,6 +1065,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an output envelope */
+ out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!out_env) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
(u32 *) in_env, (u32 *) out_env, 0);
@@ -1032,15 +1079,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
dimm_name, cmd_name, i);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
- if (out_len < sizeof(out_env))
- copy = min_t(u32, sizeof(out_env) - out_len, out_size);
+ if (out_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
else
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
- p + in_len + out_len, copy))
- return -EFAULT;
+ p + in_len + out_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
out_len += out_size;
}
@@ -1048,19 +1098,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
buf = vmalloc(buf_len);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, p, buf_len)) {
rc = -EFAULT;
goto out;
}
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ nd_device_lock(dev);
+ nvdimm_bus_lock(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
goto out_unlock;
@@ -1075,39 +1129,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
clear_err->cleared);
}
- nvdimm_bus_unlock(&nvdimm_bus->dev);
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
- vfree(buf);
- return rc;
-
- out_unlock:
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- out:
+out_unlock:
+ nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
+out:
+ kfree(in_env);
+ kfree(out_env);
vfree(buf);
return rc;
}
-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long id = (long) file->private_data;
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
-
- ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
- mutex_lock(&nvdimm_bus_list_mutex);
- list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- if (nvdimm_bus->id == id) {
- rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
- break;
- }
- }
- mutex_unlock(&nvdimm_bus_list_mutex);
-
- return rc;
-}
+enum nd_ioctl_mode {
+ BUS_IOCTL,
+ DIMM_IOCTL,
+};
static int match_dimm(struct device *dev, void *data)
{
@@ -1122,31 +1161,62 @@ static int match_dimm(struct device *dev, void *data)
return 0;
}
-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+ enum nd_ioctl_mode mode)
+
{
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_bus *nvdimm_bus, *found = NULL;
+ long id = (long) file->private_data;
+ struct nvdimm *nvdimm = NULL;
+ int rc, ro;
ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
mutex_lock(&nvdimm_bus_list_mutex);
list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- struct device *dev = device_find_child(&nvdimm_bus->dev,
- file->private_data, match_dimm);
- struct nvdimm *nvdimm;
-
- if (!dev)
- continue;
+ if (mode == DIMM_IOCTL) {
+ struct device *dev;
+
+ dev = device_find_child(&nvdimm_bus->dev,
+ file->private_data, match_dimm);
+ if (!dev)
+ continue;
+ nvdimm = to_nvdimm(dev);
+ found = nvdimm_bus;
+ } else if (nvdimm_bus->id == id) {
+ found = nvdimm_bus;
+ }
- nvdimm = to_nvdimm(dev);
- rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
- put_device(dev);
- break;
+ if (found) {
+ atomic_inc(&nvdimm_bus->ioctl_active);
+ break;
+ }
}
mutex_unlock(&nvdimm_bus_list_mutex);
+ if (!found)
+ return -ENXIO;
+
+ nvdimm_bus = found;
+ rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+
+ if (nvdimm)
+ put_device(&nvdimm->dev);
+ if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
+ wake_up(&nvdimm_bus->wait);
+
return rc;
}
+static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, BUS_IOCTL);
+}
+
+static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
+}
+
static int nd_open(struct inode *inode, struct file *file)
{
long minor = iminor(inode);
@@ -1158,16 +1228,16 @@ static int nd_open(struct inode *inode, struct file *file)
static const struct file_operations nvdimm_bus_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nd_ioctl,
- .compat_ioctl = nd_ioctl,
+ .unlocked_ioctl = bus_ioctl,
+ .compat_ioctl = bus_ioctl,
.llseek = noop_llseek,
};
static const struct file_operations nvdimm_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nvdimm_ioctl,
- .compat_ioctl = nvdimm_ioctl,
+ .unlocked_ioctl = dimm_ioctl,
+ .compat_ioctl = dimm_ioctl,
.llseek = noop_llseek,
};
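
The bus.c rework does three related things: the per-call command envelopes become kzalloc()'d buffers released through a common out: label instead of function-lifetime static arrays; the bus and DIMM ioctl paths are folded into a single nd_ioctl() selected by the nd_ioctl_mode enum; and every in-flight ioctl bumps nvdimm_bus->ioctl_active so that bus removal can wait_event() until the count drains to zero. A userspace analogue of that drain pattern, using C11 atomics and a mutex/condvar in place of the kernel's atomic_t and wait queue; the names are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ioctl_active;
static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_cv   = PTHREAD_COND_INITIALIZER;

/* One in-flight operation: count in, do work, count out, wake any waiter. */
static void do_ioctl(void)
{
	atomic_fetch_add(&ioctl_active, 1);

	/* ... the actual command handling would run here ... */

	if (atomic_fetch_sub(&ioctl_active, 1) == 1) {	/* last one out */
		pthread_mutex_lock(&wait_lock);
		pthread_cond_broadcast(&wait_cv);
		pthread_mutex_unlock(&wait_lock);
	}
}

/* Teardown: block until every in-flight operation has finished. */
static void drain_ioctls(void)
{
	pthread_mutex_lock(&wait_lock);
	while (atomic_load(&ioctl_active) != 0)
		pthread_cond_wait(&wait_cv, &wait_lock);
	pthread_mutex_unlock(&wait_lock);
}

int main(void)
{
	do_ioctl();
	drain_ioctls();
	printf("all ioctls drained, safe to tear down\n");
	return 0;
}
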
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 5e1f060547bf..9204f1e9fd14 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -246,7 +246,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
*
* Enforce that uuids can only be changed while the device is disabled
* (driver detached)
- * LOCKING: expects device_lock() is held on entry
+ * LOCKING: expects nd_device_lock() is held on entry
*/
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
size_t len)
@@ -347,15 +347,15 @@ static DEVICE_ATTR_RO(provider);
static int flush_namespaces(struct device *dev, void *data)
{
- device_lock(dev);
- device_unlock(dev);
+ nd_device_lock(dev);
+ nd_device_unlock(dev);
return 0;
}
static int flush_regions_dimms(struct device *dev, void *data)
{
- device_lock(dev);
- device_unlock(dev);
+ nd_device_lock(dev);
+ nd_device_unlock(dev);
device_for_each_child(dev, NULL, flush_namespaces);
return 0;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index dfecd6e17043..29a065e769ea 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -484,12 +484,12 @@ static ssize_t security_store(struct device *dev,
* done while probing is idle and the DIMM is not in active use
* in any region.
*/
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __security_store(dev, buf, len);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 2d8d7e554877..a16e52251a30 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -410,7 +410,7 @@ static ssize_t alt_name_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __alt_name_store(dev, buf, len);
@@ -418,7 +418,7 @@ static ssize_t alt_name_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1077,7 +1077,7 @@ static ssize_t size_store(struct device *dev,
if (rc)
return rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __size_store(dev, val);
@@ -1103,7 +1103,7 @@ static ssize_t size_store(struct device *dev,
dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1286,7 +1286,7 @@ static ssize_t uuid_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (to_ndns(dev)->claim)
@@ -1302,7 +1302,7 @@ static ssize_t uuid_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1376,7 +1376,7 @@ static ssize_t sector_size_store(struct device *dev,
} else
return -ENXIO;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (to_ndns(dev)->claim)
rc = -EBUSY;
@@ -1387,7 +1387,7 @@ static ssize_t sector_size_store(struct device *dev,
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -1502,9 +1502,9 @@ static ssize_t holder_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1541,7 +1541,7 @@ static ssize_t holder_class_store(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
rc = __holder_class_store(dev, buf);
@@ -1549,7 +1549,7 @@ static ssize_t holder_class_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc < 0 ? rc : len;
}
@@ -1560,7 +1560,7 @@ static ssize_t holder_class_show(struct device *dev,
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
rc = sprintf(buf, "\n");
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
@@ -1572,7 +1572,7 @@ static ssize_t holder_class_show(struct device *dev,
rc = sprintf(buf, "dax\n");
else
rc = sprintf(buf, "<unknown>\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1586,7 +1586,7 @@ static ssize_t mode_show(struct device *dev,
char *mode;
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
claim = ndns->claim;
if (claim && is_nd_btt(claim))
mode = "safe";
@@ -1599,7 +1599,7 @@ static ssize_t mode_show(struct device *dev,
else
mode = "raw";
rc = sprintf(buf, "%s\n", mode);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -1703,8 +1703,8 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
* Flush any in-progress probes / removals in the driver
* for the raw personality of this namespace.
*/
- device_lock(&ndns->dev);
- device_unlock(&ndns->dev);
+ nd_device_lock(&ndns->dev);
+ nd_device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
dev_name(dev));
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 391e88de3a29..0ac52b6eb00e 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -9,6 +9,7 @@
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
+#include "nd.h"
extern struct list_head nvdimm_bus_list;
extern struct mutex nvdimm_bus_list_mutex;
@@ -17,10 +18,11 @@ extern struct workqueue_struct *nvdimm_wq;
struct nvdimm_bus {
struct nvdimm_bus_descriptor *nd_desc;
- wait_queue_head_t probe_wait;
+ wait_queue_head_t wait;
struct list_head list;
struct device dev;
int id, probe_active;
+ atomic_t ioctl_active;
struct list_head mapping_list;
struct mutex reconfig_mutex;
struct badrange badrange;
@@ -181,4 +183,71 @@ ssize_t nd_namespace_store(struct device *dev,
struct nd_namespace_common **_ndns, const char *buf,
size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
+bool is_nvdimm_bus(struct device *dev);
+
+#ifdef CONFIG_PROVE_LOCKING
+extern struct class *nd_class;
+
+enum {
+ LOCK_BUS,
+ LOCK_NDCTL,
+ LOCK_REGION,
+ LOCK_DIMM = LOCK_REGION,
+ LOCK_NAMESPACE,
+ LOCK_CLAIM,
+};
+
+static inline void debug_nvdimm_lock(struct device *dev)
+{
+ if (is_nd_region(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_REGION);
+ else if (is_nvdimm(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_DIMM);
+ else if (is_nd_btt(dev) || is_nd_pfn(dev) || is_nd_dax(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_CLAIM);
+ else if (dev->parent && (is_nd_region(dev->parent)))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_NAMESPACE);
+ else if (is_nvdimm_bus(dev))
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_BUS);
+ else if (dev->class && dev->class == nd_class)
+ mutex_lock_nested(&dev->lockdep_mutex, LOCK_NDCTL);
+ else
+ dev_WARN(dev, "unknown lock level\n");
+}
+
+static inline void debug_nvdimm_unlock(struct device *dev)
+{
+ mutex_unlock(&dev->lockdep_mutex);
+}
+
+static inline void nd_device_lock(struct device *dev)
+{
+ device_lock(dev);
+ debug_nvdimm_lock(dev);
+}
+
+static inline void nd_device_unlock(struct device *dev)
+{
+ debug_nvdimm_unlock(dev);
+ device_unlock(dev);
+}
+#else
+static inline void nd_device_lock(struct device *dev)
+{
+ device_lock(dev);
+}
+
+static inline void nd_device_unlock(struct device *dev)
+{
+ device_unlock(dev);
+}
+
+static inline void debug_nvdimm_lock(struct device *dev)
+{
+}
+
+static inline void debug_nvdimm_unlock(struct device *dev)
+{
+}
+#endif
#endif /* __ND_CORE_H__ */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index df2bdbd22450..3e7b11cf1aae 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -67,7 +67,7 @@ static ssize_t mode_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
if (dev->driver)
rc = -EBUSY;
@@ -89,7 +89,7 @@ static ssize_t mode_store(struct device *dev,
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -132,14 +132,14 @@ static ssize_t align_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments());
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -161,11 +161,11 @@ static ssize_t uuid_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc ? rc : len;
}
@@ -190,13 +190,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -208,7 +208,7 @@ static ssize_t resource_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -222,7 +222,7 @@ static ssize_t resource_show(struct device *dev,
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
@@ -234,7 +234,7 @@ static ssize_t size_show(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver) {
struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
u64 offset = __le64_to_cpu(pfn_sb->dataoff);
@@ -250,7 +250,7 @@ static ssize_t size_show(struct device *dev,
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
}
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2bf3acd69613..4c121dd03dd9 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -522,8 +522,8 @@ static int nd_pmem_remove(struct device *dev)
nvdimm_namespace_detach_btt(to_nd_btt(dev));
else {
/*
- * Note, this assumes device_lock() context to not race
- * nd_pmem_notify()
+ * Note, this assumes nd_device_lock() context to not
+ * race nd_pmem_notify()
*/
sysfs_put(pmem->bb_state);
pmem->bb_state = NULL;
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index ef46cc3a71ae..37bf8719a2a4 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -34,17 +34,6 @@ static int nd_region_probe(struct device *dev)
if (rc)
return rc;
- rc = nd_region_register_namespaces(nd_region, &err);
- if (rc < 0)
- return rc;
-
- ndrd = dev_get_drvdata(dev);
- ndrd->ns_active = rc;
- ndrd->ns_count = rc + err;
-
- if (rc && err && rc == err)
- return -ENODEV;
-
if (is_nd_pmem(&nd_region->dev)) {
struct resource ndr_res;
@@ -60,6 +49,17 @@ static int nd_region_probe(struct device *dev)
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
}
+ rc = nd_region_register_namespaces(nd_region, &err);
+ if (rc < 0)
+ return rc;
+
+ ndrd = dev_get_drvdata(dev);
+ ndrd->ns_active = rc;
+ ndrd->ns_count = rc + err;
+
+ if (rc && err && rc == err)
+ return -ENODEV;
+
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region->dax_seed = nd_dax_create(nd_region);
@@ -102,7 +102,7 @@ static int nd_region_remove(struct device *dev)
nvdimm_bus_unlock(dev);
/*
- * Note, this assumes device_lock() context to not race
+ * Note, this assumes nd_device_lock() context to not race
* nd_region_notify()
*/
sysfs_put(nd_region->bb_state);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 56f2227f192a..af30cbe7a8ea 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -331,7 +331,7 @@ static ssize_t set_cookie_show(struct device *dev,
* the v1.1 namespace label cookie definition. To read all this
* data we need to wait for probing to settle.
*/
- device_lock(dev);
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
if (nd_region->ndr_mappings) {
@@ -348,7 +348,7 @@ static ssize_t set_cookie_show(struct device *dev,
}
}
nvdimm_bus_unlock(dev);
- device_unlock(dev);
+ nd_device_unlock(dev);
if (rc)
return rc;
@@ -424,10 +424,12 @@ static ssize_t available_size_show(struct device *dev,
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_available_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -439,10 +441,12 @@ static ssize_t max_available_extent_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
+ nd_device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_allocatable_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ nd_device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -561,12 +565,12 @@ static ssize_t region_badblocks_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
- device_lock(dev);
+ nd_device_lock(dev);
if (dev->driver)
rc = badblocks_show(&nd_region->bb, buf, 0);
else
rc = -ENXIO;
- device_unlock(dev);
+ nd_device_unlock(dev);
return rc;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index cc09b81fc7f4..8f3fbe5ca937 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2311,17 +2311,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
}
-static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
+static void nvme_release_subsystem(struct device *dev)
{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
kfree(subsys);
}
-static void nvme_release_subsystem(struct device *dev)
-{
- __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
-}
-
static void nvme_destroy_subsystem(struct kref *ref)
{
struct nvme_subsystem *subsys =
@@ -2477,7 +2475,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
mutex_lock(&nvme_subsystems_lock);
found = __nvme_find_get_subsystem(subsys->subnqn);
if (found) {
- __nvme_release_subsystem(subsys);
+ put_device(&subsys->dev);
subsys = found;
if (!nvme_validate_cntlid(subsys, ctrl, id)) {
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index a9a927677970..4f0d0d12744e 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -12,11 +12,6 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
-{
- return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
-}
-
/*
* If multipathing is enabled we need to always use the subsystem instance
* number for numbering our devices to avoid conflicts between subsystems that
@@ -622,7 +617,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
int error;
- if (!nvme_ctrl_use_ana(ctrl))
+ /* check if multipath is enabled and we have the capability */
+ if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
return 0;
ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 716a876119c8..26b563f9985b 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -485,7 +485,11 @@ extern const struct attribute_group *nvme_ns_id_attr_groups[];
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return ctrl->ana_log_buf != NULL;
+}
+
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bb970ca82517..db160cee42ad 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2254,9 +2254,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
if (!dev->ctrl.tagset) {
dev->tagset.ops = &nvme_mq_ops;
dev->tagset.nr_hw_queues = dev->online_queues - 1;
- dev->tagset.nr_maps = 1; /* default */
- if (dev->io_queues[HCTX_TYPE_READ])
- dev->tagset.nr_maps++;
+ dev->tagset.nr_maps = 2; /* default + read */
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
@@ -3029,6 +3027,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
.driver_data = NVME_QUIRK_LIGHTNVM, },
+ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 2d06b8095a19..df352b334ea7 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -723,8 +723,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
cpu_pm_pmu_setup(armpmu, cmd);
break;
case CPU_PM_EXIT:
- cpu_pm_pmu_setup(armpmu, cmd);
case CPU_PM_ENTER_FAILED:
+ cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu);
break;
default:
diff --git a/drivers/platform/olpc/olpc-xo175-ec.c b/drivers/platform/olpc/olpc-xo175-ec.c
index 48d6f0d87583..83ed1fbf73cf 100644
--- a/drivers/platform/olpc/olpc-xo175-ec.c
+++ b/drivers/platform/olpc/olpc-xo175-ec.c
@@ -736,6 +736,12 @@ static const struct of_device_id olpc_xo175_ec_of_match[] = {
};
MODULE_DEVICE_TABLE(of, olpc_xo175_ec_of_match);
+static const struct spi_device_id olpc_xo175_ec_id_table[] = {
+ { "xo1.75-ec", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, olpc_xo175_ec_id_table);
+
static struct spi_driver olpc_xo175_ec_spi_driver = {
.driver = {
.name = "olpc-xo175-ec",
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
index 235c0b89f824..c510d0d72475 100644
--- a/drivers/platform/x86/intel_pmc_core.c
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -812,6 +812,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
INTEL_CPU_FAM6(KABYLAKE_DESKTOP, spt_reg_map),
INTEL_CPU_FAM6(CANNONLAKE_MOBILE, cnp_reg_map),
INTEL_CPU_FAM6(ICELAKE_MOBILE, icl_reg_map),
+ INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map),
{}
};
diff --git a/drivers/platform/x86/pcengines-apuv2.c b/drivers/platform/x86/pcengines-apuv2.c
index b0d3110ae378..e4c68efac0c2 100644
--- a/drivers/platform/x86/pcengines-apuv2.c
+++ b/drivers/platform/x86/pcengines-apuv2.c
@@ -93,7 +93,7 @@ static struct gpiod_lookup_table gpios_led_table = {
static struct gpio_keys_button apu2_keys_buttons[] = {
{
- .code = KEY_SETUP,
+ .code = KEY_RESTART,
.active_low = 1,
.desc = "front button",
.type = EV_KEY,
@@ -255,6 +255,4 @@ MODULE_DESCRIPTION("PC Engines APUv2/APUv3 board GPIO/LED/keys driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(dmi, apu_gpio_dmi_table);
MODULE_ALIAS("platform:pcengines-apuv2");
-MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME);
-MODULE_SOFTDEP("pre: platform:leds-gpio");
-MODULE_SOFTDEP("pre: platform:gpio_keys_polled");
+MODULE_SOFTDEP("pre: platform:" AMD_FCH_GPIO_DRIVER_NAME " platform:leds-gpio platform:gpio_keys_polled");
diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c
index 9fd6dd342169..6df481896b5f 100644
--- a/drivers/powercap/intel_rapl_common.c
+++ b/drivers/powercap/intel_rapl_common.c
@@ -1454,7 +1454,7 @@ static void __exit rapl_exit(void)
unregister_pm_notifier(&rapl_pm_notifier);
}
-module_init(rapl_init);
+fs_initcall(rapl_init);
module_exit(rapl_exit);
MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code");
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 540e8aafc990..f808c5fa9838 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -671,7 +671,7 @@ static int __init powercap_init(void)
return class_register(&powercap_class);
}
-device_initcall(powercap_init);
+fs_initcall(powercap_init);
MODULE_DESCRIPTION("PowerCap sysfs Driver");
MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index 152053361862..989506bd90b1 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -174,14 +174,14 @@
#define AXP803_DCDC5_1140mV_STEPS 35
#define AXP803_DCDC5_1140mV_END \
(AXP803_DCDC5_1140mV_START + AXP803_DCDC5_1140mV_STEPS)
-#define AXP803_DCDC5_NUM_VOLTAGES 68
+#define AXP803_DCDC5_NUM_VOLTAGES 69
#define AXP803_DCDC6_600mV_START 0x00
#define AXP803_DCDC6_600mV_STEPS 50
#define AXP803_DCDC6_600mV_END \
(AXP803_DCDC6_600mV_START + AXP803_DCDC6_600mV_STEPS)
#define AXP803_DCDC6_1120mV_START 0x33
-#define AXP803_DCDC6_1120mV_STEPS 14
+#define AXP803_DCDC6_1120mV_STEPS 20
#define AXP803_DCDC6_1120mV_END \
(AXP803_DCDC6_1120mV_START + AXP803_DCDC6_1120mV_STEPS)
#define AXP803_DCDC6_NUM_VOLTAGES 72
@@ -240,7 +240,7 @@
#define AXP806_DCDCA_600mV_END \
(AXP806_DCDCA_600mV_START + AXP806_DCDCA_600mV_STEPS)
#define AXP806_DCDCA_1120mV_START 0x33
-#define AXP806_DCDCA_1120mV_STEPS 14
+#define AXP806_DCDCA_1120mV_STEPS 20
#define AXP806_DCDCA_1120mV_END \
(AXP806_DCDCA_1120mV_START + AXP806_DCDCA_1120mV_STEPS)
#define AXP806_DCDCA_NUM_VOLTAGES 72
@@ -774,8 +774,8 @@ static const struct regulator_linear_range axp806_dcdcd_ranges[] = {
AXP806_DCDCD_600mV_END,
20000),
REGULATOR_LINEAR_RANGE(1600000,
- AXP806_DCDCD_600mV_START,
- AXP806_DCDCD_600mV_END,
+ AXP806_DCDCD_1600mV_START,
+ AXP806_DCDCD_1600mV_END,
100000),
};
diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
index 5d067f7c2116..0c440c5e2832 100644
--- a/drivers/regulator/lp87565-regulator.c
+++ b/drivers/regulator/lp87565-regulator.c
@@ -163,7 +163,7 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
struct regulator_config config = { };
struct regulator_dev *rdev;
- int i, min_idx = LP87565_BUCK_0, max_idx = LP87565_BUCK_3;
+ int i, min_idx, max_idx;
platform_set_drvdata(pdev, lp87565);
@@ -182,9 +182,9 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
max_idx = LP87565_BUCK_3210;
break;
default:
- dev_err(lp87565->dev, "Invalid lp config %d\n",
- lp87565->dev_type);
- return -EINVAL;
+ min_idx = LP87565_BUCK_0;
+ max_idx = LP87565_BUCK_3;
+ break;
}
for (i = min_idx; i <= max_idx; i++) {
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 397918ebba55..9112faa6a9a0 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -416,8 +416,10 @@ device_node *regulator_of_get_init_node(struct device *dev,
if (!name)
name = child->name;
- if (!strcmp(desc->of_match, name))
+ if (!strcmp(desc->of_match, name)) {
+ of_node_put(search);
return of_node_get(child);
+ }
}
of_node_put(search);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b9ce93e9df89..99f86612f775 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
char msg_format;
char msg_no;
+ /*
+ * intrc values ENODEV, ENOLINK and EPERM
+ * will be obtained from sleep_on to indicate that no
+ * IO operation can be started
+ */
+ if (cqr->intrc == -ENODEV)
+ return 1;
+
+ if (cqr->intrc == -ENOLINK)
+ return 1;
+
+ if (cqr->intrc == -EPERM)
+ return 1;
+
sense = dasd_get_sense(&cqr->irb);
if (!sense)
return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
- do {
- rc = dasd_sleep_on(cqr);
- if (rc && suborder_not_supported(cqr))
- return -EOPNOTSUPP;
- } while (rc && (cqr->retries > 0));
- if (rc) {
+ rc = dasd_sleep_on(cqr);
+ if (rc && !suborder_not_supported(cqr)) {
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 8c9d412b6d33..e7cf0a1d4f71 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -398,6 +398,7 @@ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm,
}
if (dstat == 0x08)
break;
+ /* else, fall through */
case 0x04:
/* Device end interrupt. */
if ((raw = req->info) == NULL)
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 8d3370da2dfc..3e0b2f63a9d2 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -677,6 +677,7 @@ tape_generic_remove(struct ccw_device *cdev)
switch (device->tape_state) {
case TS_INIT:
tape_state_set(device, TS_NOT_OPER);
+ /* fallthrough */
case TS_NOT_OPER:
/*
* Nothing to do.
@@ -949,6 +950,7 @@ __tape_start_request(struct tape_device *device, struct tape_request *request)
break;
if (device->tape_state == TS_UNUSED)
break;
+ /* fallthrough */
default:
if (device->tape_state == TS_BLKUSE)
break;
@@ -1116,6 +1118,7 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
case -ETIMEDOUT:
DBF_LH(1, "(%08x): Request timed out\n",
device->cdev_id);
+ /* fallthrough */
case -EIO:
__tape_end_request(device, request, -EIO);
break;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 730c4e68094b..4142c85e77d8 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -319,9 +319,7 @@ static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
int retries = 0, cc;
unsigned long laob = 0;
- WARN_ON_ONCE(aob && ((queue_type(q) != QDIO_IQDIO_QFMT) ||
- !q->u.out.use_cq));
- if (q->u.out.use_cq && aob != 0) {
+ if (aob) {
fc = QDIO_SIGA_WRITEQ;
laob = aob;
}
@@ -621,9 +619,6 @@ static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q,
{
unsigned long phys_aob = 0;
- if (!q->use_cq)
- return 0;
-
if (!q->aobs[bufnr]) {
struct qaob *aob = qdio_allocate_aob();
q->aobs[bufnr] = aob;
@@ -1308,6 +1303,8 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
for_each_output_queue(irq_ptr, q, i) {
if (use_cq) {
+ if (multicast_outbound(q))
+ continue;
if (qdio_enable_async_operation(&q->u.out) < 0) {
use_cq = 0;
continue;
@@ -1553,18 +1550,19 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
/* One SIGA-W per buffer required for unicast HSI */
WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
- phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+ if (q->u.out.use_cq)
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
rc = qdio_kick_outbound_q(q, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
+ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+ state == SLSB_CU_OUTPUT_PRIMED) {
+ /* The previous buffer is not processed yet, tack on. */
+ qperf_inc(q, fast_requeue);
} else {
- /* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state, 0);
- if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q, 0);
- else
- qperf_inc(q, fast_requeue);
+ rc = qdio_kick_outbound_q(q, 0);
}
/* in case of SIGA errors we must process the error immediately */
diff --git a/drivers/s390/cio/vfio_ccw_async.c b/drivers/s390/cio/vfio_ccw_async.c
index 8c1d2357ef5b..7a838e3d7c0f 100644
--- a/drivers/s390/cio/vfio_ccw_async.c
+++ b/drivers/s390/cio/vfio_ccw_async.c
@@ -70,7 +70,7 @@ static void vfio_ccw_async_region_release(struct vfio_ccw_private *private,
}
-const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
+static const struct vfio_ccw_regops vfio_ccw_async_region_ops = {
.read = vfio_ccw_async_region_read,
.write = vfio_ccw_async_region_write,
.release = vfio_ccw_async_region_release,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 1d4c893ead23..3645d1720c4b 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -72,8 +72,10 @@ static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
sizeof(*pa->pa_iova_pfn) +
sizeof(*pa->pa_pfn),
GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn))
+ if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_nr = 0;
return -ENOMEM;
+ }
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
@@ -421,7 +423,7 @@ static int ccwchain_loop_tic(struct ccwchain *chain,
static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
{
struct ccwchain *chain;
- int len;
+ int len, ret;
/* Copy 2K (the most we support today) of possible CCWs */
len = copy_from_iova(cp->mdev, cp->guest_cp, cda,
@@ -448,7 +450,12 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
/* Loop for tics on this new chain. */
- return ccwchain_loop_tic(chain, cp);
+ ret = ccwchain_loop_tic(chain, cp);
+
+ if (ret)
+ ccwchain_free(chain);
+
+ return ret;
}
/* Loop for TICs. */
@@ -642,17 +649,16 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
/* Build a ccwchain for the first CCW segment */
ret = ccwchain_handle_ccw(orb->cmd.cpa, cp);
- if (ret)
- cp_free(cp);
-
- /* It is safe to force: if not set but idals used
- * ccwchain_calc_length returns an error.
- */
- cp->orb.cmd.c64 = 1;
- if (!ret)
+ if (!ret) {
cp->initialized = true;
+ /* It is safe to force: if it was not set but idals used
+ * ccwchain_calc_length would have returned an error.
+ */
+ cp->orb.cmd.c64 = 1;
+ }
+
return ret;
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 2b90a5ecaeb9..9208c0e56c33 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
(SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
if (scsw_is_solicited(&irb->scsw)) {
cp_update_scsw(&private->cp, &irb->scsw);
- if (is_final)
+ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
cp_free(&private->cp);
}
mutex_lock(&private->io_mutex);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 5ea83dc4f1d7..dad2be333d82 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -152,6 +152,7 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
ap_msg->receive(aq, ap_msg, aq->reply);
break;
}
+ /* fall through */
case AP_RESPONSE_NO_PENDING_REPLY:
if (!status.queue_empty || aq->queue_count <= 0)
break;
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 12fe9deb265e..a36251d138fb 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -801,10 +801,7 @@ static int convert_response_ica(struct zcrypt_queue *zq,
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_ica(zq, reply,
outputdata, outputdatalength);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -837,10 +834,7 @@ static int convert_response_xcrb(struct zcrypt_queue *zq,
}
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_xcrb(zq, reply, xcRB);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
zq->online = 0;
@@ -870,7 +864,7 @@ static int convert_response_ep11_xcrb(struct zcrypt_queue *zq,
return convert_error(zq, reply);
if (msg->cprbx.cprb_ver_id == 0x04)
return convert_type86_ep11_xcrb(zq, reply, xcRB);
- /* Fall through, no break, incorrect cprb version is an unknown resp.*/
+ /* fall through - wrong cprb version is an unknown resp */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
@@ -900,10 +894,7 @@ static int convert_response_rng(struct zcrypt_queue *zq,
return -EINVAL;
if (msg->cprbx.cprb_ver_id == 0x02)
return convert_type86_rng(zq, reply, data);
- /*
- * Fall through, no break, incorrect cprb version is an unknown
- * response
- */
+ /* fall through - wrong cprb version is an unknown response */
default: /* Unknown response type, this should NEVER EVER happen */
zq->online = 0;
pr_err("Cryptographic device %02x.%04x failed and was set offline\n",
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 1a55e5942d36..957889a42d2e 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,8 @@ struct airq_info {
struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
+static DEFINE_MUTEX(airq_areas_lock);
+
static u8 *summary_indicators;
static inline u8 *get_summary_indicator(struct airq_info *info)
@@ -265,9 +267,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
unsigned long bit, flags;
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+ mutex_lock(&airq_areas_lock);
if (!airq_areas[i])
airq_areas[i] = new_airq_info(i);
info = airq_areas[i];
+ mutex_unlock(&airq_areas_lock);
if (!info)
return 0;
write_lock_irqsave(&info->lock, flags);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 75f66f8ad3ea..1b92f3c19ff3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1523,10 +1523,10 @@ config SCSI_VIRTIO
source "drivers/scsi/csiostor/Kconfig"
-endif # SCSI_LOWLEVEL
-
source "drivers/scsi/pcmcia/Kconfig"
+endif # SCSI_LOWLEVEL
+
source "drivers/scsi/device_handler/Kconfig"
endmenu
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index f0066f8a1786..4971104b1817 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -40,6 +40,7 @@
#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
#define ALUA_RTPG_DELAY_MSECS 5
+#define ALUA_RTPG_RETRY_DELAY 2
/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
@@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
case SCSI_ACCESS_STATE_TRANSITIONING:
if (time_before(jiffies, pg->expiry)) {
/* State transition, retry */
- pg->interval = 2;
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
err = SCSI_DH_RETRY;
} else {
struct alua_dh_data *h;
@@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
+ if (!pg->interval)
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
@@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
pg->flags &= ~ALUA_PG_RUNNING;
+ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 590ec8009f52..1791a393795d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1019,7 +1019,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fcoe_fcf *fcf;
struct fcoe_fcf new;
- unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
+ unsigned long sol_tov = msecs_to_jiffies(FCOE_CTLR_SOL_TOV);
int first = 0;
int mtu_valid;
int found = 0;
@@ -2005,7 +2005,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
*/
static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
{
- return (struct fcoe_rport *)(rdata + 1);
+ return container_of(rdata, struct fcoe_rport, rdata);
}
/**
@@ -2269,7 +2269,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
*/
static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
@@ -2277,16 +2277,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct fip_wwn_desc *wwn = NULL;
struct fip_vn_desc *vn = NULL;
struct fip_size_desc *size = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2349,15 +2345,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
case FIP_DT_VN_ID:
if (dlen != sizeof(struct fip_vn_desc))
goto len_err;
vn = (struct fip_vn_desc *)desc;
memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+ frport->rdata.ids.port_name =
+ get_unaligned_be64(&vn->fd_wwpn);
break;
case FIP_DT_FC4F:
if (dlen != sizeof(struct fip_fc4_feat))
@@ -2403,16 +2401,14 @@ static void fcoe_ctlr_vn_send_claim(struct fcoe_ctlr *fip)
/**
* fcoe_ctlr_vn_probe_req() - handle incoming VN2VN probe request.
* @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
-
- if (rdata->ids.port_id != fip->port_id)
+ if (frport->rdata.ids.port_id != fip->port_id)
return;
switch (fip->state) {
@@ -2432,7 +2428,7 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
* Probe's REC bit is not set.
* If we don't reply, we will change our address.
*/
- if (fip->lp->wwpn > rdata->ids.port_name &&
+ if (fip->lp->wwpn > frport->rdata.ids.port_name &&
!(frport->flags & FIP_FL_REC_OR_P2P)) {
LIBFCOE_FIP_DBG(fip, "vn_probe_req: "
"port_id collision\n");
@@ -2456,14 +2452,14 @@ static void fcoe_ctlr_vn_probe_req(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_probe_reply() - handle incoming VN2VN probe reply.
* @fip: The FCoE controller
- * @rdata: parsed remote port with frport from the probe request
+ * @frport: parsed FCoE rport from the probe request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- if (rdata->ids.port_id != fip->port_id)
+ if (frport->rdata.ids.port_id != fip->port_id)
return;
switch (fip->state) {
case FIP_ST_VNMP_START:
@@ -2486,11 +2482,11 @@ static void fcoe_ctlr_vn_probe_reply(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_add() - Add a VN2VN entry to the list, based on a claim reply.
* @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
*
* Called with ctlr_mutex held.
*/
-static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
+static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fcoe_rport *new)
{
struct fc_lport *lport = fip->lp;
struct fc_rport_priv *rdata;
@@ -2498,7 +2494,7 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
struct fcoe_rport *frport;
u32 port_id;
- port_id = new->ids.port_id;
+ port_id = new->rdata.ids.port_id;
if (port_id == fip->port_id)
return;
@@ -2515,22 +2511,28 @@ static void fcoe_ctlr_vn_add(struct fcoe_ctlr *fip, struct fc_rport_priv *new)
rdata->disc_id = lport->disc.disc_id;
ids = &rdata->ids;
- if ((ids->port_name != -1 && ids->port_name != new->ids.port_name) ||
- (ids->node_name != -1 && ids->node_name != new->ids.node_name)) {
+ if ((ids->port_name != -1 &&
+ ids->port_name != new->rdata.ids.port_name) ||
+ (ids->node_name != -1 &&
+ ids->node_name != new->rdata.ids.node_name)) {
mutex_unlock(&rdata->rp_mutex);
LIBFCOE_FIP_DBG(fip, "vn_add rport logoff %6.6x\n", port_id);
fc_rport_logoff(rdata);
mutex_lock(&rdata->rp_mutex);
}
- ids->port_name = new->ids.port_name;
- ids->node_name = new->ids.node_name;
+ ids->port_name = new->rdata.ids.port_name;
+ ids->node_name = new->rdata.ids.node_name;
mutex_unlock(&rdata->rp_mutex);
frport = fcoe_ctlr_rport(rdata);
LIBFCOE_FIP_DBG(fip, "vn_add rport %6.6x %s state %d\n",
port_id, frport->fcoe_len ? "old" : "new",
rdata->rp_state);
- *frport = *fcoe_ctlr_rport(new);
+ frport->fcoe_len = new->fcoe_len;
+ frport->flags = new->flags;
+ frport->login_count = new->login_count;
+ memcpy(frport->enode_mac, new->enode_mac, ETH_ALEN);
+ memcpy(frport->vn_mac, new->vn_mac, ETH_ALEN);
frport->time = 0;
}
@@ -2562,16 +2564,14 @@ static int fcoe_ctlr_vn_lookup(struct fcoe_ctlr *fip, u32 port_id, u8 *mac)
/**
* fcoe_ctlr_vn_claim_notify() - handle received FIP VN2VN Claim Notification
* @fip: The FCoE controller
- * @new: newly-parsed remote port with frport as a template for new rdata
+ * @new: newly-parsed FCoE rport as a template for new rdata
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(new);
-
- if (frport->flags & FIP_FL_REC_OR_P2P) {
+ if (new->flags & FIP_FL_REC_OR_P2P) {
LIBFCOE_FIP_DBG(fip, "send probe req for P2P/REC\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
@@ -2580,7 +2580,7 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
case FIP_ST_VNMP_START:
case FIP_ST_VNMP_PROBE1:
case FIP_ST_VNMP_PROBE2:
- if (new->ids.port_id == fip->port_id) {
+ if (new->rdata.ids.port_id == fip->port_id) {
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
"restart, state %d\n",
fip->state);
@@ -2589,8 +2589,8 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
break;
case FIP_ST_VNMP_CLAIM:
case FIP_ST_VNMP_UP:
- if (new->ids.port_id == fip->port_id) {
- if (new->ids.port_name > fip->lp->wwpn) {
+ if (new->rdata.ids.port_id == fip->port_id) {
+ if (new->rdata.ids.port_name > fip->lp->wwpn) {
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
"restart, port_id collision\n");
fcoe_ctlr_vn_restart(fip);
@@ -2602,15 +2602,16 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
break;
}
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: send reply to %x\n",
- new->ids.port_id);
- fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, frport->enode_mac,
- min((u32)frport->fcoe_len,
+ new->rdata.ids.port_id);
+ fcoe_ctlr_vn_send(fip, FIP_SC_VN_CLAIM_REP, new->enode_mac,
+ min((u32)new->fcoe_len,
fcoe_ctlr_fcoe_size(fip)));
fcoe_ctlr_vn_add(fip, new);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_claim_notify: "
- "ignoring claim from %x\n", new->ids.port_id);
+ "ignoring claim from %x\n",
+ new->rdata.ids.port_id);
break;
}
}
@@ -2618,15 +2619,15 @@ static void fcoe_ctlr_vn_claim_notify(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_claim_resp() - handle received Claim Response
* @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Claim Response
+ * @new: newly-parsed FCoE rport from the Claim Response
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
LIBFCOE_FIP_DBG(fip, "claim resp from from rport %x - state %s\n",
- new->ids.port_id, fcoe_ctlr_state(fip->state));
+ new->rdata.ids.port_id, fcoe_ctlr_state(fip->state));
if (fip->state == FIP_ST_VNMP_UP || fip->state == FIP_ST_VNMP_CLAIM)
fcoe_ctlr_vn_add(fip, new);
}
@@ -2634,28 +2635,28 @@ static void fcoe_ctlr_vn_claim_resp(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vn_beacon() - handle received beacon.
* @fip: The FCoE controller that received the frame
- * @new: newly-parsed remote port with frport from the Beacon
+ * @new: newly-parsed FCoE rport from the Beacon
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
- struct fc_rport_priv *new)
+ struct fcoe_rport *new)
{
struct fc_lport *lport = fip->lp;
struct fc_rport_priv *rdata;
struct fcoe_rport *frport;
- frport = fcoe_ctlr_rport(new);
- if (frport->flags & FIP_FL_REC_OR_P2P) {
+ if (new->flags & FIP_FL_REC_OR_P2P) {
LIBFCOE_FIP_DBG(fip, "p2p beacon while in vn2vn mode\n");
fcoe_ctlr_vn_send(fip, FIP_SC_VN_PROBE_REQ, fcoe_all_vn2vn, 0);
return;
}
- rdata = fc_rport_lookup(lport, new->ids.port_id);
+ rdata = fc_rport_lookup(lport, new->rdata.ids.port_id);
if (rdata) {
- if (rdata->ids.node_name == new->ids.node_name &&
- rdata->ids.port_name == new->ids.port_name) {
+ if (rdata->ids.node_name == new->rdata.ids.node_name &&
+ rdata->ids.port_name == new->rdata.ids.port_name) {
frport = fcoe_ctlr_rport(rdata);
+
LIBFCOE_FIP_DBG(fip, "beacon from rport %x\n",
rdata->ids.port_id);
if (!frport->time && fip->state == FIP_ST_VNMP_UP) {
@@ -2678,7 +2679,7 @@ static void fcoe_ctlr_vn_beacon(struct fcoe_ctlr *fip,
* Don't add the neighbor yet.
*/
LIBFCOE_FIP_DBG(fip, "beacon from new rport %x. sending claim notify\n",
- new->ids.port_id);
+ new->rdata.ids.port_id);
if (time_after(jiffies,
fip->sol_time + msecs_to_jiffies(FIP_VN_ANN_WAIT)))
fcoe_ctlr_vn_send_claim(fip);
@@ -2738,10 +2739,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vn2vn_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
@@ -2757,7 +2755,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop;
}
- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
goto drop;
@@ -2766,19 +2764,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mutex_lock(&fip->ctlr_mutex);
switch (sub) {
case FIP_SC_VN_PROBE_REQ:
- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_req(fip, &frport);
break;
case FIP_SC_VN_PROBE_REP:
- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_reply(fip, &frport);
break;
case FIP_SC_VN_CLAIM_NOTIFY:
- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_notify(fip, &frport);
break;
case FIP_SC_VN_CLAIM_REP:
- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_resp(fip, &frport);
break;
case FIP_SC_VN_BEACON:
- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+ fcoe_ctlr_vn_beacon(fip, &frport);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2802,22 +2800,18 @@ drop:
*/
static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
struct fip_mac_desc *macd = NULL;
struct fip_wwn_desc *wwn = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2871,7 +2865,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
default:
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2957,13 +2952,13 @@ static void fcoe_ctlr_vlan_send(struct fcoe_ctlr *fip,
/**
* fcoe_ctlr_vlan_disc_reply() - send FIP VLAN Discovery Notification.
* @fip: The FCoE controller
+ * @frport: The newly-parsed FCoE rport from the Discovery Request
*
* Called with ctlr_mutex held.
*/
static void fcoe_ctlr_vlan_disc_reply(struct fcoe_ctlr *fip,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
- struct fcoe_rport *frport = fcoe_ctlr_rport(rdata);
enum fip_vlan_subcode sub = FIP_SC_VL_NOTE;
if (fip->mode == FIP_MODE_VN2VN)
@@ -2982,22 +2977,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vlan_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
goto drop;
}
mutex_lock(&fip->ctlr_mutex);
if (sub == FIP_SC_VL_REQ)
- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+ fcoe_ctlr_vlan_disc_reply(fip, &frport);
mutex_unlock(&fip->ctlr_mutex);
drop:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 43a6b5350775..1bb6aada93fa 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2334,6 +2334,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
case IOACCEL2_SERV_RESPONSE_COMPLETE:
switch (c2->error_data.status) {
case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ if (cmd)
+ cmd->result = 0;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2483,8 +2485,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
- c2->error_data.status == 0))
+ c2->error_data.status == 0)) {
+ cmd->result = 0;
return hpsa_cmd_free_and_done(h, c, cmd);
+ }
/*
* Any RAID offload error results in retry which will use
@@ -5654,6 +5658,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
return SCSI_MLQUEUE_DEVICE_BUSY;
/*
+ * This is necessary because the SML doesn't zero out this field during
+ * error recovery.
+ */
+ cmd->result = 0;
+
+ /*
* Call alternate submit routine for I/O accelerated commands.
* Retries always go down the normal I/O path.
*/
@@ -6081,8 +6091,6 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
if (idx != h->last_collision_tag) { /* Print once per tag */
dev_warn(&h->pdev->dev,
"%s: tag collision (tag=%d)\n", __func__, idx);
- if (c->scsi_cmd != NULL)
- scsi_print_command(c->scsi_cmd);
if (scmd)
scsi_print_command(scmd);
h->last_collision_tag = idx;
@@ -7798,7 +7806,7 @@ static void hpsa_free_pci_init(struct ctlr_info *h)
hpsa_disable_interrupt_mode(h); /* pci_init 2 */
/*
* call pci_disable_device before pci_release_regions per
- * Documentation/PCI/pci.rst
+ * Documentation/driver-api/pci/pci.rst
*/
pci_disable_device(h->pdev); /* pci_init 1 */
pci_release_regions(h->pdev); /* pci_init 2 */
@@ -7881,7 +7889,7 @@ clean2: /* intmode+region, pci */
clean1:
/*
* call pci_disable_device before pci_release_regions per
- * Documentation/PCI/pci.rst
+ * Documentation/driver-api/pci/pci.rst
*/
pci_disable_device(h->pdev);
pci_release_regions(h->pdev);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index acd16e0d52cf..8cdbac076a1b 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
- ibmvfc_free_event_pool(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_free_event_pool(vhost);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index e0f3852fdad1..da6e97d8dc3b 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -128,6 +128,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
+ size_t rport_priv_size = sizeof(*rdata);
lockdep_assert_held(&lport->disc.disc_mutex);
@@ -135,7 +136,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
if (rdata)
return rdata;
- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+ if (lport->rport_priv_size > 0)
+ rport_priv_size = lport->rport_priv_size;
+ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
if (!rdata)
return NULL;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index b2339d04a700..f9f07935556e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3163,6 +3163,7 @@ fw_crash_buffer_show(struct device *cdev,
(struct megasas_instance *) shost->hostdata;
u32 size;
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long chunk_left_bytes;
unsigned long src_addr;
unsigned long flags;
u32 buff_offset;
@@ -3186,6 +3187,8 @@ fw_crash_buffer_show(struct device *cdev,
}
size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
@@ -8763,7 +8766,7 @@ static int __init megasas_init(void)
if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
(event_log_level > MFI_EVT_CLASS_DEAD)) {
- printk(KERN_WARNING "megarid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
+ pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
event_log_level = MFI_EVT_CLASS_CRITICAL;
}
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index a32b3f0fcd15..120e3c4de8c2 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -537,7 +537,7 @@ static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
return 0;
}
-int
+static int
megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
{
u32 max_mpt_cmd, i, j;
@@ -576,7 +576,8 @@ megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
return 0;
}
-int
+
+static int
megasas_alloc_request_fusion(struct megasas_instance *instance)
{
struct fusion_context *fusion;
@@ -657,7 +658,7 @@ retry_alloc:
return 0;
}
-int
+static int
megasas_alloc_reply_fusion(struct megasas_instance *instance)
{
int i, count;
@@ -734,7 +735,7 @@ megasas_alloc_reply_fusion(struct megasas_instance *instance)
return 0;
}
-int
+static int
megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
{
int i, j, k, msix_count;
@@ -916,7 +917,7 @@ megasas_free_reply_fusion(struct megasas_instance *instance) {
* and is used as SMID of the cmd.
* SMID value range is from 1 to max_fw_cmds.
*/
-int
+static int
megasas_alloc_cmds_fusion(struct megasas_instance *instance)
{
int i;
@@ -1736,7 +1737,7 @@ static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
*
* This is the main function for initializing firmware.
*/
-u32
+static u32
megasas_init_adapter_fusion(struct megasas_instance *instance)
{
struct fusion_context *fusion;
@@ -1962,7 +1963,7 @@ megasas_fusion_stop_watchdog(struct megasas_instance *instance)
* @ext_status : ext status of cmd returned by FW
*/
-void
+static void
map_cmd_status(struct fusion_context *fusion,
struct scsi_cmnd *scmd, u8 status, u8 ext_status,
u32 data_length, u8 *sense)
@@ -2375,7 +2376,7 @@ int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
*
* Used to set the PD LBA in CDB for FP IOs
*/
-void
+static void
megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
@@ -2714,7 +2715,7 @@ megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
* Prepares the io_request and chain elements (sg_frame) for IO
* The IO can be for PD (Fast Path) or LD
*/
-void
+static void
megasas_build_ldio_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
@@ -3211,7 +3212,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance,
* Invokes helper functions to prepare request frames
* and sets flags appropriate for IO/Non-IO cmd
*/
-int
+static int
megasas_build_io_fusion(struct megasas_instance *instance,
struct scsi_cmnd *scp,
struct megasas_cmd_fusion *cmd)
@@ -3325,9 +3326,9 @@ megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
/* megasas_prepare_secondRaid1_IO
* It prepares the raid 1 second IO
*/
-void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
- struct megasas_cmd_fusion *cmd,
- struct megasas_cmd_fusion *r1_cmd)
+static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
+ struct megasas_cmd_fusion *cmd,
+ struct megasas_cmd_fusion *r1_cmd)
{
union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
struct fusion_context *fusion;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 684662888792..050c0f029ef9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2703,6 +2703,8 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
u64 required_mask, coherent_mask;
struct sysinfo s;
+ /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
+ int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
@@ -2712,17 +2714,17 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
goto try_32bit;
if (ioc->dma_mask)
- coherent_mask = DMA_BIT_MASK(64);
+ coherent_mask = DMA_BIT_MASK(dma_mask);
else
coherent_mask = DMA_BIT_MASK(32);
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
dma_set_coherent_mask(&pdev->dev, coherent_mask))
goto try_32bit;
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
+ ioc->dma_mask = dma_mask;
goto out;
try_32bit:
@@ -2744,7 +2746,7 @@ static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
struct pci_dev *pdev)
{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
return -ENODEV;
}
@@ -4989,7 +4991,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
- if (ioc->dma_mask == 64) {
+ if (ioc->dma_mask > 32) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
pci_name(ioc->pdev));
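[Editor's note] The mpt3sas hunks above cap the streaming and coherent DMA masks at 63 bits for SAS3/SAS35-generation controllers and change the later test to "dma_mask > 32" so the 63-bit case still takes the consistent-mask path. A standalone C sketch of that selection logic, with the MPI version check reduced to a flag and a macro that mirrors the kernel's DMA_BIT_MASK():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* mirrors DMA_BIT_MASK(): shifting a 64-bit value by 64 is undefined, so the
 * 64-bit case is special-cased, exactly as the kernel macro does */
#define BIT_MASK_N(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	bool sas3_or_newer = true;	/* stand-in for hba_mpi_version_belonged > MPI2_VERSION */
	int dma_mask = sas3_or_newer ? 63 : 64;
	uint64_t coherent_mask = BIT_MASK_N(dma_mask);

	printf("%d-bit mask: 0x%016llx\n", dma_mask,
	       (unsigned long long)coherent_mask);

	/* the rdpq check is now '> 32', so a 63-bit mask still re-applies the
	 * consistent DMA mask */
	if (dma_mask > 32)
		printf("re-set consistent mask to %d bits\n", dma_mask);
	return 0;
}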
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4059655639d9..da83034d4759 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4877,7 +4877,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
ql_log(ql_log_warn, vha, 0xd049,
"Failed to allocate ct_sns request.\n");
kfree(fcport);
- fcport = NULL;
+ return NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
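[Editor's note] The qla2xxx fix above returns NULL as soon as the ct_sns allocation fails; the old code only cleared the local pointer and fell through to INIT_WORK() on memory it had just freed. A plain-C sketch of the corrected error-path shape, with illustrative types standing in for the driver structures:

#include <stdlib.h>
#include <string.h>

struct fcport {
	void *ct_sns;
	char name[16];
};

static struct fcport *alloc_fcport(void)
{
	struct fcport *fcport = calloc(1, sizeof(*fcport));

	if (!fcport)
		return NULL;

	fcport->ct_sns = malloc(64);
	if (!fcport->ct_sns) {
		free(fcport);
		return NULL;	/* was 'fcport = NULL;' plus fall-through before the fix */
	}

	strcpy(fcport->name, "port");	/* safe: only reached on full success */
	return fcport;
}

int main(void)
{
	struct fcport *p = alloc_fcport();

	if (p) {
		free(p->ct_sns);
		free(p);
	}
	return 0;
}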
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9381171c2fc0..11e64b50497f 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1784,8 +1784,10 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
}
- shost->max_sectors = min_t(unsigned int, shost->max_sectors,
- dma_max_mapping_size(dev) << SECTOR_SHIFT);
+ if (dev->dma_mask) {
+ shost->max_sectors = min_t(unsigned int, shost->max_sectors,
+ dma_max_mapping_size(dev) >> SECTOR_SHIFT);
+ }
blk_queue_max_hw_sectors(q, shost->max_sectors);
if (shost->unchecked_isa_dma)
blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
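[Editor's note] The __scsi_init_queue() change above only applies the clamp when the device actually has a DMA mask and converts dma_max_mapping_size() from bytes to sectors with a right shift (the replaced line shifted the wrong way). A userspace model of the resulting arithmetic, with illustrative numbers:

#include <stdio.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors */

static unsigned int clamp_max_sectors(unsigned int max_sectors,
				      unsigned long long max_mapping_bytes)
{
	unsigned long long mapping_sectors = max_mapping_bytes >> SECTOR_SHIFT;

	return mapping_sectors < max_sectors ?
		(unsigned int)mapping_sectors : max_sectors;
}

int main(void)
{
	/* e.g. a swiotlb setup commonly limits one mapping to 256 KiB */
	printf("%u\n", clamp_max_sectors(2048, 256 * 1024));	/* prints 512 */
	return 0;
}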
diff --git a/drivers/soc/fsl/qe/qe.c b/drivers/soc/fsl/qe/qe.c
index 62c6ba17991a..c9519e62308c 100644
--- a/drivers/soc/fsl/qe/qe.c
+++ b/drivers/soc/fsl/qe/qe.c
@@ -419,7 +419,7 @@ static void qe_upload_microcode(const void *base,
/*
* Upload a microcode to the I-RAM at a specific address.
*
- * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
+ * See Documentation/powerpc/qe_firmware.rst for information on QE microcode
* uploading.
*
* Currently, only version 1 is supported, so the 'version' field must be
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 6f243a90c844..840b1b8ff3dc 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -834,7 +834,8 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
/* handle all the 3-wire mode */
- if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+ if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
+ tfr->rx_buf != ctlr->dummy_rx)
cs |= BCM2835_SPI_CS_REN;
else
cs &= ~BCM2835_SPI_CS_REN;
diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
index 41a49b93ca60..448c00e4065b 100644
--- a/drivers/spi/spi-fsl-qspi.c
+++ b/drivers/spi/spi-fsl-qspi.c
@@ -206,7 +206,7 @@ static const struct fsl_qspi_devtype_data imx6sx_data = {
};
static const struct fsl_qspi_devtype_data imx7d_data = {
- .rxfifo = SZ_512,
+ .rxfifo = SZ_128,
.txfifo = SZ_512,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index eca9d52ecf65..9eb82150666e 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -410,6 +410,12 @@ static int spi_gpio_probe(struct platform_device *pdev)
bb = &spi_gpio->bitbang;
bb->master = master;
+ /*
+ * There is some additional business, apart from driving the CS GPIO
+ * line, that we need to do on selection. This makes the local
+ * callback for chipselect always get called.
+ */
+ master->flags |= SPI_MASTER_GPIO_SS;
bb->chipselect = spi_gpio_chipselect;
bb->set_line_direction = spi_gpio_set_direction;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fc7ab4b26880..bb6a14d1ab0f 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1457,6 +1457,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x02aa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02ab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x02fb), LPSS_CNL_SSP },
+ /* TGL-LP */
+ { PCI_VDEVICE(INTEL, 0xa0aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0de), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0df), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fb), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fd), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0xa0fe), LPSS_CNL_SSP },
{ },
};
@@ -1831,14 +1839,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
status = devm_spi_register_controller(&pdev->dev, controller);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi controller\n");
- goto out_error_clock_enabled;
+ goto out_error_pm_runtime_enabled;
}
return status;
-out_error_clock_enabled:
+out_error_pm_runtime_enabled:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+
+out_error_clock_enabled:
clk_disable_unprepare(ssp->clk);
out_error_dma_irq_alloc:
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 22dd4c457d6a..c70caf4ea490 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -875,10 +875,12 @@ static u8 cxgbit_get_iscsi_dcb_priority(struct net_device *ndev, u16 local_port)
return 0;
if (caps & DCB_CAP_DCBX_VER_IEEE) {
- iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
-
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_STREAM;
ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
-
+ if (!ret) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ ret = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ }
} else if (caps & DCB_CAP_DCBX_VER_CEE) {
iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
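[Editor's note] The cxgbit change above queries the IEEE "stream" application selector first and only falls back to "any" when that lookup returns no priority mask. A minimal model of the try-then-fallback pattern; get_app_mask() here is a hypothetical stand-in for dcb_ieee_getapp_mask():

#include <stdio.h>

enum { SEL_STREAM, SEL_ANY };

static unsigned int get_app_mask(int selector, unsigned short port)
{
	(void)port;
	return selector == SEL_ANY ? 0x08 : 0;	/* pretend only 'any' is configured */
}

static unsigned int lookup_priority(unsigned short port)
{
	unsigned int mask = get_app_mask(SEL_STREAM, port);

	if (!mask)
		mask = get_app_mask(SEL_ANY, port);
	return mask;
}

int main(void)
{
	printf("priority mask: 0x%02x\n", lookup_priority(3260));
	return 0;
}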
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_main.c b/drivers/target/iscsi/cxgbit/cxgbit_main.c
index 343b129c2cfa..e877b917c15f 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_main.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_main.c
@@ -589,7 +589,8 @@ static void cxgbit_dcb_workfn(struct work_struct *work)
iscsi_app = &dcb_work->dcb_app;
if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
- if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ if ((iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_STREAM) &&
+ (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY))
goto out;
priority = iscsi_app->app.priority;
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index 213ab3cc6b80..d3446acf9bbd 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -487,6 +487,7 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev,
rapl_mmio_cpu_online, rapl_mmio_cpu_down_prep);
if (ret < 0) {
powercap_unregister_control_type(rapl_mmio_priv.control_type);
+ rapl_mmio_priv.control_type = NULL;
return ret;
}
rapl_mmio_priv.pcap_rapl_online = ret;
@@ -496,6 +497,9 @@ static int proc_thermal_rapl_add(struct pci_dev *pdev,
static void proc_thermal_rapl_remove(void)
{
+ if (IS_ERR_OR_NULL(rapl_mmio_priv.control_type))
+ return;
+
cpuhp_remove_state(rapl_mmio_priv.pcap_rapl_online);
powercap_unregister_control_type(rapl_mmio_priv.control_type);
}
diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
index cb4db1b3ca3c..5fb214e67d73 100644
--- a/drivers/tty/hvc/hvcs.c
+++ b/drivers/tty/hvc/hvcs.c
@@ -47,7 +47,7 @@
* using the 2.6 Linux kernel kref construct.
*
* For direction on installation and usage of this driver please reference
- * Documentation/powerpc/hvcs.txt.
+ * Documentation/powerpc/hvcs.rst.
*/
#include <linux/device.h>
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index fd385c8c53a5..3083dbae35f7 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1035,25 +1035,6 @@ config SERIAL_VT8500_CONSOLE
depends on SERIAL_VT8500=y
select SERIAL_CORE_CONSOLE
-config SERIAL_NETX
- tristate "NetX serial port support"
- depends on ARCH_NETX
- select SERIAL_CORE
- help
- If you have a machine based on a Hilscher NetX SoC you
- can enable its onboard serial port by enabling this option.
-
- To compile this driver as a module, choose M here: the
- module will be called netx-serial.
-
-config SERIAL_NETX_CONSOLE
- bool "Console on NetX serial port"
- depends on SERIAL_NETX=y
- select SERIAL_CORE_CONSOLE
- help
- If you have enabled the serial port on the Hilscher NetX SoC
- you can make it the console by answering Y to this option.
-
config SERIAL_OMAP
tristate "OMAP serial port support"
depends on ARCH_OMAP2PLUS
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index 7cd7cabfa6c4..15a0fccadf7e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -59,7 +59,6 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o
-obj-$(CONFIG_SERIAL_NETX) += netx-serial.o
obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
deleted file mode 100644
index b3556863491f..000000000000
--- a/drivers/tty/serial/netx-serial.c
+++ /dev/null
@@ -1,733 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
- */
-
-#if defined(CONFIG_SERIAL_NETX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
-#define SUPPORT_SYSRQ
-#endif
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/console.h>
-#include <linux/sysrq.h>
-#include <linux/platform_device.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <mach/netx-regs.h>
-
-/* We've been assigned a range on the "Low-density serial ports" major */
-#define SERIAL_NX_MAJOR 204
-#define MINOR_START 170
-
-enum uart_regs {
- UART_DR = 0x00,
- UART_SR = 0x04,
- UART_LINE_CR = 0x08,
- UART_BAUDDIV_MSB = 0x0c,
- UART_BAUDDIV_LSB = 0x10,
- UART_CR = 0x14,
- UART_FR = 0x18,
- UART_IIR = 0x1c,
- UART_ILPR = 0x20,
- UART_RTS_CR = 0x24,
- UART_RTS_LEAD = 0x28,
- UART_RTS_TRAIL = 0x2c,
- UART_DRV_ENABLE = 0x30,
- UART_BRM_CR = 0x34,
- UART_RXFIFO_IRQLEVEL = 0x38,
- UART_TXFIFO_IRQLEVEL = 0x3c,
-};
-
-#define SR_FE (1<<0)
-#define SR_PE (1<<1)
-#define SR_BE (1<<2)
-#define SR_OE (1<<3)
-
-#define LINE_CR_BRK (1<<0)
-#define LINE_CR_PEN (1<<1)
-#define LINE_CR_EPS (1<<2)
-#define LINE_CR_STP2 (1<<3)
-#define LINE_CR_FEN (1<<4)
-#define LINE_CR_5BIT (0<<5)
-#define LINE_CR_6BIT (1<<5)
-#define LINE_CR_7BIT (2<<5)
-#define LINE_CR_8BIT (3<<5)
-#define LINE_CR_BITS_MASK (3<<5)
-
-#define CR_UART_EN (1<<0)
-#define CR_SIREN (1<<1)
-#define CR_SIRLP (1<<2)
-#define CR_MSIE (1<<3)
-#define CR_RIE (1<<4)
-#define CR_TIE (1<<5)
-#define CR_RTIE (1<<6)
-#define CR_LBE (1<<7)
-
-#define FR_CTS (1<<0)
-#define FR_DSR (1<<1)
-#define FR_DCD (1<<2)
-#define FR_BUSY (1<<3)
-#define FR_RXFE (1<<4)
-#define FR_TXFF (1<<5)
-#define FR_RXFF (1<<6)
-#define FR_TXFE (1<<7)
-
-#define IIR_MIS (1<<0)
-#define IIR_RIS (1<<1)
-#define IIR_TIS (1<<2)
-#define IIR_RTIS (1<<3)
-#define IIR_MASK 0xf
-
-#define RTS_CR_AUTO (1<<0)
-#define RTS_CR_RTS (1<<1)
-#define RTS_CR_COUNT (1<<2)
-#define RTS_CR_MOD2 (1<<3)
-#define RTS_CR_RTS_POL (1<<4)
-#define RTS_CR_CTS_CTR (1<<5)
-#define RTS_CR_CTS_POL (1<<6)
-#define RTS_CR_STICK (1<<7)
-
-#define UART_PORT_SIZE 0x40
-#define DRIVER_NAME "netx-uart"
-
-struct netx_port {
- struct uart_port port;
-};
-
-static void netx_stop_tx(struct uart_port *port)
-{
- unsigned int val;
- val = readl(port->membase + UART_CR);
- writel(val & ~CR_TIE, port->membase + UART_CR);
-}
-
-static void netx_stop_rx(struct uart_port *port)
-{
- unsigned int val;
- val = readl(port->membase + UART_CR);
- writel(val & ~CR_RIE, port->membase + UART_CR);
-}
-
-static void netx_enable_ms(struct uart_port *port)
-{
- unsigned int val;
- val = readl(port->membase + UART_CR);
- writel(val | CR_MSIE, port->membase + UART_CR);
-}
-
-static inline void netx_transmit_buffer(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->state->xmit;
-
- if (port->x_char) {
- writel(port->x_char, port->membase + UART_DR);
- port->icount.tx++;
- port->x_char = 0;
- return;
- }
-
- if (uart_tx_stopped(port) || uart_circ_empty(xmit)) {
- netx_stop_tx(port);
- return;
- }
-
- do {
- /* send xmit->buf[xmit->tail]
- * out the port here */
- writel(xmit->buf[xmit->tail], port->membase + UART_DR);
- xmit->tail = (xmit->tail + 1) &
- (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- if (uart_circ_empty(xmit))
- break;
- } while (!(readl(port->membase + UART_FR) & FR_TXFF));
-
- if (uart_circ_empty(xmit))
- netx_stop_tx(port);
-}
-
-static void netx_start_tx(struct uart_port *port)
-{
- writel(
- readl(port->membase + UART_CR) | CR_TIE, port->membase + UART_CR);
-
- if (!(readl(port->membase + UART_FR) & FR_TXFF))
- netx_transmit_buffer(port);
-}
-
-static unsigned int netx_tx_empty(struct uart_port *port)
-{
- return readl(port->membase + UART_FR) & FR_BUSY ? 0 : TIOCSER_TEMT;
-}
-
-static void netx_txint(struct uart_port *port)
-{
- struct circ_buf *xmit = &port->state->xmit;
-
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- netx_stop_tx(port);
- return;
- }
-
- netx_transmit_buffer(port);
-
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(port);
-}
-
-static void netx_rxint(struct uart_port *port, unsigned long *flags)
-{
- unsigned char rx, flg, status;
-
- while (!(readl(port->membase + UART_FR) & FR_RXFE)) {
- rx = readl(port->membase + UART_DR);
- flg = TTY_NORMAL;
- port->icount.rx++;
- status = readl(port->membase + UART_SR);
- if (status & SR_BE) {
- writel(0, port->membase + UART_SR);
- if (uart_handle_break(port))
- continue;
- }
-
- if (unlikely(status & (SR_FE | SR_PE | SR_OE))) {
-
- if (status & SR_PE)
- port->icount.parity++;
- else if (status & SR_FE)
- port->icount.frame++;
- if (status & SR_OE)
- port->icount.overrun++;
-
- status &= port->read_status_mask;
-
- if (status & SR_BE)
- flg = TTY_BREAK;
- else if (status & SR_PE)
- flg = TTY_PARITY;
- else if (status & SR_FE)
- flg = TTY_FRAME;
- }
-
- if (uart_handle_sysrq_char(port, rx))
- continue;
-
- uart_insert_char(port, status, SR_OE, rx, flg);
- }
-
- spin_unlock_irqrestore(&port->lock, *flags);
- tty_flip_buffer_push(&port->state->port);
- spin_lock_irqsave(&port->lock, *flags);
-}
-
-static irqreturn_t netx_int(int irq, void *dev_id)
-{
- struct uart_port *port = dev_id;
- unsigned long flags;
- unsigned char status;
-
- spin_lock_irqsave(&port->lock,flags);
-
- status = readl(port->membase + UART_IIR) & IIR_MASK;
- while (status) {
- if (status & IIR_RIS)
- netx_rxint(port, &flags);
- if (status & IIR_TIS)
- netx_txint(port);
- if (status & IIR_MIS) {
- if (readl(port->membase + UART_FR) & FR_CTS)
- uart_handle_cts_change(port, 1);
- else
- uart_handle_cts_change(port, 0);
- }
- writel(0, port->membase + UART_IIR);
- status = readl(port->membase + UART_IIR) & IIR_MASK;
- }
-
- spin_unlock_irqrestore(&port->lock,flags);
- return IRQ_HANDLED;
-}
-
-static unsigned int netx_get_mctrl(struct uart_port *port)
-{
- unsigned int ret = TIOCM_DSR | TIOCM_CAR;
-
- if (readl(port->membase + UART_FR) & FR_CTS)
- ret |= TIOCM_CTS;
-
- return ret;
-}
-
-static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl)
-{
- unsigned int val;
-
- /* FIXME: Locking needed ? */
- if (mctrl & TIOCM_RTS) {
- val = readl(port->membase + UART_RTS_CR);
- writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR);
- }
-}
-
-static void netx_break_ctl(struct uart_port *port, int break_state)
-{
- unsigned int line_cr;
- spin_lock_irq(&port->lock);
-
- line_cr = readl(port->membase + UART_LINE_CR);
- if (break_state != 0)
- line_cr |= LINE_CR_BRK;
- else
- line_cr &= ~LINE_CR_BRK;
- writel(line_cr, port->membase + UART_LINE_CR);
-
- spin_unlock_irq(&port->lock);
-}
-
-static int netx_startup(struct uart_port *port)
-{
- int ret;
-
- ret = request_irq(port->irq, netx_int, 0,
- DRIVER_NAME, port);
- if (ret) {
- dev_err(port->dev, "unable to grab irq%d\n",port->irq);
- goto exit;
- }
-
- writel(readl(port->membase + UART_LINE_CR) | LINE_CR_FEN,
- port->membase + UART_LINE_CR);
-
- writel(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE | CR_UART_EN,
- port->membase + UART_CR);
-
-exit:
- return ret;
-}
-
-static void netx_shutdown(struct uart_port *port)
-{
- writel(0, port->membase + UART_CR) ;
-
- free_irq(port->irq, port);
-}
-
-static void
-netx_set_termios(struct uart_port *port, struct ktermios *termios,
- struct ktermios *old)
-{
- unsigned int baud, quot;
- unsigned char old_cr;
- unsigned char line_cr = LINE_CR_FEN;
- unsigned char rts_cr = 0;
-
- switch (termios->c_cflag & CSIZE) {
- case CS5:
- line_cr |= LINE_CR_5BIT;
- break;
- case CS6:
- line_cr |= LINE_CR_6BIT;
- break;
- case CS7:
- line_cr |= LINE_CR_7BIT;
- break;
- case CS8:
- line_cr |= LINE_CR_8BIT;
- break;
- }
-
- if (termios->c_cflag & CSTOPB)
- line_cr |= LINE_CR_STP2;
-
- if (termios->c_cflag & PARENB) {
- line_cr |= LINE_CR_PEN;
- if (!(termios->c_cflag & PARODD))
- line_cr |= LINE_CR_EPS;
- }
-
- if (termios->c_cflag & CRTSCTS)
- rts_cr = RTS_CR_AUTO | RTS_CR_CTS_CTR | RTS_CR_RTS_POL;
-
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
- quot = baud * 4096;
- quot /= 1000;
- quot *= 256;
- quot /= 100000;
-
- spin_lock_irq(&port->lock);
-
- uart_update_timeout(port, termios->c_cflag, baud);
-
- old_cr = readl(port->membase + UART_CR);
-
- /* disable interrupts */
- writel(old_cr & ~(CR_MSIE | CR_RIE | CR_TIE | CR_RTIE),
- port->membase + UART_CR);
-
- /* drain transmitter */
- while (readl(port->membase + UART_FR) & FR_BUSY);
-
- /* disable UART */
- writel(old_cr & ~CR_UART_EN, port->membase + UART_CR);
-
- /* modem status interrupts */
- old_cr &= ~CR_MSIE;
- if (UART_ENABLE_MS(port, termios->c_cflag))
- old_cr |= CR_MSIE;
-
- writel((quot>>8) & 0xff, port->membase + UART_BAUDDIV_MSB);
- writel(quot & 0xff, port->membase + UART_BAUDDIV_LSB);
- writel(line_cr, port->membase + UART_LINE_CR);
-
- writel(rts_cr, port->membase + UART_RTS_CR);
-
- /*
- * Characters to ignore
- */
- port->ignore_status_mask = 0;
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= SR_PE;
- if (termios->c_iflag & IGNBRK) {
- port->ignore_status_mask |= SR_BE;
- /*
- * If we're ignoring parity and break indicators,
- * ignore overruns too (for real raw support).
- */
- if (termios->c_iflag & IGNPAR)
- port->ignore_status_mask |= SR_PE;
- }
-
- port->read_status_mask = 0;
- if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
- port->read_status_mask |= SR_BE;
- if (termios->c_iflag & INPCK)
- port->read_status_mask |= SR_PE | SR_FE;
-
- writel(old_cr, port->membase + UART_CR);
-
- spin_unlock_irq(&port->lock);
-}
-
-static const char *netx_type(struct uart_port *port)
-{
- return port->type == PORT_NETX ? "NETX" : NULL;
-}
-
-static void netx_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, UART_PORT_SIZE);
-}
-
-static int netx_request_port(struct uart_port *port)
-{
- return request_mem_region(port->mapbase, UART_PORT_SIZE,
- DRIVER_NAME) != NULL ? 0 : -EBUSY;
-}
-
-static void netx_config_port(struct uart_port *port, int flags)
-{
- if (flags & UART_CONFIG_TYPE && netx_request_port(port) == 0)
- port->type = PORT_NETX;
-}
-
-static int
-netx_verify_port(struct uart_port *port, struct serial_struct *ser)
-{
- int ret = 0;
-
- if (ser->type != PORT_UNKNOWN && ser->type != PORT_NETX)
- ret = -EINVAL;
-
- return ret;
-}
-
-static struct uart_ops netx_pops = {
- .tx_empty = netx_tx_empty,
- .set_mctrl = netx_set_mctrl,
- .get_mctrl = netx_get_mctrl,
- .stop_tx = netx_stop_tx,
- .start_tx = netx_start_tx,
- .stop_rx = netx_stop_rx,
- .enable_ms = netx_enable_ms,
- .break_ctl = netx_break_ctl,
- .startup = netx_startup,
- .shutdown = netx_shutdown,
- .set_termios = netx_set_termios,
- .type = netx_type,
- .release_port = netx_release_port,
- .request_port = netx_request_port,
- .config_port = netx_config_port,
- .verify_port = netx_verify_port,
-};
-
-static struct netx_port netx_ports[] = {
- {
- .port = {
- .type = PORT_NETX,
- .iotype = UPIO_MEM,
- .membase = (char __iomem *)io_p2v(NETX_PA_UART0),
- .mapbase = NETX_PA_UART0,
- .irq = NETX_IRQ_UART0,
- .uartclk = 100000000,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .ops = &netx_pops,
- .line = 0,
- },
- }, {
- .port = {
- .type = PORT_NETX,
- .iotype = UPIO_MEM,
- .membase = (char __iomem *)io_p2v(NETX_PA_UART1),
- .mapbase = NETX_PA_UART1,
- .irq = NETX_IRQ_UART1,
- .uartclk = 100000000,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .ops = &netx_pops,
- .line = 1,
- },
- }, {
- .port = {
- .type = PORT_NETX,
- .iotype = UPIO_MEM,
- .membase = (char __iomem *)io_p2v(NETX_PA_UART2),
- .mapbase = NETX_PA_UART2,
- .irq = NETX_IRQ_UART2,
- .uartclk = 100000000,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .ops = &netx_pops,
- .line = 2,
- },
- }
-};
-
-#ifdef CONFIG_SERIAL_NETX_CONSOLE
-
-static void netx_console_putchar(struct uart_port *port, int ch)
-{
- while (readl(port->membase + UART_FR) & FR_BUSY);
- writel(ch, port->membase + UART_DR);
-}
-
-static void
-netx_console_write(struct console *co, const char *s, unsigned int count)
-{
- struct uart_port *port = &netx_ports[co->index].port;
- unsigned char cr_save;
-
- cr_save = readl(port->membase + UART_CR);
- writel(cr_save | CR_UART_EN, port->membase + UART_CR);
-
- uart_console_write(port, s, count, netx_console_putchar);
-
- while (readl(port->membase + UART_FR) & FR_BUSY);
- writel(cr_save, port->membase + UART_CR);
-}
-
-static void __init
-netx_console_get_options(struct uart_port *port, int *baud,
- int *parity, int *bits, int *flow)
-{
- unsigned char line_cr;
-
- *baud = (readl(port->membase + UART_BAUDDIV_MSB) << 8) |
- readl(port->membase + UART_BAUDDIV_LSB);
- *baud *= 1000;
- *baud /= 4096;
- *baud *= 1000;
- *baud /= 256;
- *baud *= 100;
-
- line_cr = readl(port->membase + UART_LINE_CR);
- *parity = 'n';
- if (line_cr & LINE_CR_PEN) {
- if (line_cr & LINE_CR_EPS)
- *parity = 'e';
- else
- *parity = 'o';
- }
-
- switch (line_cr & LINE_CR_BITS_MASK) {
- case LINE_CR_8BIT:
- *bits = 8;
- break;
- case LINE_CR_7BIT:
- *bits = 7;
- break;
- case LINE_CR_6BIT:
- *bits = 6;
- break;
- case LINE_CR_5BIT:
- *bits = 5;
- break;
- }
-
- if (readl(port->membase + UART_RTS_CR) & RTS_CR_AUTO)
- *flow = 'r';
-}
-
-static int __init
-netx_console_setup(struct console *co, char *options)
-{
- struct netx_port *sport;
- int baud = 9600;
- int bits = 8;
- int parity = 'n';
- int flow = 'n';
-
- /*
- * Check whether an invalid uart number has been specified, and
- * if so, search for the first available port that does have
- * console support.
- */
- if (co->index == -1 || co->index >= ARRAY_SIZE(netx_ports))
- co->index = 0;
- sport = &netx_ports[co->index];
-
- if (options) {
- uart_parse_options(options, &baud, &parity, &bits, &flow);
- } else {
- /* if the UART is enabled, assume it has been correctly setup
- * by the bootloader and get the options
- */
- if (readl(sport->port.membase + UART_CR) & CR_UART_EN) {
- netx_console_get_options(&sport->port, &baud,
- &parity, &bits, &flow);
- }
-
- }
-
- return uart_set_options(&sport->port, co, baud, parity, bits, flow);
-}
-
-static struct uart_driver netx_reg;
-static struct console netx_console = {
- .name = "ttyNX",
- .write = netx_console_write,
- .device = uart_console_device,
- .setup = netx_console_setup,
- .flags = CON_PRINTBUFFER,
- .index = -1,
- .data = &netx_reg,
-};
-
-static int __init netx_console_init(void)
-{
- register_console(&netx_console);
- return 0;
-}
-console_initcall(netx_console_init);
-
-#define NETX_CONSOLE &netx_console
-#else
-#define NETX_CONSOLE NULL
-#endif
-
-static struct uart_driver netx_reg = {
- .owner = THIS_MODULE,
- .driver_name = DRIVER_NAME,
- .dev_name = "ttyNX",
- .major = SERIAL_NX_MAJOR,
- .minor = MINOR_START,
- .nr = ARRAY_SIZE(netx_ports),
- .cons = NETX_CONSOLE,
-};
-
-static int serial_netx_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct netx_port *sport = platform_get_drvdata(pdev);
-
- if (sport)
- uart_suspend_port(&netx_reg, &sport->port);
-
- return 0;
-}
-
-static int serial_netx_resume(struct platform_device *pdev)
-{
- struct netx_port *sport = platform_get_drvdata(pdev);
-
- if (sport)
- uart_resume_port(&netx_reg, &sport->port);
-
- return 0;
-}
-
-static int serial_netx_probe(struct platform_device *pdev)
-{
- struct uart_port *port = &netx_ports[pdev->id].port;
-
- dev_info(&pdev->dev, "initialising\n");
-
- port->dev = &pdev->dev;
-
- writel(1, port->membase + UART_RXFIFO_IRQLEVEL);
- uart_add_one_port(&netx_reg, &netx_ports[pdev->id].port);
- platform_set_drvdata(pdev, &netx_ports[pdev->id]);
-
- return 0;
-}
-
-static int serial_netx_remove(struct platform_device *pdev)
-{
- struct netx_port *sport = platform_get_drvdata(pdev);
-
- if (sport)
- uart_remove_one_port(&netx_reg, &sport->port);
-
- return 0;
-}
-
-static struct platform_driver serial_netx_driver = {
- .probe = serial_netx_probe,
- .remove = serial_netx_remove,
-
- .suspend = serial_netx_suspend,
- .resume = serial_netx_resume,
-
- .driver = {
- .name = DRIVER_NAME,
- },
-};
-
-static int __init netx_serial_init(void)
-{
- int ret;
-
- printk(KERN_INFO "Serial: NetX driver\n");
-
- ret = uart_register_driver(&netx_reg);
- if (ret)
- return ret;
-
- ret = platform_driver_register(&serial_netx_driver);
- if (ret != 0)
- uart_unregister_driver(&netx_reg);
-
- return 0;
-}
-
-static void __exit netx_serial_exit(void)
-{
- platform_driver_unregister(&serial_netx_driver);
- uart_unregister_driver(&netx_reg);
-}
-
-module_init(netx_serial_init);
-module_exit(netx_serial_exit);
-
-MODULE_AUTHOR("Sascha Hauer");
-MODULE_DESCRIPTION("NetX serial port driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 717292c1c0df..60ff236a3d63 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
tsk = waiter->task;
- smp_mb();
- waiter->task = NULL;
+ smp_store_release(&waiter->task, NULL);
wake_up_process(tsk);
put_task_struct(tsk);
}
@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (!waiter.task)
+ if (!smp_load_acquire(&waiter.task))
break;
if (!timeout)
break;
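[Editor's note] The tty_ldsem change above replaces an smp_mb() plus plain store on the waker side with smp_store_release(), paired with smp_load_acquire() in the reader's wait loop. A userspace sketch of the same pairing using C11 atomics and pthreads (build with -pthread); the spin loop stands in for the kernel's sleeping wait:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) task;
static int the_task = 42;

static void *waker(void *arg)
{
	(void)arg;
	/* everything written before this release store is visible to the
	 * reader that observes task == NULL with an acquire load */
	atomic_store_explicit(&task, NULL, memory_order_release);
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&task, &the_task);
	pthread_create(&t, NULL, waker, NULL);

	/* like down_read_failed(): wait until the waker hands us off by
	 * clearing task (the kernel sleeps instead of spinning) */
	while (atomic_load_explicit(&task, memory_order_acquire) != NULL)
		;

	pthread_join(t, NULL);
	puts("reader woken");
	return 0;
}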
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index ec92f36ab5c4..34aa39d1aed9 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3771,7 +3771,11 @@ static ssize_t show_bind(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct con_driver *con = dev_get_drvdata(dev);
- int bind = con_is_bound(con->con);
+ int bind;
+
+ console_lock();
+ bind = con_is_bound(con->con);
+ console_unlock();
return snprintf(buf, PAGE_SIZE, "%i\n", bind);
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 88533938ce19..9320787ac2e6 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -3052,8 +3052,8 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr,
local_mem = devm_memremap(hcd->self.sysdev, phys_addr,
size, MEMREMAP_WC);
- if (!local_mem)
- return -ENOMEM;
+ if (IS_ERR(local_mem))
+ return PTR_ERR(local_mem);
/*
* Here we pass a dma_addr_t but the arg type is a phys_addr_t.
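[Editor's note] The usb_hcd_setup_local_mem() fix above matters because devm_memremap() reports failure with an ERR_PTR-encoded errno rather than NULL, so the old "!local_mem" test could never trigger. A userspace model of that convention, with simplified IS_ERR()/PTR_ERR() macros:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))

static void *fake_memremap(int fail)
{
	static char region[64];

	return fail ? ERR_PTR(-ENOMEM) : region;	/* never returns NULL */
}

int main(void)
{
	void *mem = fake_memremap(1);

	if (IS_ERR(mem)) {		/* a plain '!mem' check would miss this */
		printf("remap failed: %ld\n", PTR_ERR(mem));
		return 1;
	}
	return 0;
}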
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index fe9422d3bcdc..b0882c13a1d1 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -149,7 +149,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
case PCI_VENDOR_ID_AMD:
/* AMD PLL quirk */
- if (usb_amd_find_chipset_info())
+ if (usb_amd_quirk_pll_check())
ehci->amd_pll_fix = 1;
/* AMD8111 EHCI doesn't work, according to AMD errata */
if (pdev->device == 0x7463) {
@@ -186,7 +186,7 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
case PCI_VENDOR_ID_ATI:
/* AMD PLL quirk */
- if (usb_amd_find_chipset_info())
+ if (usb_amd_quirk_pll_check())
ehci->amd_pll_fix = 1;
/*
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 09a8ebd95588..6968b9f2b76b 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -159,7 +159,7 @@ out:
return result;
error_set_cluster_id:
- wusb_cluster_id_put(wusbhc->cluster_id);
+ wusb_cluster_id_put(addr);
error_cluster_id_get:
goto out;
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index a033f7d855e0..f4e13a3fddee 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -152,7 +152,7 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- if (usb_amd_find_chipset_info())
+ if (usb_amd_quirk_pll_check())
ohci->flags |= OHCI_QUIRK_AMD_PLL;
/* SB800 needs pre-fetch fix */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3ce71cbfbb58..f6d04491df60 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -132,7 +132,7 @@ static struct amd_chipset_info {
struct amd_chipset_type sb_type;
int isoc_reqs;
int probe_count;
- int probe_result;
+ bool need_pll_quirk;
} amd_chipset;
static DEFINE_SPINLOCK(amd_lock);
@@ -201,11 +201,11 @@ void sb800_prefetch(struct device *dev, int on)
}
EXPORT_SYMBOL_GPL(sb800_prefetch);
-int usb_amd_find_chipset_info(void)
+static void usb_amd_find_chipset_info(void)
{
unsigned long flags;
struct amd_chipset_info info;
- int ret;
+ info.need_pll_quirk = 0;
spin_lock_irqsave(&amd_lock, flags);
@@ -213,27 +213,34 @@ int usb_amd_find_chipset_info(void)
if (amd_chipset.probe_count > 0) {
amd_chipset.probe_count++;
spin_unlock_irqrestore(&amd_lock, flags);
- return amd_chipset.probe_result;
+ return;
}
memset(&info, 0, sizeof(info));
spin_unlock_irqrestore(&amd_lock, flags);
if (!amd_chipset_sb_type_init(&info)) {
- ret = 0;
goto commit;
}
- /* Below chipset generations needn't enable AMD PLL quirk */
- if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
- info.sb_type.gen == AMD_CHIPSET_SB600 ||
- info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
- (info.sb_type.gen == AMD_CHIPSET_SB700 &&
- info.sb_type.rev > 0x3b)) {
+ switch (info.sb_type.gen) {
+ case AMD_CHIPSET_SB700:
+ info.need_pll_quirk = info.sb_type.rev <= 0x3B;
+ break;
+ case AMD_CHIPSET_SB800:
+ case AMD_CHIPSET_HUDSON2:
+ case AMD_CHIPSET_BOLTON:
+ info.need_pll_quirk = 1;
+ break;
+ default:
+ info.need_pll_quirk = 0;
+ break;
+ }
+
+ if (!info.need_pll_quirk) {
if (info.smbus_dev) {
pci_dev_put(info.smbus_dev);
info.smbus_dev = NULL;
}
- ret = 0;
goto commit;
}
@@ -252,7 +259,6 @@ int usb_amd_find_chipset_info(void)
}
}
- ret = info.probe_result = 1;
printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
commit:
@@ -263,7 +269,6 @@ commit:
/* Mark that we where here */
amd_chipset.probe_count++;
- ret = amd_chipset.probe_result;
spin_unlock_irqrestore(&amd_lock, flags);
@@ -276,10 +281,7 @@ commit:
amd_chipset = info;
spin_unlock_irqrestore(&amd_lock, flags);
}
-
- return ret;
}
-EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev)
{
@@ -315,6 +317,13 @@ bool usb_amd_prefetch_quirk(void)
}
EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk);
+bool usb_amd_quirk_pll_check(void)
+{
+ usb_amd_find_chipset_info();
+ return amd_chipset.need_pll_quirk;
+}
+EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_check);
+
/*
* The hardware normally enables the A-link power management feature, which
* lets the system lower the power consumption in idle states.
@@ -520,7 +529,7 @@ void usb_amd_dev_put(void)
amd_chipset.nb_type = 0;
memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type));
amd_chipset.isoc_reqs = 0;
- amd_chipset.probe_result = 0;
+ amd_chipset.need_pll_quirk = 0;
spin_unlock_irqrestore(&amd_lock, flags);
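[Editor's note] The pci-quirks rework above makes usb_amd_find_chipset_info() static, replaces the exported probe_result with a cached need_pll_quirk flag, and gives the HCD drivers a small usb_amd_quirk_pll_check() predicate instead. A compact C sketch of that cached-probe-plus-predicate shape; detect_chipset() is a hypothetical stand-in for the PCI probing, and the real code also takes a spinlock around the cache:

#include <stdbool.h>
#include <stdio.h>

static struct {
	int probe_count;
	bool need_pll_quirk;
} chipset;

static bool detect_chipset(void)
{
	return true;	/* pretend an SB800-class southbridge was found */
}

static void find_chipset_info(void)
{
	if (chipset.probe_count++ > 0)
		return;	/* already probed, keep the cached answer */
	chipset.need_pll_quirk = detect_chipset();
}

bool quirk_pll_check(void)
{
	find_chipset_info();
	return chipset.need_pll_quirk;
}

int main(void)
{
	printf("need PLL quirk: %d\n", quirk_pll_check());
	return 0;
}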
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 63c633077d9e..e729de21fad7 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -5,11 +5,11 @@
#ifdef CONFIG_USB_PCI
void uhci_reset_hc(struct pci_dev *pdev, unsigned long base);
int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base);
-int usb_amd_find_chipset_info(void);
int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev);
bool usb_amd_hang_symptom_quirk(void);
bool usb_amd_prefetch_quirk(void);
void usb_amd_dev_put(void);
+bool usb_amd_quirk_pll_check(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2fe218e051f..1e0236e90687 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -130,7 +130,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_AMD_0x96_HOST;
/* AMD PLL quirk */
- if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
+ if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_quirk_pll_check())
xhci->quirks |= XHCI_AMD_PLL_FIX;
if (pdev->vendor == PCI_VENDOR_ID_AMD &&
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7a264962a1a9..f5c41448d067 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -2175,7 +2175,8 @@ static inline bool xhci_urb_suitable_for_idt(struct urb *urb)
if (!usb_endpoint_xfer_isoc(&urb->ep->desc) && usb_urb_dir_out(urb) &&
usb_endpoint_maxp(&urb->ep->desc) >= TRB_IDT_MAX_SIZE &&
urb->transfer_buffer_length <= TRB_IDT_MAX_SIZE &&
- !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP))
+ !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) &&
+ !urb->num_sgs)
return true;
return false;
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d6ae3795a88..6ca9111d150a 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -375,7 +375,8 @@ out_err:
#ifdef CONFIG_OF
static void usb251xb_get_ports_field(struct usb251xb *hub,
- const char *prop_name, u8 port_cnt, u8 *fld)
+ const char *prop_name, u8 port_cnt,
+ bool ds_only, u8 *fld)
{
struct device *dev = hub->dev;
struct property *prop;
@@ -383,7 +384,7 @@ static void usb251xb_get_ports_field(struct usb251xb *hub,
u32 port;
of_property_for_each_u32(dev->of_node, prop_name, prop, p, port) {
- if ((port >= 1) && (port <= port_cnt))
+ if ((port >= ds_only ? 1 : 0) && (port <= port_cnt))
*fld |= BIT(port);
else
dev_warn(dev, "port %u doesn't exist\n", port);
@@ -501,15 +502,15 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
hub->non_rem_dev = USB251XB_DEF_NON_REMOVABLE_DEVICES;
usb251xb_get_ports_field(hub, "non-removable-ports", data->port_cnt,
- &hub->non_rem_dev);
+ true, &hub->non_rem_dev);
hub->port_disable_sp = USB251XB_DEF_PORT_DISABLE_SELF;
usb251xb_get_ports_field(hub, "sp-disabled-ports", data->port_cnt,
- &hub->port_disable_sp);
+ true, &hub->port_disable_sp);
hub->port_disable_bp = USB251XB_DEF_PORT_DISABLE_BUS;
usb251xb_get_ports_field(hub, "bp-disabled-ports", data->port_cnt,
- &hub->port_disable_bp);
+ true, &hub->port_disable_bp);
hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
if (!of_property_read_u32(np, "sp-max-total-current-microamp",
@@ -573,9 +574,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
*/
hub->port_swap = USB251XB_DEF_PORT_SWAP;
usb251xb_get_ports_field(hub, "swap-dx-lanes", data->port_cnt,
- &hub->port_swap);
- if (of_get_property(np, "swap-us-lanes", NULL))
- hub->port_swap |= BIT(0);
+ false, &hub->port_swap);
/* The following parameters are currently not exposed to devicetree, but
* may be as soon as needed.
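[Editor's note] The usb251xb change above lets the swap-dx-lanes property address port 0 (the upstream port) by passing ds_only=false, replacing the separate swap-us-lanes lookup. One note on the new bound check: relational operators bind tighter than ?:, so "(port >= ds_only ? 1 : 0)" parses as "((port >= ds_only) ? 1 : 0)"; with a bool ds_only that still yields the intended lower bound, as this standalone check shows:

#include <stdbool.h>
#include <stdio.h>

static bool in_range(unsigned int port, unsigned int port_cnt, bool ds_only)
{
	/* lower bound is 1 for downstream-only properties, 0 otherwise */
	return (port >= (ds_only ? 1u : 0u)) && (port <= port_cnt);
}

int main(void)
{
	/* port 0 is only valid when upstream lanes may also be swapped */
	printf("%d %d\n", in_range(0, 4, true), in_range(0, 4, false)); /* 0 1 */
	return 0;
}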
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 30790240aec6..05b80211290d 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -28,6 +28,8 @@
* status of a command.
*/
+#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -99,6 +101,7 @@ static int slave_alloc (struct scsi_device *sdev)
static int slave_configure(struct scsi_device *sdev)
{
struct us_data *us = host_to_us(sdev->host);
+ struct device *dev = us->pusb_dev->bus->sysdev;
/*
* Many devices have trouble transferring more than 32KB at a time,
@@ -129,6 +132,14 @@ static int slave_configure(struct scsi_device *sdev)
}
/*
+ * The max_hw_sectors should be up to maximum size of a mapping for
+ * the device. Otherwise, a DMA API might fail on swiotlb environment.
+ */
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ min_t(size_t, queue_max_hw_sectors(sdev->request_queue),
+ dma_max_mapping_size(dev) >> SECTOR_SHIFT));
+
+ /*
* Some USB host controllers can't do DMA; they have to use PIO.
* They indicate this by setting their dma_mask to NULL. For
* such controllers we need to make sure the block layer sets
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 819296332913..42a8c2a13ab1 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -96,7 +96,7 @@ struct vhost_uaddr {
};
#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
-#define VHOST_ARCH_CAN_ACCEL_UACCESS 1
+#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#else
#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
#endif
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 4c339c7e66e5..a446a7221e13 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -1143,7 +1143,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto out_put_map;
if (!use_ptemod) {
- err = vm_map_pages(vma, map->pages, map->count);
+ err = vm_map_pages_zero(vma, map->pages, map->count);
if (err)
goto out_put_map;
} else {
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 2f5ce7230a43..c6070e70dd73 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -724,25 +724,6 @@ static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
return 0;
}
-struct remap_pfn {
- struct mm_struct *mm;
- struct page **pages;
- pgprot_t prot;
- unsigned long i;
-};
-
-static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
-{
- struct remap_pfn *r = data;
- struct page *page = r->pages[r->i];
- pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
-
- set_pte_at(r->mm, addr, ptep, pte);
- r->i++;
-
- return 0;
-}
-
static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
{
struct privcmd_data *data = file->private_data;
@@ -774,7 +755,8 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
goto out;
}
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+ xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
struct page **pages;
unsigned int i;
@@ -808,16 +790,9 @@ static long privcmd_ioctl_mmap_resource(struct file *file, void __user *udata)
if (rc)
goto out;
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- struct remap_pfn r = {
- .mm = vma->vm_mm,
- .pages = vma->vm_private_data,
- .prot = vma->vm_page_prot,
- };
-
- rc = apply_to_page_range(r.mm, kdata.addr,
- kdata.num << PAGE_SHIFT,
- remap_pfn_fn, &r);
+ if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
+ xen_feature(XENFEAT_auto_translated_physmap)) {
+ rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
} else {
unsigned int domid =
(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index cfbe46785a3b..ae1df496bf38 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -83,34 +83,18 @@ static inline dma_addr_t xen_virt_to_bus(void *address)
return xen_phys_to_bus(virt_to_phys(address));
}
-static int check_pages_physically_contiguous(unsigned long xen_pfn,
- unsigned int offset,
- size_t length)
+static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
- unsigned long next_bfn;
- int i;
- int nr_pages;
+ unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
+ unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
next_bfn = pfn_to_bfn(xen_pfn);
- nr_pages = (offset + length + XEN_PAGE_SIZE-1) >> XEN_PAGE_SHIFT;
- for (i = 1; i < nr_pages; i++) {
+ for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
- return 0;
- }
- return 1;
-}
+ return 1;
-static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
-{
- unsigned long xen_pfn = XEN_PFN_DOWN(p);
- unsigned int offset = p & ~XEN_PAGE_MASK;
-
- if (offset + size <= XEN_PAGE_SIZE)
- return 0;
- if (check_pages_physically_contiguous(xen_pfn, offset, size))
- return 0;
- return 1;
+ return 0;
}
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
@@ -338,6 +322,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
return NULL;
}
+ SetPageXenRemapped(virt_to_page(ret));
}
memset(ret, 0, size);
return ret;
@@ -361,8 +346,9 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (((dev_addr + size - 1 <= dma_mask)) ||
- range_straddles_page_boundary(phys, size))
+ if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+ range_straddles_page_boundary(phys, size)) &&
+ TestClearPageXenRemapped(virt_to_page(vaddr)))
xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
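[Editor's note] The swiotlb-xen rewrite above folds the old helper into range_straddles_page_boundary(): it computes how many Xen-sized pages the [p, p+size) range touches and walks them, reporting 1 as soon as the machine frames stop being contiguous. A userspace model of that walk; pfn_to_bfn() is mocked as the identity here, so every range comes back contiguous:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define XEN_PAGE_SHIFT	12
#define XEN_PAGE_SIZE	(1UL << XEN_PAGE_SHIFT)

static unsigned long pfn_to_bfn(unsigned long pfn)
{
	return pfn;	/* identity mock: frames are always contiguous here */
}

static int range_straddles(uint64_t p, size_t size)
{
	unsigned long xen_pfn = p >> XEN_PAGE_SHIFT;
	unsigned long next_bfn = pfn_to_bfn(xen_pfn);
	unsigned long offset = p & (XEN_PAGE_SIZE - 1);
	unsigned int nr_pages = (offset + size + XEN_PAGE_SIZE - 1) >> XEN_PAGE_SHIFT;
	unsigned int i;

	for (i = 1; i < nr_pages; i++)
		if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
			return 1;
	return 0;
}

int main(void)
{
	/* spans two pages, but the mocked bfns are contiguous, so prints 0 */
	printf("%d\n", range_straddles(0x1ff8, 32));
	return 0;
}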
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e0116..e5694133ebe5 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
{
int err;
u16 old_value;
- pci_power_t new_state, old_state;
+ pci_power_t new_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index ba883a80b3c0..7b1077f0abcb 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -262,3 +262,35 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt,
return 0;
}
EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages);
+
+struct remap_pfn {
+ struct mm_struct *mm;
+ struct page **pages;
+ pgprot_t prot;
+ unsigned long i;
+};
+
+static int remap_pfn_fn(pte_t *ptep, unsigned long addr, void *data)
+{
+ struct remap_pfn *r = data;
+ struct page *page = r->pages[r->i];
+ pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), r->prot));
+
+ set_pte_at(r->mm, addr, ptep, pte);
+ r->i++;
+
+ return 0;
+}
+
+/* Used by the privcmd module, but has to be built-in on ARM */
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
+{
+ struct remap_pfn r = {
+ .mm = vma->vm_mm,
+ .pages = vma->vm_private_data,
+ .prot = vma->vm_page_prot,
+ };
+
+ return apply_to_page_range(vma->vm_mm, addr, len, remap_pfn_fn, &r);
+}
+EXPORT_SYMBOL_GPL(xen_remap_vma_range);
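[Editor's note] xen_remap_vma_range() above moves the page-walking helper out of privcmd so it can stay built-in on ARM; it hands each PTE slot in the range to remap_pfn_fn() via apply_to_page_range(). A toy userspace model of that walk-and-callback pattern, where the "page table" is just an array of slots:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct remap_ctx {
	const unsigned long *pfns;	/* stands in for r->pages */
	unsigned long i;
};

static int remap_one(unsigned long *pte, unsigned long addr, void *data)
{
	struct remap_ctx *r = data;

	(void)addr;
	*pte = r->pfns[r->i++];		/* like set_pte_at(..., pfn_pte(...)) */
	return 0;
}

static int apply_to_range(unsigned long *ptes, unsigned long addr,
			  unsigned long len,
			  int (*fn)(unsigned long *, unsigned long, void *),
			  void *data)
{
	unsigned long end = addr + len, idx = 0;
	int ret;

	for (; addr < end; addr += PAGE_SIZE, idx++) {
		ret = fn(&ptes[idx], addr, data);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	unsigned long ptes[3] = { 0 };
	const unsigned long pfns[3] = { 0x100, 0x101, 0x102 };
	struct remap_ctx r = { .pfns = pfns };

	apply_to_range(ptes, 0x40000000UL, 3 * PAGE_SIZE, remap_one, &r);
	printf("pte[1] = 0x%lx\n", ptes[1]);	/* prints 0x101 */
	return 0;
}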
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 1ce73e014139..114f281f3687 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -339,8 +339,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->tmp_u = htonl(0);
afs_extract_to_tmp(call);
}
+ /* Fall through */
- /* Fall through - and extract the returned data length */
+ /* extract the returned data length */
case 1:
_debug("extract data length");
ret = afs_extract_data(call, true);
@@ -366,8 +367,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->bvec[0].bv_page = req->pages[req->index];
iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
+ /* Fall through */
- /* Fall through - and extract the returned data */
+ /* extract the returned data */
case 2:
_debug("extract data %zu/%llu",
iov_iter_count(&call->iter), req->remain);
@@ -394,8 +396,8 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
/* Discard any excess data the server gave us */
iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
call->unmarshall = 3;
-
/* Fall through */
+
case 3:
_debug("extract discard %zu/%llu",
iov_iter_count(&call->iter), req->actual_len - req->len);
@@ -407,8 +409,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
no_more_data:
call->unmarshall = 4;
afs_extract_to_buf(call, (21 + 3 + 6) * 4);
+ /* Fall through */
- /* Fall through - and extract the metadata */
+ /* extract the metadata */
case 4:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -1471,8 +1474,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
case 0:
call->unmarshall++;
afs_extract_to_buf(call, 12 * 4);
+ /* Fall through */
- /* Fall through - and extract the returned status record */
+ /* extract the returned status record */
case 1:
_debug("extract status");
ret = afs_extract_data(call, true);
@@ -1483,8 +1487,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
xdr_decode_AFSFetchVolumeStatus(&bp, call->out_volstatus);
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
- /* Fall through - and extract the volume name length */
+ /* extract the volume name length */
case 2:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1498,8 +1503,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the volume name */
+ /* extract the volume name */
case 3:
_debug("extract volname");
ret = afs_extract_data(call, true);
@@ -1511,8 +1517,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message length */
+ /* extract the offline message length */
case 4:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1526,8 +1533,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message */
+ /* extract the offline message */
case 5:
_debug("extract offline");
ret = afs_extract_data(call, true);
@@ -1540,8 +1548,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day length */
+ /* extract the message of the day length */
case 6:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1555,8 +1564,9 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day */
+ /* extract the message of the day */
case 7:
_debug("extract motd");
ret = afs_extract_data(call, false);
@@ -1850,8 +1860,9 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the capabilities word count */
+ /* Extract the capabilities word count */
case 1:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1863,8 +1874,9 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call)
call->count2 = count;
iov_iter_discard(&call->iter, READ, count * sizeof(__be32));
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract capabilities words */
+ /* Extract capabilities words */
case 2:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -2020,9 +2032,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the file status count and array in two steps */
- /* Fall through */
case 1:
_debug("extract status count");
ret = afs_extract_data(call, true);
@@ -2039,8 +2051,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_counts:
afs_extract_to_buf(call, 21 * sizeof(__be32));
-
/* Fall through */
+
case 2:
_debug("extract status array %u", call->count);
ret = afs_extract_data(call, true);
@@ -2060,9 +2072,9 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->count = 0;
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
/* Extract the callback count and array in two steps */
- /* Fall through */
case 3:
_debug("extract CB count");
ret = afs_extract_data(call, true);
@@ -2078,8 +2090,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_cbs:
afs_extract_to_buf(call, 3 * sizeof(__be32));
-
/* Fall through */
+
case 4:
_debug("extract CB array");
ret = afs_extract_data(call, true);
@@ -2096,8 +2108,8 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call)
afs_extract_to_buf(call, 6 * sizeof(__be32));
call->unmarshall++;
-
/* Fall through */
+
case 5:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -2193,6 +2205,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* extract the returned data length */
case 1:
@@ -2210,6 +2223,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
acl->size = call->count2;
afs_extract_begin(call, acl->data, size);
call->unmarshall++;
+ /* Fall through */
/* extract the returned data */
case 2:
@@ -2219,6 +2233,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call)
afs_extract_to_buf(call, (21 + 6) * 4);
call->unmarshall++;
+ /* Fall through */
/* extract the metadata */
case 3:
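[Editor's note] The afs/yfs churn above and below only repositions the fall-through annotations: each case body now ends with a bare "/* Fall through */" comment in the form GCC's -Wimplicit-fallthrough heuristic looks for, with the descriptive text split onto its own comment line. A minimal example that compiles warning-free with gcc -Wextra -Wimplicit-fallthrough:

#include <stdio.h>

static int step(int state)
{
	int work = 0;

	switch (state) {
	case 0:
		work++;
		/* Fall through */
	case 1:
		work++;
		break;
	default:
		break;
	}
	return work;
}

int main(void)
{
	printf("%d %d\n", step(0), step(1));	/* prints 2 1 */
	return 0;
}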
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 18722aaeda33..2575503170fc 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -450,8 +450,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
req->offset = req->pos & (PAGE_SIZE - 1);
afs_extract_to_tmp64(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the returned data length */
+ /* extract the returned data length */
case 1:
_debug("extract data length");
ret = afs_extract_data(call, true);
@@ -477,8 +478,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
call->bvec[0].bv_page = req->pages[req->index];
iov_iter_bvec(&call->iter, READ, call->bvec, 1, size);
ASSERTCMP(size, <=, PAGE_SIZE);
+ /* Fall through */
- /* Fall through - and extract the returned data */
+ /* extract the returned data */
case 2:
_debug("extract data %zu/%llu",
iov_iter_count(&call->iter), req->remain);
@@ -505,8 +507,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
/* Discard any excess data the server gave us */
iov_iter_discard(&call->iter, READ, req->actual_len - req->len);
call->unmarshall = 3;
-
/* Fall through */
+
case 3:
_debug("extract discard %zu/%llu",
iov_iter_count(&call->iter), req->actual_len - req->len);
@@ -521,8 +523,9 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSCallBack) +
sizeof(struct yfs_xdr_YFSVolSync));
+ /* Fall through */
- /* Fall through - and extract the metadata */
+ /* extract the metadata */
case 4:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -539,8 +542,8 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
req->file_size = call->out_scb->status.size;
call->unmarshall++;
-
/* Fall through */
+
case 5:
break;
}
@@ -1429,8 +1432,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
case 0:
call->unmarshall++;
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus));
+ /* Fall through */
- /* Fall through - and extract the returned status record */
+ /* extract the returned status record */
case 1:
_debug("extract status");
ret = afs_extract_data(call, true);
@@ -1441,8 +1445,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
xdr_decode_YFSFetchVolumeStatus(&bp, call->out_volstatus);
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
- /* Fall through - and extract the volume name length */
+ /* extract the volume name length */
case 2:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1456,8 +1461,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the volume name */
+ /* extract the volume name */
case 3:
_debug("extract volname");
ret = afs_extract_data(call, true);
@@ -1469,8 +1475,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("volname '%s'", p);
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message length */
+ /* extract the offline message length */
case 4:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1484,8 +1491,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the offline message */
+ /* extract the offline message */
case 5:
_debug("extract offline");
ret = afs_extract_data(call, true);
@@ -1498,8 +1506,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day length */
+ /* extract the message of the day length */
case 6:
ret = afs_extract_data(call, true);
if (ret < 0)
@@ -1513,8 +1522,9 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
size = (call->count + 3) & ~3; /* It's padded */
afs_extract_to_buf(call, size);
call->unmarshall++;
+ /* Fall through */
- /* Fall through - and extract the message of the day */
+ /* extract the message of the day */
case 7:
_debug("extract motd");
ret = afs_extract_data(call, false);
@@ -1526,8 +1536,8 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call)
_debug("motd '%s'", p);
call->unmarshall++;
-
/* Fall through */
+
case 8:
break;
}
@@ -1805,9 +1815,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the file status count and array in two steps */
- /* Fall through */
case 1:
_debug("extract status count");
ret = afs_extract_data(call, true);
@@ -1824,8 +1834,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_counts:
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus));
-
/* Fall through */
+
case 2:
_debug("extract status array %u", call->count);
ret = afs_extract_data(call, true);
@@ -1845,9 +1855,9 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->count = 0;
call->unmarshall++;
afs_extract_to_tmp(call);
+ /* Fall through */
/* Extract the callback count and array in two steps */
- /* Fall through */
case 3:
_debug("extract CB count");
ret = afs_extract_data(call, true);
@@ -1863,8 +1873,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
call->unmarshall++;
more_cbs:
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack));
-
/* Fall through */
+
case 4:
_debug("extract CB array");
ret = afs_extract_data(call, true);
@@ -1881,8 +1891,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync));
call->unmarshall++;
-
/* Fall through */
+
case 5:
ret = afs_extract_data(call, false);
if (ret < 0)
@@ -1892,8 +1902,8 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call)
xdr_decode_YFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
-
/* Fall through */
+
case 6:
break;
}
@@ -1978,6 +1988,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
case 0:
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the file ACL length */
case 1:
@@ -1999,6 +2010,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
iov_iter_discard(&call->iter, READ, size);
}
call->unmarshall++;
+ /* Fall through */
/* Extract the file ACL */
case 2:
@@ -2008,6 +2020,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
afs_extract_to_tmp(call);
call->unmarshall++;
+ /* Fall through */
/* Extract the volume ACL length */
case 3:
@@ -2029,6 +2042,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
iov_iter_discard(&call->iter, READ, size);
}
call->unmarshall++;
+ /* Fall through */
/* Extract the volume ACL */
case 4:
@@ -2041,6 +2055,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
sizeof(struct yfs_xdr_YFSFetchStatus) +
sizeof(struct yfs_xdr_YFSVolSync));
call->unmarshall++;
+ /* Fall through */
/* extract the metadata */
case 5:
@@ -2057,6 +2072,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call)
xdr_decode_YFSVolSync(&bp, call->out_volsync);
call->unmarshall++;
+ /* Fall through */
case 6:
break;
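
The yfsclient.c hunks above only reposition the /* Fall through */ annotations so that each one sits directly above the case label it refers to, where fall-through checkers expect it; the underlying pattern is a resumable decoder built as a switch on call->unmarshall, where each case deliberately falls into the next once its chunk of data has been buffered. A minimal sketch of that pattern in plain C (not the actual AFS decoder; the names are illustrative):

struct demo_call {
	unsigned int unmarshall;	/* decoder resume point */
};

/*
 * Minimal resumable-decoder sketch: returns 0 once every phase has run,
 * -1 when the current phase needs more data (the caller retries later and
 * the switch resumes at the recorded phase).
 */
static int demo_deliver(struct demo_call *call,
			int (*extract)(struct demo_call *call))
{
	switch (call->unmarshall) {
	case 0:
		call->unmarshall++;
		/* Fall through */

		/* extract the first field */
	case 1:
		if (extract(call) < 0)
			return -1;
		call->unmarshall++;
		/* Fall through */

		/* extract the second field */
	case 2:
		if (extract(call) < 0)
			return -1;
		call->unmarshall++;
		/* Fall through */

	case 3:
		break;
	}

	return 0;
}
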
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4707dfff991b..a6f7c892cb4a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,15 +345,24 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct bio *bio;
bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+ bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
- int ret = 0;
+ gfp_t gfp;
+ ssize_t ret;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
+ if (nowait)
+ gfp = GFP_NOWAIT;
+ else
+ gfp = GFP_KERNEL;
+
+ bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
+ if (!bio)
+ return -EAGAIN;
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -375,7 +384,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
if (!is_poll)
blk_start_plug(&plug);
+ ret = 0;
for (;;) {
+ int err;
+
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
bio->bi_write_hint = iocb->ki_hint;
@@ -383,8 +395,10 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
- ret = bio_iov_iter_get_pages(bio, iter);
- if (unlikely(ret)) {
+ err = bio_iov_iter_get_pages(bio, iter);
+ if (unlikely(err)) {
+ if (!ret)
+ ret = err;
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
break;
@@ -399,6 +413,14 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
task_io_account_write(bio->bi_iter.bi_size);
}
+ /*
+ * Tell underlying layer to not block for resource shortage.
+ * And if we would have blocked, return error inline instead
+ * of through the bio->bi_end_io() callback.
+ */
+ if (nowait)
+ bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
+
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
@@ -412,6 +434,12 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
+ ret = dio->size;
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
@@ -432,8 +460,20 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
atomic_inc(&dio->ref);
}
- submit_bio(bio);
- bio = bio_alloc(GFP_KERNEL, nr_pages);
+ qc = submit_bio(bio);
+ if (qc == BLK_QC_T_EAGAIN) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
+ ret = dio->size;
+
+ bio = bio_alloc(gfp, nr_pages);
+ if (!bio) {
+ if (!ret)
+ ret = -EAGAIN;
+ goto error;
+ }
}
if (!is_poll)
@@ -453,13 +493,16 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
__set_current_state(TASK_RUNNING);
+out:
if (!ret)
ret = blk_status_to_errno(dio->bio.bi_status);
- if (likely(!ret))
- ret = dio->size;
bio_put(&dio->bio);
return ret;
+error:
+ if (!is_poll)
+ blk_finish_plug(&plug);
+ goto out;
}
static ssize_t
@@ -1139,8 +1182,7 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno)
* Pointer to the block device containing @bdev on success, ERR_PTR()
* value on failure.
*/
-static struct block_device *bd_start_claiming(struct block_device *bdev,
- void *holder)
+struct block_device *bd_start_claiming(struct block_device *bdev, void *holder)
{
struct gendisk *disk;
struct block_device *whole;
@@ -1187,6 +1229,62 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
return ERR_PTR(err);
}
}
+EXPORT_SYMBOL(bd_start_claiming);
+
+static void bd_clear_claiming(struct block_device *whole, void *holder)
+{
+ lockdep_assert_held(&bdev_lock);
+ /* tell others that we're done */
+ BUG_ON(whole->bd_claiming != holder);
+ whole->bd_claiming = NULL;
+ wake_up_bit(&whole->bd_claiming, 0);
+}
+
+/**
+ * bd_finish_claiming - finish claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Finish exclusive open of a block device. Mark the device as exclusively
+ * open by the holder and wake up all waiters for exclusive open to finish.
+ */
+void bd_finish_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder)
+{
+ spin_lock(&bdev_lock);
+ BUG_ON(!bd_may_claim(bdev, whole, holder));
+ /*
+ * Note that for a whole device bd_holders will be incremented twice,
+ * and bd_holder will be set to bd_may_claim before being set to holder
+ */
+ whole->bd_holders++;
+ whole->bd_holder = bd_may_claim;
+ bdev->bd_holders++;
+ bdev->bd_holder = holder;
+ bd_clear_claiming(whole, holder);
+ spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_finish_claiming);
+
+/**
+ * bd_abort_claiming - abort claiming of a block device
+ * @bdev: block device of interest
+ * @whole: whole block device (returned from bd_start_claiming())
+ * @holder: holder that has claimed @bdev
+ *
+ * Abort claiming of a block device when the exclusive open failed. This can
+ * also be used when exclusive open is not actually desired and we just needed
+ * to block other exclusive openers for a while.
+ */
+void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
+ void *holder)
+{
+ spin_lock(&bdev_lock);
+ bd_clear_claiming(whole, holder);
+ spin_unlock(&bdev_lock);
+}
+EXPORT_SYMBOL(bd_abort_claiming);
#ifdef CONFIG_SYSFS
struct bd_holder_disk {
@@ -1656,29 +1754,7 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
/* finish claiming */
mutex_lock(&bdev->bd_mutex);
- spin_lock(&bdev_lock);
-
- if (!res) {
- BUG_ON(!bd_may_claim(bdev, whole, holder));
- /*
- * Note that for a whole device bd_holders
- * will be incremented twice, and bd_holder
- * will be set to bd_may_claim before being
- * set to holder
- */
- whole->bd_holders++;
- whole->bd_holder = bd_may_claim;
- bdev->bd_holders++;
- bdev->bd_holder = holder;
- }
-
- /* tell others that we're done */
- BUG_ON(whole->bd_claiming != holder);
- whole->bd_claiming = NULL;
- wake_up_bit(&whole->bd_claiming, 0);
-
- spin_unlock(&bdev_lock);
-
+ bd_finish_claiming(bdev, whole, holder);
/*
* Block event polling for write claims if requested. Any
* write holder makes the write_holder state stick until
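
The newly exported bd_start_claiming()/bd_finish_claiming()/bd_abort_claiming() trio splits exclusive-open claiming into explicit start, commit and abort steps, as the kernel-doc above describes. A hedged usage sketch follows; the surrounding function and my_do_probe() are hypothetical, and only the bd_*_claiming() calls come from this patch:

#include <linux/fs.h>
#include <linux/err.h>

static int my_do_probe(struct block_device *bdev);	/* hypothetical helper */

/*
 * Claim @bdev exclusively while probing it: other exclusive openers are
 * blocked between bd_start_claiming() and the finish/abort call.
 */
static int my_probe_locked(struct block_device *bdev, void *holder)
{
	struct block_device *whole;
	int err;

	whole = bd_start_claiming(bdev, holder);
	if (IS_ERR(whole))
		return PTR_ERR(whole);

	err = my_do_probe(bdev);
	if (err) {
		/* Probe failed: drop the claim and wake up waiters. */
		bd_abort_claiming(bdev, whole, holder);
		return err;
	}

	/* Success: record @holder as the exclusive opener. */
	bd_finish_claiming(bdev, whole, holder);
	return 0;
}
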
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 89116afda7a2..e5d85311d5d5 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1483,7 +1483,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
ulist_init(roots);
ulist_init(tmp);
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
ret = PTR_ERR(trans);
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 98fccce4208c..393eceda57c8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -346,9 +346,12 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
if (blockers) {
btrfs_assert_no_spinning_writers(eb);
eb->blocking_writers--;
- /* Use the lighter barrier after atomic */
- smp_mb__after_atomic();
- cond_wake_up_nomb(&eb->write_lock_wq);
+ /*
+ * We need to order modifying blocking_writers above with
+ * actually waking up the sleepers to ensure they see the
+ * updated value of blocking_writers
+ */
+ cond_wake_up(&eb->write_lock_wq);
} else {
btrfs_assert_spinning_writers_put(eb);
write_unlock(&eb->lock);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 1744ba8b2754..ae7f64a8facb 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -985,13 +985,14 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
struct extent_state **cached_state)
{
struct btrfs_ordered_extent *ordered;
- struct extent_state *cachedp = NULL;
+ struct extent_state *cache = NULL;
+ struct extent_state **cachedp = &cache;
if (cached_state)
- cachedp = *cached_state;
+ cachedp = cached_state;
while (1) {
- lock_extent_bits(tree, start, end, &cachedp);
+ lock_extent_bits(tree, start, end, cachedp);
ordered = btrfs_lookup_ordered_range(inode, start,
end - start + 1);
if (!ordered) {
@@ -1001,10 +1002,10 @@ void btrfs_lock_and_flush_ordered_range(struct extent_io_tree *tree,
* aren't exposing it outside of this function
*/
if (!cached_state)
- refcount_dec(&cachedp->refs);
+ refcount_dec(&cache->refs);
break;
}
- unlock_extent_cached(tree, start, end, &cachedp);
+ unlock_extent_cached(tree, start, end, cachedp);
btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1);
btrfs_put_ordered_extent(ordered);
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 69b59bf75882..c3c0c064c25d 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6322,68 +6322,21 @@ static int changed_extent(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
- if (result == BTRFS_COMPARE_TREE_CHANGED) {
- struct extent_buffer *leaf_l;
- struct extent_buffer *leaf_r;
- struct btrfs_file_extent_item *ei_l;
- struct btrfs_file_extent_item *ei_r;
-
- leaf_l = sctx->left_path->nodes[0];
- leaf_r = sctx->right_path->nodes[0];
- ei_l = btrfs_item_ptr(leaf_l,
- sctx->left_path->slots[0],
- struct btrfs_file_extent_item);
- ei_r = btrfs_item_ptr(leaf_r,
- sctx->right_path->slots[0],
- struct btrfs_file_extent_item);
-
- /*
- * We may have found an extent item that has changed
- * only its disk_bytenr field and the corresponding
- * inode item was not updated. This case happens due to
- * very specific timings during relocation when a leaf
- * that contains file extent items is COWed while
- * relocation is ongoing and its in the stage where it
- * updates data pointers. So when this happens we can
- * safely ignore it since we know it's the same extent,
- * but just at different logical and physical locations
- * (when an extent is fully replaced with a new one, we
- * know the generation number must have changed too,
- * since snapshot creation implies committing the current
- * transaction, and the inode item must have been updated
- * as well).
- * This replacement of the disk_bytenr happens at
- * relocation.c:replace_file_extents() through
- * relocation.c:btrfs_reloc_cow_block().
- */
- if (btrfs_file_extent_generation(leaf_l, ei_l) ==
- btrfs_file_extent_generation(leaf_r, ei_r) &&
- btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_compression(leaf_l, ei_l) ==
- btrfs_file_extent_compression(leaf_r, ei_r) &&
- btrfs_file_extent_encryption(leaf_l, ei_l) ==
- btrfs_file_extent_encryption(leaf_r, ei_r) &&
- btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
- btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
- btrfs_file_extent_type(leaf_l, ei_l) ==
- btrfs_file_extent_type(leaf_r, ei_r) &&
- btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
- btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
- btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_offset(leaf_l, ei_l) ==
- btrfs_file_extent_offset(leaf_r, ei_r) &&
- btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_num_bytes(leaf_r, ei_r))
- return 0;
- }
-
- inconsistent_snapshot_error(sctx, result, "extent");
- return -EIO;
- }
+ /*
+ * We have found an extent item that changed without the inode item
+ * having changed. This can happen either after relocation (where the
+ * disk_bytenr of an extent item is replaced at
+ * relocation.c:replace_file_extents()) or after deduplication into a
+ * file in both the parent and send snapshots (where an extent item can
+ * get modified or replaced with a new one). Note that deduplication
+ * updates the inode item, but it only changes the iversion (sequence
+ * field in the inode item) of the inode, so if a file is deduplicated
+ * the same number of times in both the parent and send snapshots, its
+ * iversion becomes the same in both snapshots, hence the inode item is
+ * the same on both snapshots.
+ */
+ if (sctx->cur_ino != sctx->cmp_key->objectid)
+ return 0;
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 3b8ae1a8f02d..e3adb714c04b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
- __TRANS_JOIN),
+ __TRANS_JOIN |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_UNBLOCKED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_COMPLETED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
};
void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -543,7 +546,8 @@ again:
ret = join_transaction(fs_info, type);
if (ret == -EBUSY) {
wait_current_trans(fs_info);
- if (unlikely(type == TRANS_ATTACH))
+ if (unlikely(type == TRANS_ATTACH ||
+ type == TRANS_JOIN_NOSTART))
ret = -ENOENT;
}
} while (ret == -EBUSY);
@@ -660,6 +664,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
}
/*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+ return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+ BTRFS_RESERVE_NO_FLUSH, true);
+}
+
+/*
* btrfs_attach_transaction() - catch the running transaction
*
* It is used when we want to commit the current the transaction, but
@@ -2037,6 +2051,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
} else {
spin_unlock(&fs_info->trans_lock);
+ /*
+ * The previous transaction was aborted and was already removed
+ * from the list of transactions at fs_info->trans_list. So we
+ * abort to prevent writing a new superblock that reflects a
+ * corrupt state (pointing to trees with unwritten nodes/leafs).
+ */
+ if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+ ret = -EROFS;
+ goto cleanup_transaction;
+ }
}
extwriter_counter_dec(cur_trans, trans->type);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 527ea94b57d9..2c5a6f6e5bb0 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -94,11 +94,13 @@ struct btrfs_transaction {
#define __TRANS_JOIN (1U << 11)
#define __TRANS_JOIN_NOLOCK (1U << 12)
#define __TRANS_DUMMY (1U << 13)
+#define __TRANS_JOIN_NOSTART (1U << 14)
#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
@@ -183,6 +185,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
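
TRANS_JOIN_NOSTART is a new join mode: piggy-back on a transaction that is already running, but never start one. A short sketch of the calling convention, mirroring the btrfs_check_shared() change earlier in this diff (the wrapper name is illustrative):

#include <linux/err.h>
#include "ctree.h"		/* struct btrfs_root */
#include "transaction.h"	/* btrfs_join_transaction_nostart() */

/*
 * Join the running transaction if there is one; -ENOENT just means nothing
 * is running, which the caller treats as "work without a transaction".
 * Any other error is propagated.
 */
static struct btrfs_trans_handle *maybe_join_running(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		return NULL;

	return trans;
}
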
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6e30949d9f77..a7ec2d3dff92 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -638,9 +638,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
COMPATIBLE_IOCTL(PPPIOCATTCHAN)
COMPATIBLE_IOCTL(PPPIOCGCHAN)
COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
/* Big A */
/* sparc only */
/* Big Q for sound/OSS */
diff --git a/fs/coredump.c b/fs/coredump.c
index e42e17e55bfd..b1ea7dfbd149 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -7,6 +7,7 @@
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
+#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
@@ -187,11 +188,13 @@ put_exe_file:
* name into corename, which must have space for at least
* CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
*/
-static int format_corename(struct core_name *cn, struct coredump_params *cprm)
+static int format_corename(struct core_name *cn, struct coredump_params *cprm,
+ size_t **argv, int *argc)
{
const struct cred *cred = current_cred();
const char *pat_ptr = core_pattern;
int ispipe = (*pat_ptr == '|');
+ bool was_space = false;
int pid_in_pattern = 0;
int err = 0;
@@ -201,12 +204,35 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
return -ENOMEM;
cn->corename[0] = '\0';
- if (ispipe)
+ if (ispipe) {
+ int argvs = sizeof(core_pattern) / 2;
+ (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
+ if (!(*argv))
+ return -ENOMEM;
+ (*argv)[(*argc)++] = 0;
++pat_ptr;
+ }
/* Repeat as long as we have more pattern to process and more output
space */
while (*pat_ptr) {
+ /*
+ * Split on spaces before doing template expansion so that
+ * %e and %E don't get split if they have spaces in them
+ */
+ if (ispipe) {
+ if (isspace(*pat_ptr)) {
+ was_space = true;
+ pat_ptr++;
+ continue;
+ } else if (was_space) {
+ was_space = false;
+ err = cn_printf(cn, "%c", '\0');
+ if (err)
+ return err;
+ (*argv)[(*argc)++] = cn->used;
+ }
+ }
if (*pat_ptr != '%') {
err = cn_printf(cn, "%c", *pat_ptr++);
} else {
@@ -546,6 +572,8 @@ void do_coredump(const kernel_siginfo_t *siginfo)
struct cred *cred;
int retval = 0;
int ispipe;
+ size_t *argv = NULL;
+ int argc = 0;
struct files_struct *displaced;
/* require nonrelative corefile path and be extra careful */
bool need_suid_safe = false;
@@ -592,9 +620,10 @@ void do_coredump(const kernel_siginfo_t *siginfo)
old_cred = override_creds(cred);
- ispipe = format_corename(&cn, &cprm);
+ ispipe = format_corename(&cn, &cprm, &argv, &argc);
if (ispipe) {
+ int argi;
int dump_count;
char **helper_argv;
struct subprocess_info *sub_info;
@@ -637,12 +666,16 @@ void do_coredump(const kernel_siginfo_t *siginfo)
goto fail_dropcount;
}
- helper_argv = argv_split(GFP_KERNEL, cn.corename, NULL);
+ helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
+ GFP_KERNEL);
if (!helper_argv) {
printk(KERN_WARNING "%s failed to allocate memory\n",
__func__);
goto fail_dropcount;
}
+ for (argi = 0; argi < argc; argi++)
+ helper_argv[argi] = cn.corename + argv[argi];
+ helper_argv[argi] = NULL;
retval = -ENOMEM;
sub_info = call_usermodehelper_setup(helper_argv[0],
@@ -652,7 +685,7 @@ void do_coredump(const kernel_siginfo_t *siginfo)
retval = call_usermodehelper_exec(sub_info,
UMH_WAIT_EXEC);
- argv_free(helper_argv);
+ kfree(helper_argv);
if (retval) {
printk(KERN_INFO "Core dump to |%s pipe failed\n",
cn.corename);
@@ -766,6 +799,7 @@ fail_dropcount:
if (ispipe)
atomic_dec(&core_dump_count);
fail_unlock:
+ kfree(argv);
kfree(cn.corename);
coredump_finish(mm, core_dumped);
revert_creds(old_cred);
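
The format_corename() change above records, for a piped core_pattern, the byte offset of each argument inside the expanded corename buffer (arguments separated by NUL bytes) so that helper_argv[] can be built without a second argv_split() pass, and so that a %e containing spaces stays a single argument. A small userspace-style illustration of that offsets-into-one-buffer layout (the strings and offsets here are made up):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* Expanded "|/usr/bin/dumper %e %s" where the comm contains a space. */
	char corename[] = "/usr/bin/dumper\0my prog\0sig11";
	size_t argv[] = { 0, 16, 24 };	/* argument start offsets recorded during expansion */
	char *helper_argv[4];
	int i;

	for (i = 0; i < 3; i++)
		helper_argv[i] = corename + argv[i];
	helper_argv[i] = NULL;		/* call_usermodehelper-style NULL terminator */

	for (i = 0; helper_argv[i]; i++)
		printf("argv[%d] = '%s'\n", i, helper_argv[i]);
	return 0;
}
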
diff --git a/fs/dax.c b/fs/dax.c
index a237141d8787..b64964ef44f6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -266,7 +266,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
/* If we were the only waiter woken, wake the next one */
- if (entry && dax_is_conflict(entry))
+ if (entry && !dax_is_conflict(entry))
dax_wake_entry(xas, entry, false);
}
diff --git a/fs/exec.c b/fs/exec.c
index c71cbfe6826a..f7f6a140856a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1828,7 +1828,7 @@ static int __do_execve_file(int fd, struct filename *filename,
membarrier_execve(current);
rseq_execve(current);
acct_update_integrals(current);
- task_numa_free(current);
+ task_numa_free(current, false);
free_bprm(bprm);
kfree(pathbuf);
if (filename)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f8d46df8fa9e..3e58a6f697dd 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1653,19 +1653,12 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- u32 oldflags;
/* Is it quota file? Do not allow user to mess with it */
if (IS_NOQUOTA(inode))
return -EPERM;
- oldflags = fi->i_flags;
-
- if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
- if (!capable(CAP_LINUX_IMMUTABLE))
- return -EPERM;
-
- fi->i_flags = iflags | (oldflags & ~mask);
+ fi->i_flags = iflags | (fi->i_flags & ~mask);
if (fi->i_flags & F2FS_PROJINHERIT_FL)
set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1770,7 +1763,8 @@ static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- u32 fsflags;
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ u32 fsflags, old_fsflags;
u32 iflags;
int ret;
@@ -1794,8 +1788,14 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
inode_lock(inode);
+ old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
+ ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
+ if (ret)
+ goto out;
+
ret = f2fs_setflags_common(inode, iflags,
f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
+out:
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -2855,52 +2855,32 @@ static inline u32 f2fs_xflags_to_iflags(u32 xflags)
return iflags;
}
-static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
+static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
{
- struct inode *inode = file_inode(filp);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct fsxattr fa;
- memset(&fa, 0, sizeof(struct fsxattr));
- fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
+ simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
- fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
- fi->i_projid);
-
- if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
- return -EFAULT;
- return 0;
+ fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
}
-static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
+static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
{
- /*
- * Project Quota ID state is only allowed to change from within the init
- * namespace. Enforce that restriction only if we are trying to change
- * the quota ID state. Everything else is allowed in user namespaces.
- */
- if (current_user_ns() == &init_user_ns)
- return 0;
+ struct inode *inode = file_inode(filp);
+ struct fsxattr fa;
- if (__kprojid_val(F2FS_I(inode)->i_projid) != fa->fsx_projid)
- return -EINVAL;
-
- if (F2FS_I(inode)->i_flags & F2FS_PROJINHERIT_FL) {
- if (!(fa->fsx_xflags & FS_XFLAG_PROJINHERIT))
- return -EINVAL;
- } else {
- if (fa->fsx_xflags & FS_XFLAG_PROJINHERIT)
- return -EINVAL;
- }
+ f2fs_fill_fsxattr(inode, &fa);
+ if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
+ return -EFAULT;
return 0;
}
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
- struct fsxattr fa;
+ struct fsxattr fa, old_fa;
u32 iflags;
int err;
@@ -2923,9 +2903,12 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
return err;
inode_lock(inode);
- err = f2fs_ioctl_check_project(inode, &fa);
+
+ f2fs_fill_fsxattr(inode, &old_fa);
+ err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
if (err)
goto out;
+
err = f2fs_setflags_common(inode, iflags,
f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
if (err)
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 6691f526fa40..8974672db78f 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -796,6 +796,29 @@ static int move_data_block(struct inode *inode, block_t bidx,
if (lfs_mode)
down_write(&fio.sbi->io_order_lock);
+ mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, false);
+ if (!mpage)
+ goto up_out;
+
+ fio.encrypted_page = mpage;
+
+ /* read source block in mpage */
+ if (!PageUptodate(mpage)) {
+ err = f2fs_submit_page_bio(&fio);
+ if (err) {
+ f2fs_put_page(mpage, 1);
+ goto up_out;
+ }
+ lock_page(mpage);
+ if (unlikely(mpage->mapping != META_MAPPING(fio.sbi) ||
+ !PageUptodate(mpage))) {
+ err = -EIO;
+ f2fs_put_page(mpage, 1);
+ goto up_out;
+ }
+ }
+
f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
&sum, CURSEG_COLD_DATA, NULL, false);
@@ -803,44 +826,18 @@ static int move_data_block(struct inode *inode, block_t bidx,
newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
if (!fio.encrypted_page) {
err = -ENOMEM;
- goto recover_block;
- }
-
- mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
- fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
- if (mpage) {
- bool updated = false;
-
- if (PageUptodate(mpage)) {
- memcpy(page_address(fio.encrypted_page),
- page_address(mpage), PAGE_SIZE);
- updated = true;
- }
f2fs_put_page(mpage, 1);
- invalidate_mapping_pages(META_MAPPING(fio.sbi),
- fio.old_blkaddr, fio.old_blkaddr);
- if (updated)
- goto write_page;
- }
-
- err = f2fs_submit_page_bio(&fio);
- if (err)
- goto put_page_out;
-
- /* write page */
- lock_page(fio.encrypted_page);
-
- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
- err = -EIO;
- goto put_page_out;
- }
- if (unlikely(!PageUptodate(fio.encrypted_page))) {
- err = -EIO;
- goto put_page_out;
+ goto recover_block;
}
-write_page:
+ /* write target block */
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true, true);
+ memcpy(page_address(fio.encrypted_page),
+ page_address(mpage), PAGE_SIZE);
+ f2fs_put_page(mpage, 1);
+ invalidate_mapping_pages(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, fio.old_blkaddr);
+
set_page_dirty(fio.encrypted_page);
if (clear_page_dirty_for_io(fio.encrypted_page))
dec_page_count(fio.sbi, F2FS_DIRTY_META);
@@ -871,11 +868,12 @@ write_page:
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
recover_block:
- if (lfs_mode)
- up_write(&fio.sbi->io_order_lock);
if (err)
f2fs_do_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
true, true);
+up_out:
+ if (lfs_mode)
+ up_write(&fio.sbi->io_order_lock);
put_out:
f2fs_put_dnode(&dn);
out:
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 6de6cda44031..78a1b873e48a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2422,6 +2422,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
size_t crc_offset = 0;
__u32 crc = 0;
+ if (le32_to_cpu(raw_super->magic) != F2FS_SUPER_MAGIC) {
+ f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
+ F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
+ return -EINVAL;
+ }
+
/* Check checksum_offset and crc in superblock */
if (__F2FS_HAS_FEATURE(raw_super, F2FS_FEATURE_SB_CHKSUM)) {
crc_offset = le32_to_cpu(raw_super->checksum_offset);
@@ -2429,26 +2435,20 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
offsetof(struct f2fs_super_block, crc)) {
f2fs_info(sbi, "Invalid SB checksum offset: %zu",
crc_offset);
- return 1;
+ return -EFSCORRUPTED;
}
crc = le32_to_cpu(raw_super->crc);
if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
- return 1;
+ return -EFSCORRUPTED;
}
}
- if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
- f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
- F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
- return 1;
- }
-
/* Currently, support only 4KB page cache size */
if (F2FS_BLKSIZE != PAGE_SIZE) {
f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
PAGE_SIZE);
- return 1;
+ return -EFSCORRUPTED;
}
/* Currently, support only 4KB block size */
@@ -2456,14 +2456,14 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
if (blocksize != F2FS_BLKSIZE) {
f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
blocksize);
- return 1;
+ return -EFSCORRUPTED;
}
/* check log blocks per segment */
if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
f2fs_info(sbi, "Invalid log blocks per segment (%u)",
le32_to_cpu(raw_super->log_blocks_per_seg));
- return 1;
+ return -EFSCORRUPTED;
}
/* Currently, support 512/1024/2048/4096 bytes sector size */
@@ -2473,7 +2473,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
F2FS_MIN_LOG_SECTOR_SIZE) {
f2fs_info(sbi, "Invalid log sectorsize (%u)",
le32_to_cpu(raw_super->log_sectorsize));
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->log_sectors_per_block) +
le32_to_cpu(raw_super->log_sectorsize) !=
@@ -2481,7 +2481,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
le32_to_cpu(raw_super->log_sectors_per_block),
le32_to_cpu(raw_super->log_sectorsize));
- return 1;
+ return -EFSCORRUPTED;
}
segment_count = le32_to_cpu(raw_super->segment_count);
@@ -2495,7 +2495,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
if (segment_count > F2FS_MAX_SEGMENT ||
segment_count < F2FS_MIN_SEGMENTS) {
f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
- return 1;
+ return -EFSCORRUPTED;
}
if (total_sections > segment_count ||
@@ -2503,25 +2503,25 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
segs_per_sec > segment_count || !segs_per_sec) {
f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
segment_count, total_sections, segs_per_sec);
- return 1;
+ return -EFSCORRUPTED;
}
if ((segment_count / segs_per_sec) < total_sections) {
f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
segment_count, segs_per_sec, total_sections);
- return 1;
+ return -EFSCORRUPTED;
}
if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
segment_count, le64_to_cpu(raw_super->block_count));
- return 1;
+ return -EFSCORRUPTED;
}
if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
@@ -2531,7 +2531,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
le32_to_cpu(raw_super->extension_count),
raw_super->hot_ext_count,
F2FS_MAX_EXTENSION);
- return 1;
+ return -EFSCORRUPTED;
}
if (le32_to_cpu(raw_super->cp_payload) >
@@ -2539,7 +2539,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
f2fs_info(sbi, "Insane cp_payload (%u > %u)",
le32_to_cpu(raw_super->cp_payload),
blocks_per_seg - F2FS_CP_PACKS);
- return 1;
+ return -EFSCORRUPTED;
}
/* check reserved ino info */
@@ -2550,12 +2550,12 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
le32_to_cpu(raw_super->node_ino),
le32_to_cpu(raw_super->meta_ino),
le32_to_cpu(raw_super->root_ino));
- return 1;
+ return -EFSCORRUPTED;
}
/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
if (sanity_check_area_boundary(sbi, bh))
- return 1;
+ return -EFSCORRUPTED;
return 0;
}
@@ -2870,10 +2870,10 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
}
/* sanity checking of raw super */
- if (sanity_check_raw_super(sbi, bh)) {
+ err = sanity_check_raw_super(sbi, bh);
+ if (err) {
f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
block + 1);
- err = -EFSCORRUPTED;
brelse(bh);
continue;
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 79581b9bdebb..4df26ef2b2b1 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -1002,11 +1002,16 @@ static void gfs2_iomap_page_done(struct inode *inode, loff_t pos,
unsigned copied, struct page *page,
struct iomap *iomap)
{
+ struct gfs2_trans *tr = current->journal_info;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
if (page && !gfs2_is_stuffed(ip))
gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied);
+
+ if (tr->tr_num_buf_new)
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+
gfs2_trans_end(sdp);
}
@@ -1099,8 +1104,6 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos,
tr = current->journal_info;
if (tr->tr_num_buf_new)
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
- else
- gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[0]);
gfs2_trans_end(sdp);
}
@@ -1181,10 +1184,16 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
if (ip->i_qadata && ip->i_qadata->qa_qd_num)
gfs2_quota_unlock(ip);
+
+ if (unlikely(!written))
+ goto out_unlock;
+
if (iomap->flags & IOMAP_F_SIZE_CHANGED)
mark_inode_dirty(inode);
- gfs2_write_unlock(inode);
+ set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
+out_unlock:
+ gfs2_write_unlock(inode);
out:
return 0;
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index e2a66e12fbc6..d542f1cf4428 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -202,7 +202,7 @@ struct async_list {
struct file *file;
off_t io_end;
- size_t io_pages;
+ size_t io_len;
};
struct io_ring_ctx {
@@ -333,7 +333,8 @@ struct io_kiocb {
#define REQ_F_IO_DRAIN 16 /* drain existing IO first */
#define REQ_F_IO_DRAINED 32 /* drain done */
#define REQ_F_LINK 64 /* linked sqes */
-#define REQ_F_FAIL_LINK 128 /* fail rest of links */
+#define REQ_F_LINK_DONE 128 /* linked sqes done */
+#define REQ_F_FAIL_LINK 256 /* fail rest of links */
u64 user_data;
u32 result;
u32 sequence;
@@ -429,7 +430,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
return false;
- return req->sequence > ctx->cached_cq_tail + ctx->sq_ring->dropped;
+ return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
}
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -632,6 +633,7 @@ static void io_req_link_next(struct io_kiocb *req)
nxt->flags |= REQ_F_LINK;
}
+ nxt->flags |= REQ_F_LINK_DONE;
INIT_WORK(&nxt->work, io_sq_wq_submit_work);
queue_work(req->ctx->sqo_wq, &nxt->work);
}
@@ -1064,8 +1066,44 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
*/
offset = buf_addr - imu->ubuf;
iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
- if (offset)
- iov_iter_advance(iter, offset);
+
+ if (offset) {
+ /*
+ * Don't use iov_iter_advance() here, as it's really slow for
+ * using the latter parts of a big fixed buffer - it iterates
+ * over each segment manually. We can cheat a bit here, because
+ * we know that:
+ *
+ * 1) it's a BVEC iter, we set it up
+ * 2) all bvecs are PAGE_SIZE in size, except potentially the
+ * first and last bvec
+ *
+ * So just find our index, and adjust the iterator afterwards.
+ * If the offset is within the first bvec (or the whole first
+ * bvec), just use iov_iter_advance(). This makes it easier
+ * since we can just skip the first segment, which may not
+ * be PAGE_SIZE aligned.
+ */
+ const struct bio_vec *bvec = imu->bvec;
+
+ if (offset <= bvec->bv_len) {
+ iov_iter_advance(iter, offset);
+ } else {
+ unsigned long seg_skip;
+
+ /* skip first vec */
+ offset -= bvec->bv_len;
+ seg_skip = 1 + (offset >> PAGE_SHIFT);
+
+ iter->bvec = bvec + seg_skip;
+ iter->nr_segs -= seg_skip;
+ iter->count -= (seg_skip << PAGE_SHIFT);
+ iter->iov_offset = offset & ~PAGE_MASK;
+ if (iter->iov_offset)
+ iter->count -= iter->iov_offset;
+ }
+ }
+
return 0;
}
@@ -1120,28 +1158,26 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
off_t io_end = kiocb->ki_pos + len;
if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
- unsigned long max_pages;
+ unsigned long max_bytes;
/* Use 8x RA size as a decent limiter for both reads/writes */
- max_pages = filp->f_ra.ra_pages;
- if (!max_pages)
- max_pages = VM_READAHEAD_PAGES;
- max_pages *= 8;
-
- /* If max pages are exceeded, reset the state */
- len >>= PAGE_SHIFT;
- if (async_list->io_pages + len <= max_pages) {
+ max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
+ if (!max_bytes)
+ max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
+
+ /* If max len are exceeded, reset the state */
+ if (async_list->io_len + len <= max_bytes) {
req->flags |= REQ_F_SEQ_PREV;
- async_list->io_pages += len;
+ async_list->io_len += len;
} else {
io_end = 0;
- async_list->io_pages = 0;
+ async_list->io_len = 0;
}
}
/* New file? Reset state. */
if (async_list->file != filp) {
- async_list->io_pages = 0;
+ async_list->io_len = 0;
async_list->file = filp;
}
async_list->io_end = io_end;
@@ -1630,6 +1666,8 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
INIT_LIST_HEAD(&poll->wait.entry);
init_waitqueue_func_entry(&poll->wait, io_poll_wake);
+ INIT_LIST_HEAD(&req->list);
+
mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
spin_lock_irq(&ctx->completion_lock);
@@ -1800,6 +1838,7 @@ restart:
do {
struct sqe_submit *s = &req->submit;
const struct io_uring_sqe *sqe = s->sqe;
+ unsigned int flags = req->flags;
/* Ensure we clear previously set non-block flag */
req->rw.ki_flags &= ~IOCB_NOWAIT;
@@ -1844,6 +1883,10 @@ restart:
/* async context always use a copy of the sqe */
kfree(sqe);
+ /* req from defer and link list needn't decrease async cnt */
+ if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
+ goto out;
+
if (!async_list)
break;
if (!list_empty(&req_list)) {
@@ -1891,6 +1934,7 @@ restart:
}
}
+out:
if (cur_mm) {
set_fs(old_fs);
unuse_mm(cur_mm);
@@ -1917,6 +1961,10 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
ret = true;
spin_lock(&list->lock);
list_add_tail(&req->list, &list->list);
+ /*
+ * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
+ */
+ smp_mb();
if (!atomic_read(&list->cnt)) {
list_del_init(&req->list);
ret = false;
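
The io_import_fixed() comment above argues that, because all middle segments of a registered buffer are exactly PAGE_SIZE, an offset can be turned into a bvec index plus an intra-page offset with two shifts instead of walking the iterator segment by segment. A worked numeric example of that arithmetic in plain C, assuming a hypothetical 1 KiB first segment and 4 KiB pages:

#include <stdio.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)

int main(void)
{
	unsigned long first_len = 1024;	/* possibly unaligned first bvec */
	unsigned long offset = 20480;	/* 20 KiB into the fixed buffer */
	unsigned long seg_skip, intra;

	offset -= first_len;				/* past bvec 0: 19456 left */
	seg_skip = 1 + (offset >> DEMO_PAGE_SHIFT);	/* 1 + 19456/4096 = 5 */
	intra = offset & (DEMO_PAGE_SIZE - 1);		/* 19456 - 4*4096 = 3072 */

	printf("resume in bvec %lu, %lu bytes in\n", seg_skip, intra);
	return 0;
}
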
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 2d165388d952..93cd11938bf5 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0-or-newer
+# SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (c) 2019 Oracle.
# All Rights Reserved.
diff --git a/fs/namespace.c b/fs/namespace.c
index 6464ea4acba9..d28d30b13043 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1463,7 +1463,6 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
disconnect = disconnect_mount(p, how);
-
if (mnt_has_parent(p)) {
mnt_add_count(p->mnt_parent, -1);
if (!disconnect) {
@@ -1471,10 +1470,11 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
} else {
umount_mnt(p);
- hlist_add_head(&p->mnt_umount, &unmounted);
}
}
change_mnt_propagation(p, MS_PRIVATE);
+ if (disconnect)
+ hlist_add_head(&p->mnt_umount, &unmounted);
}
}
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 385f3aaa2448..90c830e3758e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3825,7 +3825,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int low_bucket = 0, bucket, high_bucket;
struct ocfs2_xattr_bucket *search;
- u32 last_hash;
u64 blkno, lower_blkno = 0;
search = ocfs2_xattr_bucket_new(inode);
@@ -3869,8 +3868,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
if (xh->xh_count)
xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
- last_hash = le32_to_cpu(xe->xe_name_hash);
-
/* record lower_blkno which may be the insert place. */
lower_blkno = blkno;
diff --git a/fs/open.c b/fs/open.c
index b5b80469b93d..a59abe3c669a 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -374,6 +374,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
override_cred->cap_permitted;
}
+ /*
+ * The new set of credentials can *only* be used in
+ * task-synchronous circumstances, and does not need
+ * RCU freeing, unless somebody then takes a separate
+ * reference to it.
+ *
+ * NOTE! This is _only_ true because this credential
+ * is used purely for override_creds() that installs
+ * it as the subjective cred. Other threads will be
+ * accessing ->real_cred, not the subjective cred.
+ *
+ * If somebody _does_ make a copy of this (using the
+ * 'get_current_cred()' function), that will clear the
+ * non_rcu field, because now that other user may be
+ * expecting RCU freeing. But normal thread-synchronous
+ * cred accesses will keep things non-RCU.
+ */
+ override_cred->non_rcu = 1;
+
old_cred = override_creds(override_cred);
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
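
The long comment above describes a strictly task-synchronous credential override: the temporary cred is only ever installed as the subjective cred via override_creds(), so marking it non_rcu lets it be freed without waiting for an RCU grace period (see the cred.h hunk further down for the non_rcu field). A hedged sketch of that pattern outside of faccessat(); my_check_path() is a hypothetical stand-in for the real work:

#include <linux/cred.h>
#include <linux/errno.h>

static int my_check_path(void);		/* hypothetical helper */

static int with_temporary_creds(void)
{
	const struct cred *old_cred;
	struct cred *override_cred;
	int ret;

	override_cred = prepare_creds();
	if (!override_cred)
		return -ENOMEM;

	/* ... tweak override_cred->fsuid / fsgid / cap_effective here ... */

	/* Subjective, thread-local use only: safe to skip RCU on release. */
	override_cred->non_rcu = 1;
	old_cred = override_creds(override_cred);

	ret = my_check_path();

	revert_creds(old_cred);
	put_cred(override_cred);	/* released without an RCU grace period */
	return ret;
}
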
diff --git a/fs/super.c b/fs/super.c
index 113c58f19425..5960578a4076 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -478,13 +478,10 @@ EXPORT_SYMBOL(generic_shutdown_super);
bool mount_capable(struct fs_context *fc)
{
- struct user_namespace *user_ns = fc->global ? &init_user_ns
- : fc->user_ns;
-
if (!(fc->fs_type->fs_flags & FS_USERNS_MOUNT))
return capable(CAP_SYS_ADMIN);
else
- return ns_capable(user_ns, CAP_SYS_ADMIN);
+ return ns_capable(fc->user_ns, CAP_SYS_ADMIN);
}
/**
diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c
index 94c4f1de1922..77ff9f97bcda 100644
--- a/fs/xfs/scrub/dabtree.c
+++ b/fs/xfs/scrub/dabtree.c
@@ -278,7 +278,11 @@ xchk_da_btree_block_check_sibling(
/* Compare upper level pointer to sibling pointer. */
if (ds->state->altpath.blk[level].blkno != sibling)
xchk_da_set_corrupt(ds, level);
- xfs_trans_brelse(ds->dargs.trans, ds->state->altpath.blk[level].bp);
+ if (ds->state->altpath.blk[level].bp) {
+ xfs_trans_brelse(ds->dargs.trans,
+ ds->state->altpath.blk[level].bp);
+ ds->state->altpath.blk[level].bp = NULL;
+ }
out:
return error;
}
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index a8a06bb78ea8..f5c955d35be4 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -272,6 +272,7 @@ xfs_bulkstat_to_bstat(
struct xfs_bstat *bs1,
const struct xfs_bulkstat *bstat)
{
+ /* memset is needed here because of padding holes in the structure. */
memset(bs1, 0, sizeof(struct xfs_bstat));
bs1->bs_ino = bstat->bs_ino;
bs1->bs_mode = bstat->bs_mode;
@@ -388,6 +389,8 @@ xfs_inumbers_to_inogrp(
struct xfs_inogrp *ig1,
const struct xfs_inumbers *ig)
{
+ /* memset is needed here because of padding holes in the structure. */
+ memset(ig1, 0, sizeof(struct xfs_inogrp));
ig1->xi_startino = ig->xi_startino;
ig1->xi_alloccount = ig->xi_alloccount;
ig1->xi_allocmask = ig->xi_allocmask;
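
Both memset() comments above address the same hazard: when a structure with compiler-inserted padding is copied to user space, any bytes not explicitly assigned leak whatever was previously on the kernel stack. A generic illustration of the idea (the struct and function below are invented for the example, not part of XFS):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

/* Illustrative struct with an implicit 4-byte padding hole before the u64. */
struct demo_stat {
	__u32 count;
	/* 4 bytes of padding on 64-bit builds */
	__u64 first_ino;
};

static int demo_fill(struct demo_stat __user *dst, __u32 count, __u64 first_ino)
{
	struct demo_stat tmp;

	/* Without this, the padding bytes would carry old stack contents. */
	memset(&tmp, 0, sizeof(tmp));
	tmp.count = count;
	tmp.first_ino = first_ino;

	return copy_to_user(dst, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
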
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 8666fe7f35d7..02970b11f71f 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
static inline int
arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int oldval = 0, ret;
-
- pagefault_disable();
-
- switch (op) {
- case FUTEX_OP_SET:
- case FUTEX_OP_ADD:
- case FUTEX_OP_OR:
- case FUTEX_OP_ANDN:
- case FUTEX_OP_XOR:
- default:
- ret = -ENOSYS;
- }
-
- pagefault_enable();
-
- if (!ret)
- *oval = oldval;
-
- return ret;
+ return -ENOSYS;
}
static inline int
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7a52be..e9f20b813a69 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
#include <linux/compiler.h>
#include <linux/log2.h>
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
- int order;
-
- size--;
- size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
- order = fls(size);
-#else
- order = fls64(size);
-#endif
- return order;
-}
-
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
*/
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? ( \
- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
- ilog2((n) - 1) - PAGE_SHIFT + 1) \
- ) : \
- __get_order(n) \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ if (__builtin_constant_p(size)) {
+ if (!size)
+ return BITS_PER_LONG - PAGE_SHIFT;
+
+ if (size < (1UL << PAGE_SHIFT))
+ return 0;
+
+ return ilog2((size) - 1) - PAGE_SHIFT + 1;
+ }
+
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ return fls(size);
+#else
+ return fls64(size);
+#endif
+}
#endif /* __ASSEMBLY__ */
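
The rewritten get_order() keeps the documented semantics: it returns the smallest order such that 2^order pages cover the given byte size, with the constant-folding path preserved for compile-time sizes. Assuming 4 KiB pages, get_order(4096) == 0, get_order(4097) == 1 and get_order(65536) == 4, so a 64 KiB buffer needs an order-4 (sixteen page) allocation, e.g.:

#include <linux/gfp.h>	/* alloc_pages(); get_order() comes in via asm/page.h */

static struct page *demo_alloc_64k(void)
{
	/* order 4 with 4 KiB pages: sixteen contiguous pages */
	return alloc_pages(GFP_KERNEL, get_order(64 * 1024));
}
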
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 72d51d1e9dd9..5cf2c5dd8b1e 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -149,6 +149,8 @@ struct drm_client_buffer {
struct drm_client_buffer *
drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+void *drm_client_buffer_vmap(struct drm_client_buffer *buffer);
+void drm_client_buffer_vunmap(struct drm_client_buffer *buffer);
int drm_client_modeset_create(struct drm_client_dev *client);
void drm_client_modeset_free(struct drm_client_dev *client);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 759d462d028b..f57eea0481e0 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -853,6 +853,13 @@ struct drm_mode_config {
uint32_t preferred_depth, prefer_shadow;
/**
+ * @prefer_shadow_fbdev:
+ *
+ * Hint to framebuffer emulation to prefer shadow-fb rendering.
+ */
+ bool prefer_shadow_fbdev;
+
+ /**
* @quirk_addfb_prefer_xbgr_30bpp:
*
* Special hack for legacy ADDFB to keep nouveau userspace happy. Should
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 689a58231288..12811091fd50 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -181,6 +181,7 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
+extern bool blkcg_debug_stats;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index feff3fe4467e..1b1fa1557e68 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,6 +311,7 @@ enum req_flag_bits {
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
__REQ_NOWAIT, /* Don't wait if request will block */
+ __REQ_NOWAIT_INLINE, /* Return would-block error inline */
/*
* When a shared kthread needs to issue a bio for a cgroup, doing
* so synchronously can lead to priority inversions as the kthread
@@ -345,6 +346,7 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
+#define REQ_NOWAIT_INLINE (1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT (1ULL << __REQ_CGROUP_PUNT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
@@ -418,12 +420,13 @@ static inline int op_stat_group(unsigned int op)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
+#define BLK_QC_T_EAGAIN -2U
#define BLK_QC_T_SHIFT 16
#define BLK_QC_T_INTERNAL (1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
- return cookie != BLK_QC_T_NONE;
+ return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 3c096c7a51dc..853a8f181394 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -359,6 +359,7 @@ int __must_check devm_clk_bulk_get(struct device *dev, int num_clks,
/**
* devm_clk_bulk_get_optional - managed get multiple optional consumer clocks
* @dev: device for clock "consumer"
+ * @num_clks: the number of clk_bulk_data
* @clks: pointer to the clk_bulk_data table of consumer
*
* Behaves the same as devm_clk_bulk_get() except where there is no clock
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 7eb43a038330..f7a30e0099be 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -145,7 +145,11 @@ struct cred {
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
- struct rcu_head rcu; /* RCU deletion hook */
+ /* RCU deletion */
+ union {
+ int non_rcu; /* Can we skip RCU deletion? */
+ struct rcu_head rcu; /* RCU deletion hook */
+ };
} __randomize_layout;
extern void __put_cred(struct cred *);
@@ -246,6 +250,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
if (!cred)
return cred;
validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
return get_new_cred(nonconst_cred);
}
@@ -257,6 +262,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
if (!atomic_inc_not_zero(&nonconst_cred->usage))
return NULL;
validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
return cred;
}
diff --git a/include/linux/device.h b/include/linux/device.h
index c330b75c6c57..6717adee33f0 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -915,6 +915,8 @@ struct dev_links_info {
* This identifies the device type and carries type-specific
* information.
* @mutex: Mutex to synchronize calls to its driver.
+ * @lockdep_mutex: An optional debug lock that a subsystem can use as a
+ * peer lock to gain localized lockdep coverage of the device_lock.
* @bus: Type of bus device is on.
* @driver: Which driver has allocated this
* @platform_data: Platform data specific to the device.
@@ -998,6 +1000,9 @@ struct device {
core doesn't touch it */
void *driver_data; /* Driver data, set and get with
dev_set_drvdata/dev_get_drvdata */
+#ifdef CONFIG_PROVE_LOCKING
+ struct mutex lockdep_mutex;
+#endif
struct mutex mutex; /* mutex to synchronize calls to
* its driver.
*/
@@ -1383,6 +1388,7 @@ extern int (*platform_notify_remove)(struct device *dev);
*/
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
+extern bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
diff --git a/include/linux/dim.h b/include/linux/dim.h
index d3a0fbfff2bb..9fa4b3f88c39 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -272,62 +272,6 @@ dim_update_sample_with_comps(u16 event_ctr, u64 packets, u64 bytes, u64 comps,
/* Net DIM */
-/*
- * Net DIM profiles:
- * There are different set of profiles for each CQ period mode.
- * There are different set of profiles for RX/TX CQs.
- * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES
- */
-#define NET_DIM_PARAMS_NUM_PROFILES 5
-#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
-#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
-#define NET_DIM_DEF_PROFILE_CQE 1
-#define NET_DIM_DEF_PROFILE_EQE 1
-
-#define NET_DIM_RX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
-}
-
-#define NET_DIM_RX_CQE_PROFILES { \
- {2, 256}, \
- {8, 128}, \
- {16, 64}, \
- {32, 64}, \
- {64, 64} \
-}
-
-#define NET_DIM_TX_EQE_PROFILES { \
- {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
- {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
-}
-
-#define NET_DIM_TX_CQE_PROFILES { \
- {5, 128}, \
- {8, 64}, \
- {16, 32}, \
- {32, 32}, \
- {64, 32} \
-}
-
-static const struct dim_cq_moder
-rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
- NET_DIM_RX_EQE_PROFILES,
- NET_DIM_RX_CQE_PROFILES,
-};
-
-static const struct dim_cq_moder
-tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
- NET_DIM_TX_EQE_PROFILES,
- NET_DIM_TX_CQE_PROFILES,
-};
-
/**
* net_dim_get_rx_moderation - provide a CQ moderation object for the given RX profile
* @cq_period_mode: CQ period mode
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e11b115dd0e4..f7d1eea32c78 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -689,8 +689,8 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
*/
static inline bool dma_addressing_limited(struct device *dev)
{
- return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
- dma_get_required_mask(dev);
+ return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+ dma_get_required_mask(dev);
}
#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
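
dma_addressing_limited() now compares the effective streaming mask (dma_get_mask()) and the bus mask against what the platform actually requires, telling a driver whether some system memory is unreachable for its device. A hedged usage sketch; the bounce-buffer helper is hypothetical:

#include <linux/dma-mapping.h>

static int my_dev_enable_bounce(struct device *dev);	/* hypothetical helper */

static int demo_dma_setup(struct device *dev)
{
	int ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (ret)
		return ret;

	/* Some memory may be beyond the device's reach: bounce it ourselves. */
	if (dma_addressing_limited(dev))
		return my_dev_enable_bounce(dev);

	return 0;
}
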
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 17cd0078377c..1dd014c9c87b 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -45,7 +45,6 @@ struct elevator_mq_ops {
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
void (*completed_request)(struct request *, u64);
- void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
struct request *(*next_request)(struct request_queue *, struct request *);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ff65d22cf336..92c6e31fb008 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -24,6 +24,7 @@
#include <net/sch_generic.h>
+#include <asm/byteorder.h>
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>
@@ -747,6 +748,18 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
return size <= size_default && (size & (size - 1)) == 0;
}
+static inline u8
+bpf_ctx_narrow_load_shift(u32 off, u32 size, u32 size_default)
+{
+ u8 load_off = off & (size_default - 1);
+
+#ifdef __LITTLE_ENDIAN
+ return load_off * 8;
+#else
+ return (size_default - (load_off + size)) * 8;
+#endif
+}
+
#define bpf_ctx_wide_access_ok(off, size, type, field) \
(size == sizeof(__u64) && \
off >= offsetof(type, field) && \
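
bpf_ctx_narrow_load_shift() computes how far a full-width context load must be shifted so that a narrow (1, 2 or 4 byte) access reads the right bytes on either endianness. A worked example in plain C for a 2-byte load at byte offset 2 of an 8-byte field: the shift is 2 * 8 = 16 bits on little-endian and (8 - (2 + 2)) * 8 = 32 bits on big-endian.

#include <stdio.h>

/* Same arithmetic as bpf_ctx_narrow_load_shift(), parameterised on endianness. */
static unsigned int narrow_load_shift(unsigned int off, unsigned int size,
				      unsigned int size_default, int little_endian)
{
	unsigned int load_off = off & (size_default - 1);

	return little_endian ? load_off * 8
			     : (size_default - (load_off + size)) * 8;
}

int main(void)
{
	printf("LE shift: %u bits\n", narrow_load_shift(2, 2, 8, 1));	/* 16 */
	printf("BE shift: %u bits\n", narrow_load_shift(2, 2, 8, 0));	/* 32 */
	return 0;
}
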
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 56b8e358af5c..997a530ff4e9 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2598,6 +2598,12 @@ extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
void *holder);
+extern struct block_device *bd_start_claiming(struct block_device *bdev,
+ void *holder);
+extern void bd_finish_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder);
+extern void bd_abort_claiming(struct block_device *bdev,
+ struct block_device *whole, void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
extern int __blkdev_reread_part(struct block_device *bdev);
extern int blkdev_reread_part(struct block_device *bdev);
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 9ddcf50a3c59..a7f08fb0f865 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -247,7 +247,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void devm_gpiod_unhinge(struct device *dev,
@@ -256,7 +256,7 @@ static inline void devm_gpiod_unhinge(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void gpiod_put_array(struct gpio_descs *descs)
@@ -264,7 +264,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline struct gpio_desc *__must_check
@@ -317,7 +317,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline void devm_gpiod_put_array(struct device *dev,
@@ -326,32 +326,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
might_sleep();
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(descs);
}
static inline int gpiod_get_direction(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_input(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
@@ -359,7 +359,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
static inline int gpiod_get_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_array_value(unsigned int array_size,
@@ -368,13 +368,13 @@ static inline int gpiod_get_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -382,13 +382,13 @@ static inline int gpiod_set_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_raw_array_value(unsigned int array_size,
@@ -397,13 +397,13 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_raw_array_value(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -411,14 +411,14 @@ static inline int gpiod_set_raw_array_value(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
@@ -427,13 +427,13 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -441,13 +441,13 @@ static inline int gpiod_set_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
@@ -456,14 +456,14 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
int value)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
}
static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
struct gpio_desc **desc_array,
@@ -471,41 +471,41 @@ static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
unsigned long *value_bitmap)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc_array);
return 0;
}
static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -ENOSYS;
}
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_cansleep(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return 0;
}
static inline int gpiod_to_irq(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
@@ -513,7 +513,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
const char *name)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
@@ -525,7 +525,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
static inline int desc_to_gpio(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
- WARN_ON(1);
+ WARN_ON(desc);
return -EINVAL;
}
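
Not from the diff: a small sketch of what the WARN_ON(1) to WARN_ON(desc) change in the CONFIG_GPIOLIB=n stubs above buys. The "led" consumer name and my_blink() are invented for illustration.

#include <linux/gpio/consumer.h>

static void my_blink(struct device *dev)
{
	/* With GPIOLIB disabled, the gpiod_get_optional() stub returns NULL. */
	struct gpio_desc *led = gpiod_get_optional(dev, "led", GPIOD_OUT_LOW);

	/*
	 * A NULL descriptor is the legitimate "optional GPIO not wired up"
	 * case: WARN_ON(desc) stays silent here, whereas the old WARN_ON(1)
	 * warned unconditionally.
	 */
	gpiod_set_value(led, 1);
}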
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index b8a08b2a10ca..7ef56dc18050 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -484,60 +484,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline bool hmm_vma_range_done(struct hmm_range *range)
-{
- bool ret = hmm_range_valid(range);
-
- hmm_range_unregister(range);
- return ret;
-}
-
-/* This is a temporary helper to avoid merge conflict between trees. */
-static inline int hmm_vma_fault(struct hmm_mirror *mirror,
- struct hmm_range *range, bool block)
-{
- long ret;
-
- /*
- * With the old API the driver must set each individual entries with
- * the requested flags (valid, write, ...). So here we set the mask to
- * keep intact the entries provided by the driver and zero out the
- * default_flags.
- */
- range->default_flags = 0;
- range->pfn_flags_mask = -1UL;
-
- ret = hmm_range_register(range, mirror,
- range->start, range->end,
- PAGE_SHIFT);
- if (ret)
- return (int)ret;
-
- if (!hmm_range_wait_until_valid(range, HMM_RANGE_DEFAULT_TIMEOUT)) {
- /*
- * The mmap_sem was taken by driver we release it here and
- * returns -EAGAIN which correspond to mmap_sem have been
- * drop in the old API.
- */
- up_read(&range->vma->vm_mm->mmap_sem);
- return -EAGAIN;
- }
-
- ret = hmm_range_fault(range, block);
- if (ret <= 0) {
- if (ret == -EBUSY || !ret) {
- /* Same as above, drop mmap_sem to match old API. */
- up_read(&range->vma->vm_mm->mmap_sem);
- ret = -EBUSY;
- } else if (ret == -EAGAIN)
- ret = -EBUSY;
- hmm_range_unregister(range);
- return ret;
- }
- return 0;
-}
-
/* Below are for HMM internal use only! Not to be used by device driver! */
static inline void hmm_mm_init(struct mm_struct *mm)
{
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 8b728750a625..69e813bcb947 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -80,6 +80,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
/* PPPoX socket states */
enum {
diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h
index b4f5403383fc..9661416a9bb4 100644
--- a/include/linux/if_rmnet.h
+++ b/include/linux/if_rmnet.h
@@ -41,11 +41,11 @@ struct rmnet_map_ul_csum_header {
__be16 csum_start_offset;
#if defined(__LITTLE_ENDIAN_BITFIELD)
u16 csum_insert_offset:14;
- u16 udp_ip4_ind:1;
+ u16 udp_ind:1;
u16 csum_enabled:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
u16 csum_enabled:1;
- u16 udp_ip4_ind:1;
+ u16 udp_ind:1;
u16 csum_insert_offset:14;
#else
#error "Please fix <asm/byteorder.h>"
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 781b96ac706f..a0637abffee8 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -155,6 +155,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
+bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -235,6 +236,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+ return false;
+}
+
static inline int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb,
iova_entry_dtor entry_dtor)
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 04a569568eac..f049af3f3cd8 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -220,6 +220,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 30d15e80bcc7..da5e7eaed438 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -5998,10 +5998,12 @@ struct mlx5_ifc_modify_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x40];
+ u8 reserved_at_280[0x60];
u8 cq_umem_valid[0x1];
- u8 reserved_at_2c1[0x5bf];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b2c1648f7e5d..5714fd35a83c 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -814,6 +814,7 @@ struct tee_client_device_id {
/**
* struct wmi_device_id - WMI device identifier
* @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
+ * @context: pointer to driver specific data
*/
struct wmi_device_id {
const char guid_string[UUID_STRING_LEN+1];
diff --git a/include/linux/of.h b/include/linux/of.h
index 0cf857012f11..844f89e1b039 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -1164,7 +1164,7 @@ static inline int of_property_read_string_index(const struct device_node *np,
}
/**
- * of_property_read_bool - Findfrom a property
+ * of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*
diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h
index 1dda31825ec4..71283739ffd2 100644
--- a/include/linux/page-flags-layout.h
+++ b/include/linux/page-flags-layout.h
@@ -32,6 +32,7 @@
#endif /* CONFIG_SPARSEMEM */
+#ifndef BUILD_VDSO32_64
/*
* page->flags layout:
*
@@ -76,20 +77,22 @@
#define LAST_CPUPID_SHIFT 0
#endif
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#ifdef CONFIG_KASAN_SW_TAGS
+#define KASAN_TAG_WIDTH 8
+#else
+#define KASAN_TAG_WIDTH 0
+#endif
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT+LAST_CPUPID_SHIFT+KASAN_TAG_WIDTH \
+ <= BITS_PER_LONG - NR_PAGEFLAGS
#define LAST_CPUPID_WIDTH LAST_CPUPID_SHIFT
#else
#define LAST_CPUPID_WIDTH 0
#endif
-#ifdef CONFIG_KASAN_SW_TAGS
-#define KASAN_TAG_WIDTH 8
#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH+LAST_CPUPID_WIDTH+KASAN_TAG_WIDTH \
> BITS_PER_LONG - NR_PAGEFLAGS
-#error "KASAN: not enough bits in page flags for tag"
-#endif
-#else
-#define KASAN_TAG_WIDTH 0
+#error "Not enough bits in page flags"
#endif
/*
@@ -104,4 +107,5 @@
#define LAST_CPUPID_NOT_IN_PAGE_FLAGS
#endif
+#endif
#endif /* _LINUX_PAGE_FLAGS_LAYOUT */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b848517da64c..f91cb8898ff0 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -152,6 +152,8 @@ enum pageflags {
PG_savepinned = PG_dirty,
/* Has a grant mapping of another (foreign) domain's page. */
PG_foreign = PG_owner_priv_1,
+ /* Remapped by swiotlb-xen. */
+ PG_xen_remapped = PG_owner_priv_1,
/* SLOB */
PG_slob_free = PG_private,
@@ -329,6 +331,8 @@ PAGEFLAG(Pinned, pinned, PF_NO_COMPOUND)
TESTSCFLAG(Pinned, pinned, PF_NO_COMPOUND)
PAGEFLAG(SavePinned, savepinned, PF_NO_COMPOUND);
PAGEFLAG(Foreign, foreign, PF_NO_COMPOUND);
+PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
+ TESTCLEARFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND)
PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
__CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8dc1811487f5..9f51932bd543 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1092,7 +1092,15 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct numa_group *numa_group;
+ /*
+ * This pointer is only modified for current in syscall and
+ * pagefault context (and for tasks being destroyed), so it can be read
+ * from any of the following contexts:
+ * - RCU read-side critical section
+ * - current->numa_group from everywhere
+ * - task's runqueue locked, task not running
+ */
+ struct numa_group __rcu *numa_group;
/*
* numa_faults is an array split into four regions:
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index e7dd04a84ba8..3988762efe15 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -19,7 +19,7 @@
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
static inline void set_numabalancing_state(bool enabled)
{
}
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 50ced8aba9db..e4b3fb4bb77c 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -354,7 +354,13 @@ static inline void sk_psock_restore_proto(struct sock *sk,
sk->sk_write_space = psock->saved_write_space;
if (psock->sk_proto) {
- sk->sk_prot = psock->sk_proto;
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ bool has_ulp = !!icsk->icsk_ulp_data;
+
+ if (has_ulp)
+ tcp_update_ulp(sk, psock->sk_proto);
+ else
+ sk->sk_prot = psock->sk_proto;
psock->sk_proto = NULL;
}
}
diff --git a/include/linux/wait.h b/include/linux/wait.h
index b6f77cf60dd7..30c515520fb2 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -127,6 +127,19 @@ static inline int waitqueue_active(struct wait_queue_head *wq_head)
}
/**
+ * wq_has_single_sleeper - check if there is only one sleeper
+ * @wq_head: wait queue head
+ *
+ * Returns true if wq_head has only one sleeper on the list.
+ *
+ * Please refer to the comment for waitqueue_active.
+ */
+static inline bool wq_has_single_sleeper(struct wait_queue_head *wq_head)
+{
+ return list_is_singular(&wq_head->head);
+}
+
+/**
* wq_has_sleeper - check if there are any waiting processes
* @wq_head: wait queue head
*
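
A minimal usage sketch of the new wq_has_single_sleeper() helper, not part of the patch; the wait queue and the wake-up policy are invented, and as the comment above notes, callers need the same memory-ordering care as with waitqueue_active().

#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void my_notify(void)
{
	/* Illustrative policy: a lone waiter gets a targeted wake-up. */
	if (wq_has_single_sleeper(&my_wq))
		wake_up(&my_wq);
	else
		wake_up_all(&my_wq);
}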
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 35ec1f0a2bf9..8140c4837122 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -7335,6 +7335,21 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
struct cfg80211_pmsr_request *req,
gfp_t gfp);
+/**
+ * cfg80211_iftype_allowed - check whether the interface can be allowed
+ * @wiphy: the wiphy
+ * @iftype: interface type
+ * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
+ * @check_swif: check iftype against software interfaces
+ *
+ * Check whether the interface is allowed to operate; additionally, this API
+ * can be used to check iftype against the software interfaces when
+ * check_swif is '1'.
+ */
+bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
+ bool is_4addr, u8 check_swif);
+
+
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* wiphy_printk helpers, similar to dev_printk */
diff --git a/include/net/tc_act/tc_police.h b/include/net/tc_act/tc_police.h
index 8b9ef3664262..cfdc7cb82cad 100644
--- a/include/net/tc_act/tc_police.h
+++ b/include/net/tc_act/tc_police.h
@@ -54,7 +54,7 @@ static inline u64 tcf_police_rate_bytes_ps(const struct tc_action *act)
struct tcf_police *police = to_police(act);
struct tcf_police_params *params;
- params = rcu_dereference_bh(police->params);
+ params = rcu_dereference_bh_rtnl(police->params);
return params->rate.rate_bytes_ps;
}
@@ -63,7 +63,7 @@ static inline s64 tcf_police_tcfp_burst(const struct tc_action *act)
struct tcf_police *police = to_police(act);
struct tcf_police_params *params;
- params = rcu_dereference_bh(police->params);
+ params = rcu_dereference_bh_rtnl(police->params);
return params->tcfp_burst;
}
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
index 0a559d4b6f0f..b4fce0fae645 100644
--- a/include/net/tc_act/tc_sample.h
+++ b/include/net/tc_act/tc_sample.h
@@ -44,7 +44,7 @@ static inline int tcf_sample_trunc_size(const struct tc_action *a)
static inline struct psample_group *
tcf_sample_psample_group(const struct tc_action *a)
{
- return rcu_dereference(to_sample(a)->psample_group);
+ return rcu_dereference_rtnl(to_sample(a)->psample_group);
}
#endif /* __NET_TC_SAMPLE_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e5cf514ba118..81e8ade1e6e4 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -2108,6 +2108,8 @@ struct tcp_ulp_ops {
/* initialize ulp */
int (*init)(struct sock *sk);
+ /* update ulp */
+ void (*update)(struct sock *sk, struct proto *p);
/* cleanup ulp */
void (*release)(struct sock *sk);
@@ -2119,6 +2121,7 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
+void tcp_update_ulp(struct sock *sk, struct proto *p);
#define MODULE_ALIAS_TCP_ULP(name) \
__MODULE_INFO(alias, alias_userspace, name); \
diff --git a/include/net/tls.h b/include/net/tls.h
index 584609174fe0..41b2d41bb1b8 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -107,9 +107,7 @@ struct tls_device {
enum {
TLS_BASE,
TLS_SW,
-#ifdef CONFIG_TLS_DEVICE
TLS_HW,
-#endif
TLS_HW_RECORD,
TLS_NUM_CONFIG,
};
@@ -162,6 +160,7 @@ struct tls_sw_context_tx {
int async_capable;
#define BIT_TX_SCHEDULED 0
+#define BIT_TX_CLOSING 1
unsigned long tx_bitmask;
};
@@ -272,6 +271,8 @@ struct tls_context {
unsigned long flags;
/* cache cold stuff */
+ struct proto *sk_proto;
+
void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
@@ -355,13 +356,17 @@ int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
+void tls_sw_strparser_done(struct tls_context *tls_ctx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
-void tls_sw_close(struct sock *sk, long timeout);
-void tls_sw_free_resources_tx(struct sock *sk);
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
+void tls_sw_release_resources_tx(struct sock *sk);
+void tls_sw_free_ctx_tx(struct tls_context *tls_ctx);
void tls_sw_free_resources_rx(struct sock *sk);
void tls_sw_release_resources_rx(struct sock *sk);
+void tls_sw_free_ctx_rx(struct tls_context *tls_ctx);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
bool tls_sw_stream_read(const struct sock *sk);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index c5f8a9f17063..4f225175cb91 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2647,7 +2647,9 @@ struct ib_client {
const union ib_gid *gid,
const struct sockaddr *addr,
void *client_data);
- struct list_head list;
+
+ refcount_t uses;
+ struct completion uses_zero;
u32 client_id;
/* kverbs are not required by the client */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 0eeea520a853..e06c77d76463 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
/**
* rvt_qp_wqe_unreserve - clean reserved operation
* @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
*
* This decrements the reserve use count.
*
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
* the compiler does not juggle the order of the s_last
* ring index and the decrementing of s_reserved_used.
*/
-static inline void rvt_qp_wqe_unreserve(
- struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
- if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+ if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
atomic_dec(&qp->s_reserved_used);
/* insure no compiler re-order up to s_last change */
smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
u32 byte_len, last;
int flags = wqe->wr.send_flags;
+ rvt_qp_wqe_unreserve(qp, flags);
rvt_put_qp_swqe(qp, wqe);
need_completion =
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 2d64b53f947c..9b87e1a1c646 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -115,7 +115,7 @@ struct fc_disc_port {
struct fc_lport *lp;
struct list_head peers;
struct work_struct rport_work;
- u32 port_id;
+ u32 port_id;
};
/**
@@ -155,14 +155,14 @@ struct fc_rport_operations {
*/
struct fc_rport_libfc_priv {
struct fc_lport *local_port;
- enum fc_rport_state rp_state;
+ enum fc_rport_state rp_state;
u16 flags;
#define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
#define FC_RP_FLAGS_RETRY (1 << 1)
#define FC_RP_STARTED (1 << 2)
#define FC_RP_FLAGS_CONF_REQ (1 << 3)
- unsigned int e_d_tov;
- unsigned int r_a_tov;
+ unsigned int e_d_tov;
+ unsigned int r_a_tov;
};
/**
@@ -191,24 +191,24 @@ struct fc_rport_priv {
struct fc_lport *local_port;
struct fc_rport *rport;
struct kref kref;
- enum fc_rport_state rp_state;
+ enum fc_rport_state rp_state;
struct fc_rport_identifiers ids;
u16 flags;
- u16 max_seq;
+ u16 max_seq;
u16 disc_id;
u16 maxframe_size;
- unsigned int retries;
- unsigned int major_retries;
- unsigned int e_d_tov;
- unsigned int r_a_tov;
- struct mutex rp_mutex;
+ unsigned int retries;
+ unsigned int major_retries;
+ unsigned int e_d_tov;
+ unsigned int r_a_tov;
+ struct mutex rp_mutex;
struct delayed_work retry_work;
- enum fc_rport_event event;
+ enum fc_rport_event event;
struct fc_rport_operations *ops;
- struct list_head peers;
- struct work_struct event_work;
+ struct list_head peers;
+ struct work_struct event_work;
u32 supported_classes;
- u16 prli_count;
+ u16 prli_count;
struct rcu_head rcu;
u16 sp_features;
u8 spp_type;
@@ -618,12 +618,12 @@ struct libfc_function_template {
* @disc_callback: Callback routine called when discovery completes
*/
struct fc_disc {
- unsigned char retry_count;
- unsigned char pending;
- unsigned char requested;
- unsigned short seq_count;
- unsigned char buf_len;
- u16 disc_id;
+ unsigned char retry_count;
+ unsigned char pending;
+ unsigned char requested;
+ unsigned short seq_count;
+ unsigned char buf_len;
+ u16 disc_id;
struct list_head rports;
void *priv;
@@ -697,7 +697,7 @@ struct fc_lport {
struct fc_rport_priv *ms_rdata;
struct fc_rport_priv *ptp_rdata;
void *scsi_priv;
- struct fc_disc disc;
+ struct fc_disc disc;
/* Virtual port information */
struct list_head vports;
@@ -715,7 +715,7 @@ struct fc_lport {
u8 retry_count;
/* Fabric information */
- u32 port_id;
+ u32 port_id;
u64 wwpn;
u64 wwnn;
unsigned int service_params;
@@ -743,11 +743,11 @@ struct fc_lport {
struct fc_ns_fts fcts;
/* Miscellaneous */
- struct mutex lp_mutex;
- struct list_head list;
+ struct mutex lp_mutex;
+ struct list_head list;
struct delayed_work retry_work;
void *prov[FC_FC4_PROV_SIZE];
- struct list_head lport_list;
+ struct list_head lport_list;
};
/**
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index c50fb297e265..2568cb0627ec 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -31,7 +31,7 @@
* FIP tunable parameters.
*/
#define FCOE_CTLR_START_DELAY 2000 /* mS after first adv. to choose FCF */
-#define FCOE_CTRL_SOL_TOV 2000 /* min. solicitation interval (mS) */
+#define FCOE_CTLR_SOL_TOV 2000 /* min. solicitation interval (mS) */
#define FCOE_CTLR_FCF_LIMIT 20 /* max. number of FCF entries */
#define FCOE_CTLR_VN2VN_LOGIN_LIMIT 3 /* max. VN2VN rport login retries */
@@ -229,6 +229,7 @@ struct fcoe_fcf {
* @vn_mac: VN_Node assigned MAC address for data
*/
struct fcoe_rport {
+ struct fc_rport_priv rdata;
unsigned long time;
u16 fcoe_len;
u16 flags;
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index 3f9d6b6a5691..c1036d16ed03 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -259,7 +259,7 @@ static inline int qe_alive_during_sleep(void)
/* Structure that defines QE firmware binary files.
*
- * See Documentation/powerpc/qe_firmware.txt for a description of these
+ * See Documentation/powerpc/qe_firmware.rst for a description of these
* fields.
*/
struct qe_firmware {
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index c5188ff724d1..bc88d6f964da 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -173,10 +173,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
wake_up(&stream->runtime->sleep);
}
diff --git a/include/sound/sof/control.h b/include/sound/sof/control.h
index bded69e696d4..6080ea0facd7 100644
--- a/include/sound/sof/control.h
+++ b/include/sound/sof/control.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/dai-intel.h b/include/sound/sof/dai-intel.h
index 4bb8ee138ba7..65e4c20e567c 100644
--- a/include/sound/sof/dai-intel.h
+++ b/include/sound/sof/dai-intel.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 3d174e20aa53..5b8de1b1983c 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index 12867bbd4372..10f00c08dbb7 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index 16528d2b4a50..a9156b4a062c 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
index 8ae3ad45bdf7..003879401d63 100644
--- a/include/sound/sof/pm.h
+++ b/include/sound/sof/pm.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 643f175cb479..0b71b381b952 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 41dcabf89899..c47b36240920 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/trace.h b/include/sound/sof/trace.h
index 9257d5473d97..fda6e8f6ead4 100644
--- a/include/sound/sof/trace.h
+++ b/include/sound/sof/trace.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/sound/sof/xtensa.h b/include/sound/sof/xtensa.h
index d25c764b10e8..dd53d36b34e1 100644
--- a/include/sound/sof/xtensa.h
+++ b/include/sound/sof/xtensa.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index 2212adda8f77..64e92d56c6a8 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -2,7 +2,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM dma_fence
-#if !defined(_TRACE_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_TRACE_DMA_FENCE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DMA_FENCE_H
#include <linux/tracepoint.h>
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h
index f3a12566bed0..6678cf8b235b 100644
--- a/include/trace/events/napi.h
+++ b/include/trace/events/napi.h
@@ -3,7 +3,7 @@
#define TRACE_SYSTEM napi
#if !defined(_TRACE_NAPI_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_NAPI_H_
+#define _TRACE_NAPI_H
#include <linux/netdevice.h>
#include <linux/tracepoint.h>
@@ -38,7 +38,7 @@ TRACE_EVENT(napi_poll,
#undef NO_DEV
-#endif /* _TRACE_NAPI_H_ */
+#endif /* _TRACE_NAPI_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/trace/events/qdisc.h b/include/trace/events/qdisc.h
index 60d0d8bd336d..0d1a9ebf55ba 100644
--- a/include/trace/events/qdisc.h
+++ b/include/trace/events/qdisc.h
@@ -2,7 +2,7 @@
#define TRACE_SYSTEM qdisc
#if !defined(_TRACE_QDISC_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_QDISC_H_
+#define _TRACE_QDISC_H
#include <linux/skbuff.h>
#include <linux/netdevice.h>
@@ -44,7 +44,7 @@ TRACE_EVENT(qdisc_dequeue,
__entry->txq_state, __entry->packets, __entry->skbaddr )
);
-#endif /* _TRACE_QDISC_H_ */
+#endif /* _TRACE_QDISC_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/trace/events/tegra_apb_dma.h b/include/trace/events/tegra_apb_dma.h
index 0818f6286110..971cd02d2daf 100644
--- a/include/trace/events/tegra_apb_dma.h
+++ b/include/trace/events/tegra_apb_dma.h
@@ -1,5 +1,5 @@
#if !defined(_TRACE_TEGRA_APB_DMA_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_TEGRA_APM_DMA_H
+#define _TRACE_TEGRA_APB_DMA_H
#include <linux/tracepoint.h>
#include <linux/dmaengine.h>
@@ -55,7 +55,7 @@ TRACE_EVENT(tegra_dma_isr,
TP_printk("%s: irq %d\n", __get_str(chan), __entry->irq)
);
-#endif /* _TRACE_TEGRADMA_H */
+#endif /* _TRACE_TEGRA_APB_DMA_H */
/* This part must be outside protection */
#include <trace/define_trace.h>
diff --git a/include/uapi/linux/bpfilter.h b/include/uapi/linux/bpfilter.h
index 2ec3cc99ea4c..cbc1f5813f50 100644
--- a/include/uapi/linux/bpfilter.h
+++ b/include/uapi/linux/bpfilter.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_LINUX_BPFILTER_H
#define _UAPI_LINUX_BPFILTER_H
diff --git a/include/uapi/linux/ipmi_bmc.h b/include/uapi/linux/ipmi_bmc.h
index 1670f0944227..782a03eb1086 100644
--- a/include/uapi/linux/ipmi_bmc.h
+++ b/include/uapi/linux/ipmi_bmc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Copyright (c) 2015-2018, Intel Corporation.
*/
diff --git a/include/uapi/linux/isst_if.h b/include/uapi/linux/isst_if.h
index d10b832c58c5..0a52b7b093d3 100644
--- a/include/uapi/linux/isst_if.h
+++ b/include/uapi/linux/isst_if.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* Intel Speed Select Interface: OS to hardware Interface
* Copyright (c) 2019, Intel Corporation.
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a7c19540ce21..5e3f12d5359e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -116,7 +116,7 @@ struct kvm_irq_level {
* ACPI gsi notion of irq.
* For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
* For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
- * For ARM: See Documentation/virtual/kvm/api.txt
+ * For ARM: See Documentation/virt/kvm/api.txt
*/
union {
__u32 irq;
@@ -1086,7 +1086,7 @@ struct kvm_xen_hvm_config {
*
* KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
* the irqfd to operate in resampling mode for level triggered interrupt
- * emulation. See Documentation/virtual/kvm/api.txt.
+ * emulation. See Documentation/virt/kvm/api.txt.
*/
#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
diff --git a/include/uapi/linux/netfilter/nf_synproxy.h b/include/uapi/linux/netfilter/nf_synproxy.h
index 6f3791c8946f..00d787f0260e 100644
--- a/include/uapi/linux/netfilter/nf_synproxy.h
+++ b/include/uapi/linux/netfilter/nf_synproxy.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _NF_SYNPROXY_H
#define _NF_SYNPROXY_H
diff --git a/include/uapi/linux/netfilter/xt_connlabel.h b/include/uapi/linux/netfilter/xt_connlabel.h
index 2312f0ec07b2..323f0dfc2a4e 100644
--- a/include/uapi/linux/netfilter/xt_connlabel.h
+++ b/include/uapi/linux/netfilter/xt_connlabel.h
@@ -1,4 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_XT_CONNLABEL_H
+#define _UAPI_XT_CONNLABEL_H
+
#include <linux/types.h>
#define XT_CONNLABEL_MAXBIT 127
@@ -11,3 +15,5 @@ struct xt_connlabel_mtinfo {
__u16 bit;
__u16 options;
};
+
+#endif /* _UAPI_XT_CONNLABEL_H */
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 8654b2442f6a..592a0c1b77c9 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* Userspace interface for AMD Secure Encrypted Virtualization (SEV)
* platform management commands.
diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 782069dcf607..4accfa7e266d 100644
--- a/include/uapi/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* SPDX-License-Identifier: GPL-2.0-or-later WITH Linux-syscall-note */
/* Types and definitions for AF_RXRPC.
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 5642c05e0da0..3cc3af1c2ee1 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -150,9 +150,6 @@
#define PORT_PNX8XXX 70
-/* Hilscher netx */
-#define PORT_NETX 71
-
/* SUN4V Hypervisor Console */
#define PORT_SUNHV 72
diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index 8eb96021709c..c3409c8ec0dd 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -6,17 +6,24 @@
* Desired design of maximum size and alignment (see RFC2553)
*/
#define _K_SS_MAXSIZE 128 /* Implementation specific max size */
-#define _K_SS_ALIGNSIZE (__alignof__ (struct sockaddr *))
- /* Implementation specific desired alignment */
typedef unsigned short __kernel_sa_family_t;
+/*
+ * The definition uses anonymous union and struct in order to control the
+ * default alignment.
+ */
struct __kernel_sockaddr_storage {
- __kernel_sa_family_t ss_family; /* address family */
- /* Following field(s) are implementation specific */
- char __data[_K_SS_MAXSIZE - sizeof(unsigned short)];
+ union {
+ struct {
+ __kernel_sa_family_t ss_family; /* address family */
+ /* Following field(s) are implementation specific */
+ char __data[_K_SS_MAXSIZE - sizeof(unsigned short)];
/* space to achieve desired size, */
/* _SS_MAXSIZE value minus size of ss_family */
-} __attribute__ ((aligned(_K_SS_ALIGNSIZE))); /* force desired alignment */
+ };
+ void *__align; /* implementation specific desired alignment */
+ };
+};
#endif /* _UAPI_LINUX_SOCKET_H */
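
An illustrative userspace compile-time check, not part of the patch, of the two properties the reworked __kernel_sockaddr_storage keeps: the 128-byte _K_SS_MAXSIZE footprint and pointer alignment, now supplied by the void *__align union member instead of an aligned attribute. Assumes a C11 compiler and the installed UAPI header.

#include <assert.h>
#include <stdalign.h>
#include <linux/socket.h>

static_assert(sizeof(struct __kernel_sockaddr_storage) == 128,
	      "storage keeps its _K_SS_MAXSIZE size");
static_assert(alignof(struct __kernel_sockaddr_storage) == alignof(void *),
	      "alignment now comes from the __align member");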
diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h
index 3c9ee3020cbb..652f169a019e 100644
--- a/include/uapi/linux/usb/g_uvc.h
+++ b/include/uapi/linux/usb/g_uvc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
* g_uvc.h -- USB Video Class Gadget driver API
*
diff --git a/include/uapi/linux/vbox_vmmdev_types.h b/include/uapi/linux/vbox_vmmdev_types.h
index 26f39816af14..c27289fd619a 100644
--- a/include/uapi/linux/vbox_vmmdev_types.h
+++ b/include/uapi/linux/vbox_vmmdev_types.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */
/*
* Virtual Device for Guest <-> VMM/Host communication, type definitions
* which are also used for the vboxguest ioctl interface / by vboxsf
diff --git a/include/uapi/linux/vboxguest.h b/include/uapi/linux/vboxguest.h
index 612f0c7d3558..9cec58a6a5ea 100644
--- a/include/uapi/linux/vboxguest.h
+++ b/include/uapi/linux/vboxguest.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR CDDL-1.0) */
/*
* VBoxGuest - VirtualBox Guest Additions Driver Interface.
*
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index ba1b460c9944..237e36a280cb 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -1,8 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
- * Virtio-iommu definition v0.9
+ * Virtio-iommu definition v0.12
*
- * Copyright (C) 2018 Arm Ltd.
+ * Copyright (C) 2019 Arm Ltd.
*/
#ifndef _UAPI_LINUX_VIRTIO_IOMMU_H
#define _UAPI_LINUX_VIRTIO_IOMMU_H
@@ -11,26 +11,31 @@
/* Feature bits */
#define VIRTIO_IOMMU_F_INPUT_RANGE 0
-#define VIRTIO_IOMMU_F_DOMAIN_BITS 1
+#define VIRTIO_IOMMU_F_DOMAIN_RANGE 1
#define VIRTIO_IOMMU_F_MAP_UNMAP 2
#define VIRTIO_IOMMU_F_BYPASS 3
#define VIRTIO_IOMMU_F_PROBE 4
+#define VIRTIO_IOMMU_F_MMIO 5
-struct virtio_iommu_range {
- __u64 start;
- __u64 end;
+struct virtio_iommu_range_64 {
+ __le64 start;
+ __le64 end;
+};
+
+struct virtio_iommu_range_32 {
+ __le32 start;
+ __le32 end;
};
struct virtio_iommu_config {
/* Supported page sizes */
- __u64 page_size_mask;
+ __le64 page_size_mask;
/* Supported IOVA range */
- struct virtio_iommu_range input_range;
+ struct virtio_iommu_range_64 input_range;
/* Max domain ID size */
- __u8 domain_bits;
- __u8 padding[3];
+ struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
- __u32 probe_size;
+ __le32 probe_size;
};
/* Request types */
@@ -49,6 +54,7 @@ struct virtio_iommu_config {
#define VIRTIO_IOMMU_S_RANGE 0x05
#define VIRTIO_IOMMU_S_NOENT 0x06
#define VIRTIO_IOMMU_S_FAULT 0x07
+#define VIRTIO_IOMMU_S_NOMEM 0x08
struct virtio_iommu_req_head {
__u8 type;
@@ -78,12 +84,10 @@ struct virtio_iommu_req_detach {
#define VIRTIO_IOMMU_MAP_F_READ (1 << 0)
#define VIRTIO_IOMMU_MAP_F_WRITE (1 << 1)
-#define VIRTIO_IOMMU_MAP_F_EXEC (1 << 2)
-#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 3)
+#define VIRTIO_IOMMU_MAP_F_MMIO (1 << 2)
#define VIRTIO_IOMMU_MAP_F_MASK (VIRTIO_IOMMU_MAP_F_READ | \
VIRTIO_IOMMU_MAP_F_WRITE | \
- VIRTIO_IOMMU_MAP_F_EXEC | \
VIRTIO_IOMMU_MAP_F_MMIO)
struct virtio_iommu_req_map {
diff --git a/include/uapi/linux/virtio_pmem.h b/include/uapi/linux/virtio_pmem.h
index 9a63ed6d062f..b022787ffb94 100644
--- a/include/uapi/linux/virtio_pmem.h
+++ b/include/uapi/linux/virtio_pmem.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */
/*
* Definitions for virtio-pmem devices.
*
diff --git a/include/uapi/linux/vmcore.h b/include/uapi/linux/vmcore.h
index 022619668e0e..3e9da91866ff 100644
--- a/include/uapi/linux/vmcore.h
+++ b/include/uapi/linux/vmcore.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI_VMCORE_H
#define _UAPI_VMCORE_H
diff --git a/include/uapi/linux/wmi.h b/include/uapi/linux/wmi.h
index c36f2d7675a4..7085c5dca9fa 100644
--- a/include/uapi/linux/wmi.h
+++ b/include/uapi/linux/wmi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
* User API methods for ACPI-WMI mapping driver
*
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index 6d701af9fc42..fb792e882cef 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __QCOM_FASTRPC_H__
#define __QCOM_FASTRPC_H__
diff --git a/include/uapi/rdma/rvt-abi.h b/include/uapi/rdma/rvt-abi.h
index 7328293c715c..7c05a02d2be5 100644
--- a/include/uapi/rdma/rvt-abi.h
+++ b/include/uapi/rdma/rvt-abi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
/*
* This file contains defines, structures, etc. that are used
diff --git a/include/uapi/rdma/siw-abi.h b/include/uapi/rdma/siw-abi.h
index 3dd8071ace7b..7de68f1dc707 100644
--- a/include/uapi/rdma/siw-abi.h
+++ b/include/uapi/rdma/siw-abi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) or BSD-3-Clause */
/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 17c7abd0803a..9988db6ad244 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* UFS Transport SGIO v4 BSG Message Support
*
diff --git a/include/uapi/sound/skl-tplg-interface.h b/include/uapi/sound/skl-tplg-interface.h
index f39352cef382..9eee32f5e407 100644
--- a/include/uapi/sound/skl-tplg-interface.h
+++ b/include/uapi/sound/skl-tplg-interface.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
* skl-tplg-interface.h - Intel DSP FW private data interface
*
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 4969817124a8..98b30c1613b2 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -109,6 +109,9 @@ static inline int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
}
#endif
+int xen_remap_vma_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long len);
+
/*
* xen_remap_domain_gfn_array() - map an array of foreign frames by gfn
* @vma: VMA to map the pages into
diff --git a/kernel/Makefile b/kernel/Makefile
index a8d923b5481b..ef0d95a190b4 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -111,7 +111,6 @@ obj-$(CONFIG_CONTEXT_TRACKING) += context_tracking.o
obj-$(CONFIG_TORTURE_TEST) += torture.o
obj-$(CONFIG_HAS_IOMEM) += iomem.o
-obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_RSEQ) += rseq.o
obj-$(CONFIG_GCC_PLUGIN_STACKLEAK) += stackleak.o
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 5900cbb966b1..c84d83f86141 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8616,8 +8616,8 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
}
if (is_narrower_load && size < target_size) {
- u8 shift = (off & (size_default - 1)) * 8;
-
+ u8 shift = bpf_ctx_narrow_load_shift(off, size,
+ size_default);
if (ctx_field_size <= 4) {
if (shift)
insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
diff --git a/kernel/cred.c b/kernel/cred.c
index f9a0ce66c9c3..c0a4c12d38b2 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -144,7 +144,10 @@ void __put_cred(struct cred *cred)
BUG_ON(cred == current->cred);
BUG_ON(cred == current->real_cred);
- call_rcu(&cred->rcu, put_cred_rcu);
+ if (cred->non_rcu)
+ put_cred_rcu(&cred->rcu);
+ else
+ call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);
@@ -261,6 +264,7 @@ struct cred *prepare_creds(void)
old = task->cred;
memcpy(new, old, sizeof(struct cred));
+ new->non_rcu = 0;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_group_info(new->group_info);
@@ -544,7 +548,19 @@ const struct cred *override_creds(const struct cred *new)
validate_creds(old);
validate_creds(new);
- get_cred(new);
+
+ /*
+ * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+ *
+ * That means that we do not clear the 'non_rcu' flag, since
+ * we are only installing the cred into the thread-synchronous
+ * '->cred' pointer, not the '->real_cred' pointer that is
+ * visible to other threads under RCU.
+ *
+ * Also note that we did validate_creds() manually, not depending
+ * on the validation in 'get_cred()'.
+ */
+ get_new_cred((struct cred *)new);
alter_cred_subscribers(new, 1);
rcu_assign_pointer(current->cred, new);
alter_cred_subscribers(old, -1);
@@ -681,6 +697,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
validate_creds(old);
*new = *old;
+ new->non_rcu = 0;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_uid(new->user);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index bfc0c17f2a3d..2bd410f934b3 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -243,8 +243,9 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
/* CMA can be used only in the context which permits sleeping */
if (cma && gfpflags_allow_blocking(gfp)) {
- align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
- page = cma_alloc(cma, count, align, gfp & __GFP_NOWARN);
+ size_t cma_align = min_t(size_t, align, CONFIG_CMA_ALIGNMENT);
+
+ page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);
}
/* Fallback allocation of normal pages */
@@ -266,7 +267,8 @@ struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
*/
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
- if (!cma_release(dev_get_cma_area(dev), page, size >> PAGE_SHIFT))
+ if (!cma_release(dev_get_cma_area(dev), page,
+ PAGE_ALIGN(size) >> PAGE_SHIFT))
__free_pages(page, get_order(size));
}
diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c
index 1f628e7ac709..b945239621d8 100644
--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -116,11 +116,16 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
int ret;
if (!dev_is_dma_coherent(dev)) {
+ unsigned long pfn;
+
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
return -ENXIO;
- page = pfn_to_page(arch_dma_coherent_to_pfn(dev, cpu_addr,
- dma_addr));
+ /* If the PFN is not valid, we do not have a struct page */
+ pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+ if (!pfn_valid(pfn))
+ return -ENXIO;
+ page = pfn_to_page(pfn);
} else {
page = virt_to_page(cpu_addr);
}
@@ -170,7 +175,11 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
if (!dev_is_dma_coherent(dev)) {
if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
return -ENXIO;
+
+ /* If the PFN is not valid, we do not have a struct page */
pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
+ if (!pfn_valid(pfn))
+ return -ENXIO;
} else {
pfn = page_to_pfn(virt_to_page(cpu_addr));
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 026a14541a38..0463c1151bae 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11274,7 +11274,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
goto err_unlock;
}
- perf_install_in_context(ctx, event, cpu);
+ perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
diff --git a/kernel/exit.c b/kernel/exit.c
index 4436158a6d30..5b4a5dcce8f8 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -734,9 +734,10 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
autoreap = true;
}
- tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
- if (tsk->exit_state == EXIT_DEAD)
+ if (autoreap) {
+ tsk->exit_state = EXIT_DEAD;
list_add(&tsk->ptrace_entry, &dead);
+ }
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
diff --git a/kernel/fork.c b/kernel/fork.c
index d8ae0f1b4148..2852d0e76ea3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -726,7 +726,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(tsk == current);
cgroup_free(tsk);
- task_numa_free(tsk);
+ task_numa_free(tsk, true);
security_task_free(tsk);
exit_creds(tsk);
delayacct_tsk_free(tsk);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 341f52117f88..4861cf8e274b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -448,7 +448,7 @@ static void print_lockdep_off(const char *bug_msg)
unsigned long nr_stack_trace_entries;
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the graph_lock.
@@ -491,7 +491,7 @@ unsigned int max_lockdep_depth;
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
/*
* Locking printouts:
*/
@@ -2969,7 +2969,7 @@ static void check_chain_key(struct task_struct *curr)
#endif
}
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
static int mark_lock(struct task_struct *curr, struct held_lock *this,
enum lock_usage_bit new_bit);
@@ -3608,7 +3608,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
return ret;
}
-#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#else /* CONFIG_PROVE_LOCKING */
static inline int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
@@ -3627,7 +3627,7 @@ static inline int separate_irq_context(struct task_struct *curr,
return 0;
}
-#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#endif /* CONFIG_PROVE_LOCKING */
/*
* Initialize a lock instance's lock-class mapping info:
@@ -4321,8 +4321,7 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie
*/
static void check_flags(unsigned long flags)
{
-#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
- defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
if (!debug_locks)
return;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 65b6a1600c8f..bda006f8a88b 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
static int lockdep_stats_show(struct seq_file *m, void *v)
{
- struct lock_class *class;
unsigned long nr_unused = 0, nr_uncategorized = 0,
nr_irq_safe = 0, nr_irq_unsafe = 0,
nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
sum_forward_deps = 0;
#ifdef CONFIG_PROVE_LOCKING
+ struct lock_class *class;
+
list_for_each_entry(class, &all_lock_classes, lock_entry) {
if (class->usage_mask == 0)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index edd1c082dbf5..5e069734363c 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -908,6 +908,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
might_sleep();
+#ifdef CONFIG_DEBUG_MUTEXES
+ DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
ww = container_of(lock, struct ww_mutex, base);
if (use_ww_ctx && ww_ctx) {
if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -1379,8 +1383,13 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
*/
int __sched mutex_trylock(struct mutex *lock)
{
- bool locked = __mutex_trylock(lock);
+ bool locked;
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+ locked = __mutex_trylock(lock);
if (locked)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
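The two hunks above add an explicit lock->magic self-check to __mutex_lock_common() and mutex_trylock(), so locking an uninitialized or corrupted mutex trips DEBUG_LOCKS_WARN_ON() early instead of failing mysteriously later. As a rough userspace sketch of the same self-pointing-magic idea (the struct and helpers below are invented for illustration, not kernel API):

        #include <assert.h>

        struct my_mutex {
                int locked;
                struct my_mutex *magic;         /* points to itself once initialized */
        };

        static void my_mutex_init(struct my_mutex *m)
        {
                m->locked = 0;
                m->magic = m;                   /* self-reference marks "initialized" */
        }

        static int my_mutex_trylock(struct my_mutex *m)
        {
                assert(m->magic == m);          /* catches uninitialized/overwritten locks */
                if (m->locked)
                        return 0;
                m->locked = 1;
                return 1;
        }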
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 37524a47f002..bd0f0d05724c 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -666,7 +666,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
preempt_disable();
rcu_read_lock();
owner = rwsem_owner_flags(sem, &flags);
- if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner)))
+ /*
+ * Don't check the read-owner as the entry may be stale.
+ */
+ if ((flags & nonspinnable) ||
+ (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
ret = false;
rcu_read_unlock();
preempt_enable();
@@ -1000,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
adjustment = 0;
if (rwsem_optimistic_spin(sem, false)) {
+ /* rwsem_optimistic_spin() implies ACQUIRE on success */
/*
* Wake up other readers in the wait list if the front
* waiter is a reader.
@@ -1014,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
}
return sem;
} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
+ /* rwsem_reader_phase_trylock() implies ACQUIRE on success */
return sem;
}
@@ -1032,6 +1038,8 @@ queue:
*/
if (adjustment && !(atomic_long_read(&sem->count) &
(RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
+ /* Provide lock ACQUIRE */
+ smp_acquire__after_ctrl_dep();
raw_spin_unlock_irq(&sem->wait_lock);
rwsem_set_reader_owned(sem);
lockevent_inc(rwsem_rlock_fast);
@@ -1065,15 +1073,18 @@ queue:
wake_up_q(&wake_q);
/* wait to be given the lock */
- while (true) {
+ for (;;) {
set_current_state(state);
- if (!waiter.task)
+ if (!smp_load_acquire(&waiter.task)) {
+ /* Matches rwsem_mark_wake()'s smp_store_release(). */
break;
+ }
if (signal_pending_state(state, current)) {
raw_spin_lock_irq(&sem->wait_lock);
if (waiter.task)
goto out_nolock;
raw_spin_unlock_irq(&sem->wait_lock);
+ /* Ordered by sem->wait_lock against rwsem_mark_wake(). */
break;
}
schedule();
@@ -1083,6 +1094,7 @@ queue:
__set_current_state(TASK_RUNNING);
lockevent_inc(rwsem_rlock);
return sem;
+
out_nolock:
list_del(&waiter.list);
if (list_empty(&sem->wait_list)) {
@@ -1123,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
/* do optimistic spinning and steal lock if possible */
if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
- rwsem_optimistic_spin(sem, true))
+ rwsem_optimistic_spin(sem, true)) {
+ /* rwsem_optimistic_spin() implies ACQUIRE on success */
return sem;
+ }
/*
* Disable reader optimistic spinning for this rwsem after
@@ -1184,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
wait:
/* wait until we successfully acquire the lock */
set_current_state(state);
- while (true) {
- if (rwsem_try_write_lock(sem, wstate))
+ for (;;) {
+ if (rwsem_try_write_lock(sem, wstate)) {
+ /* rwsem_try_write_lock() implies ACQUIRE on success */
break;
+ }
raw_spin_unlock_irq(&sem->wait_lock);
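The comments added above all document the same requirement: the reader must observe the waker's hand-off with ACQUIRE semantics, either via smp_load_acquire(&waiter.task) pairing with rwsem_mark_wake()'s smp_store_release(), or via smp_acquire__after_ctrl_dep() / the wait_lock. A minimal userspace C11 sketch of that release/acquire pairing (names invented, not the kernel API):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stddef.h>

        struct waiter {
                _Atomic(void *) task;           /* non-NULL while still waiting */
        };

        /* waker side: grant the lock first, then publish with a release store */
        static void grant(struct waiter *w)
        {
                atomic_store_explicit(&w->task, NULL, memory_order_release);
        }

        /* waiter side: the acquire load pairs with the release store above, so
         * everything the waker did before granting is visible once this is true */
        static bool granted(struct waiter *w)
        {
                return atomic_load_explicit(&w->task, memory_order_acquire) == NULL;
        }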
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 036be95a87e9..bc9cfeaac8bd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1086,6 +1086,21 @@ struct numa_group {
unsigned long faults[0];
};
+/*
+ * For functions that can be called in multiple contexts that permit reading
+ * ->numa_group (see struct task_struct for locking rules).
+ */
+static struct numa_group *deref_task_numa_group(struct task_struct *p)
+{
+ return rcu_dereference_check(p->numa_group, p == current ||
+ (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
+}
+
+static struct numa_group *deref_curr_numa_group(struct task_struct *p)
+{
+ return rcu_dereference_protected(p->numa_group, p == current);
+}
+
static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);
@@ -1129,10 +1144,12 @@ static unsigned int task_scan_start(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long period = smin;
+ struct numa_group *ng;
/* Scale the maximum scan period with the amount of shared memory. */
- if (p->numa_group) {
- struct numa_group *ng = p->numa_group;
+ rcu_read_lock();
+ ng = rcu_dereference(p->numa_group);
+ if (ng) {
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
@@ -1140,6 +1157,7 @@ static unsigned int task_scan_start(struct task_struct *p)
period *= shared + 1;
period /= private + shared + 1;
}
+ rcu_read_unlock();
return max(smin, period);
}
@@ -1148,13 +1166,14 @@ static unsigned int task_scan_max(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long smax;
+ struct numa_group *ng;
/* Watch for min being lower than max due to floor calculations */
smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
/* Scale the maximum scan period with the amount of shared memory. */
- if (p->numa_group) {
- struct numa_group *ng = p->numa_group;
+ ng = deref_curr_numa_group(p);
+ if (ng) {
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
unsigned long period = smax;
@@ -1186,7 +1205,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
p->numa_scan_period = sysctl_numa_balancing_scan_delay;
p->numa_work.next = &p->numa_work;
p->numa_faults = NULL;
- p->numa_group = NULL;
+ RCU_INIT_POINTER(p->numa_group, NULL);
p->last_task_numa_placement = 0;
p->last_sum_exec_runtime = 0;
@@ -1233,7 +1252,16 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
pid_t task_numa_group_id(struct task_struct *p)
{
- return p->numa_group ? p->numa_group->gid : 0;
+ struct numa_group *ng;
+ pid_t gid = 0;
+
+ rcu_read_lock();
+ ng = rcu_dereference(p->numa_group);
+ if (ng)
+ gid = ng->gid;
+ rcu_read_unlock();
+
+ return gid;
}
/*
@@ -1258,11 +1286,13 @@ static inline unsigned long task_faults(struct task_struct *p, int nid)
static inline unsigned long group_faults(struct task_struct *p, int nid)
{
- if (!p->numa_group)
+ struct numa_group *ng = deref_task_numa_group(p);
+
+ if (!ng)
return 0;
- return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
- p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
+ return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
+ ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}
static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
@@ -1400,12 +1430,13 @@ static inline unsigned long task_weight(struct task_struct *p, int nid,
static inline unsigned long group_weight(struct task_struct *p, int nid,
int dist)
{
+ struct numa_group *ng = deref_task_numa_group(p);
unsigned long faults, total_faults;
- if (!p->numa_group)
+ if (!ng)
return 0;
- total_faults = p->numa_group->total_faults;
+ total_faults = ng->total_faults;
if (!total_faults)
return 0;
@@ -1419,7 +1450,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid,
bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int src_nid, int dst_cpu)
{
- struct numa_group *ng = p->numa_group;
+ struct numa_group *ng = deref_curr_numa_group(p);
int dst_nid = cpu_to_node(dst_cpu);
int last_cpupid, this_cpupid;
@@ -1600,13 +1631,14 @@ static bool load_too_imbalanced(long src_load, long dst_load,
static void task_numa_compare(struct task_numa_env *env,
long taskimp, long groupimp, bool maymove)
{
+ struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
struct rq *dst_rq = cpu_rq(env->dst_cpu);
+ long imp = p_ng ? groupimp : taskimp;
struct task_struct *cur;
long src_load, dst_load;
- long load;
- long imp = env->p->numa_group ? groupimp : taskimp;
- long moveimp = imp;
int dist = env->dist;
+ long moveimp = imp;
+ long load;
if (READ_ONCE(dst_rq->numa_migrate_on))
return;
@@ -1645,21 +1677,22 @@ static void task_numa_compare(struct task_numa_env *env,
* If dst and source tasks are in the same NUMA group, or not
* in any group then look only at task weights.
*/
- if (cur->numa_group == env->p->numa_group) {
+ cur_ng = rcu_dereference(cur->numa_group);
+ if (cur_ng == p_ng) {
imp = taskimp + task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
/*
* Add some hysteresis to prevent swapping the
* tasks within a group over tiny differences.
*/
- if (cur->numa_group)
+ if (cur_ng)
imp -= imp / 16;
} else {
/*
* Compare the group weights. If a task is all by itself
* (not part of a group), use the task weight instead.
*/
- if (cur->numa_group && env->p->numa_group)
+ if (cur_ng && p_ng)
imp += group_weight(cur, env->src_nid, dist) -
group_weight(cur, env->dst_nid, dist);
else
@@ -1757,11 +1790,12 @@ static int task_numa_migrate(struct task_struct *p)
.best_imp = 0,
.best_cpu = -1,
};
+ unsigned long taskweight, groupweight;
struct sched_domain *sd;
+ long taskimp, groupimp;
+ struct numa_group *ng;
struct rq *best_rq;
- unsigned long taskweight, groupweight;
int nid, ret, dist;
- long taskimp, groupimp;
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
@@ -1807,7 +1841,8 @@ static int task_numa_migrate(struct task_struct *p)
* multiple NUMA nodes; in order to better consolidate the group,
* we need to check other locations.
*/
- if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
+ ng = deref_curr_numa_group(p);
+ if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
for_each_online_node(nid) {
if (nid == env.src_nid || nid == p->numa_preferred_nid)
continue;
@@ -1840,7 +1875,7 @@ static int task_numa_migrate(struct task_struct *p)
* A task that migrated to a second choice node will be better off
* trying for a better one later. Do not set the preferred node here.
*/
- if (p->numa_group) {
+ if (ng) {
if (env.best_cpu == -1)
nid = env.src_nid;
else
@@ -2135,6 +2170,7 @@ static void task_numa_placement(struct task_struct *p)
unsigned long total_faults;
u64 runtime, period;
spinlock_t *group_lock = NULL;
+ struct numa_group *ng;
/*
* The p->mm->numa_scan_seq field gets updated without
@@ -2152,8 +2188,9 @@ static void task_numa_placement(struct task_struct *p)
runtime = numa_get_avg_runtime(p, &period);
/* If the task is part of a group prevent parallel updates to group stats */
- if (p->numa_group) {
- group_lock = &p->numa_group->lock;
+ ng = deref_curr_numa_group(p);
+ if (ng) {
+ group_lock = &ng->lock;
spin_lock_irq(group_lock);
}
@@ -2194,7 +2231,7 @@ static void task_numa_placement(struct task_struct *p)
p->numa_faults[cpu_idx] += f_diff;
faults += p->numa_faults[mem_idx];
p->total_numa_faults += diff;
- if (p->numa_group) {
+ if (ng) {
/*
* safe because we can only change our own group
*
@@ -2202,14 +2239,14 @@ static void task_numa_placement(struct task_struct *p)
* nid and priv in a specific region because it
* is at the beginning of the numa_faults array.
*/
- p->numa_group->faults[mem_idx] += diff;
- p->numa_group->faults_cpu[mem_idx] += f_diff;
- p->numa_group->total_faults += diff;
- group_faults += p->numa_group->faults[mem_idx];
+ ng->faults[mem_idx] += diff;
+ ng->faults_cpu[mem_idx] += f_diff;
+ ng->total_faults += diff;
+ group_faults += ng->faults[mem_idx];
}
}
- if (!p->numa_group) {
+ if (!ng) {
if (faults > max_faults) {
max_faults = faults;
max_nid = nid;
@@ -2220,8 +2257,8 @@ static void task_numa_placement(struct task_struct *p)
}
}
- if (p->numa_group) {
- numa_group_count_active_nodes(p->numa_group);
+ if (ng) {
+ numa_group_count_active_nodes(ng);
spin_unlock_irq(group_lock);
max_nid = preferred_group_nid(p, max_nid);
}
@@ -2255,7 +2292,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
int cpu = cpupid_to_cpu(cpupid);
int i;
- if (unlikely(!p->numa_group)) {
+ if (unlikely(!deref_curr_numa_group(p))) {
unsigned int size = sizeof(struct numa_group) +
4*nr_node_ids*sizeof(unsigned long);
@@ -2291,7 +2328,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
if (!grp)
goto no_join;
- my_grp = p->numa_group;
+ my_grp = deref_curr_numa_group(p);
if (grp == my_grp)
goto no_join;
@@ -2353,13 +2390,24 @@ no_join:
return;
}
-void task_numa_free(struct task_struct *p)
+/*
+ * Get rid of NUMA statistics associated with a task (either current or dead).
+ * If @final is set, the task is dead and has reached refcount zero, so we can
+ * safely free all relevant data structures. Otherwise, there might be
+ * concurrent reads from places like load balancing and procfs, and we should
+ * reset the data back to default state without freeing ->numa_faults.
+ */
+void task_numa_free(struct task_struct *p, bool final)
{
- struct numa_group *grp = p->numa_group;
- void *numa_faults = p->numa_faults;
+ /* safe: p either is current or is being freed by current */
+ struct numa_group *grp = rcu_dereference_raw(p->numa_group);
+ unsigned long *numa_faults = p->numa_faults;
unsigned long flags;
int i;
+ if (!numa_faults)
+ return;
+
if (grp) {
spin_lock_irqsave(&grp->lock, flags);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
@@ -2372,8 +2420,14 @@ void task_numa_free(struct task_struct *p)
put_numa_group(grp);
}
- p->numa_faults = NULL;
- kfree(numa_faults);
+ if (final) {
+ p->numa_faults = NULL;
+ kfree(numa_faults);
+ } else {
+ p->total_numa_faults = 0;
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+ numa_faults[i] = 0;
+ }
}
/*
@@ -2426,7 +2480,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
* actively using should be counted as local. This allows the
* scan rate to slow down when a workload has settled down.
*/
- ng = p->numa_group;
+ ng = deref_curr_numa_group(p);
if (!priv && !local && ng && ng->active_nodes > 1 &&
numa_is_active_node(cpu_node, ng) &&
numa_is_active_node(mem_node, ng))
@@ -10444,18 +10498,22 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
int node;
unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
+ struct numa_group *ng;
+ rcu_read_lock();
+ ng = rcu_dereference(p->numa_group);
for_each_online_node(node) {
if (p->numa_faults) {
tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
}
- if (p->numa_group) {
- gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
- gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
+ if (ng) {
+ gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
+ gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
}
print_numa_stats(m, node, tsf, tpf, gsf, gpf);
}
+ rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */
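Taken together with the kernel/fork.c hunk earlier in this merge, the change above means task_numa_free(p, false) only resets the NUMA statistics (concurrent readers such as load balancing and procfs may still dereference ->numa_faults), while task_numa_free(p, true) runs at refcount zero and may really free them. A simplified userspace sketch of that reset-vs-free split (types and names here are illustrative only):

        #include <stdlib.h>
        #include <string.h>

        struct numa_stats {
                unsigned long *faults;          /* per-node fault counters */
                size_t nr;
        };

        static void numa_stats_free(struct numa_stats *s, int final)
        {
                unsigned long *faults = s->faults;

                if (!faults)
                        return;

                if (final) {
                        /* last reference is gone: safe to really free */
                        s->faults = NULL;
                        free(faults);
                } else {
                        /* readers may still look at the array: only zero it */
                        memset(faults, 0, s->nr * sizeof(*faults));
                }
        }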
diff --git a/kernel/signal.c b/kernel/signal.c
index 91b789dd6e72..e667be6907d7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -349,7 +349,7 @@ void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
* @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
* Group stop states are cleared and the group stop count is consumed if
* %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
- * stop, the appropriate %SIGNAL_* flags are set.
+ * stop, the appropriate `SIGNAL_*` flags are set.
*
* CONTEXT:
* Must be called with @task->sighand->siglock held.
@@ -1885,6 +1885,7 @@ static void do_notify_pidfd(struct task_struct *task)
{
struct pid *pid;
+ WARN_ON(task->exit_state == 0);
pid = task_pid(task);
wake_up_all(&pid->wait_pidfd);
}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 69ebf3c2f1b5..78af97163147 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -137,6 +137,13 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
return 0;
+ /*
+ * Do not trace a function if it's filtered by set_graph_notrace.
+ * Make the index of ret stack negative to indicate that it should
+ * ignore further functions. But it needs its own ret stack entry
+ * to recover the original index in order to continue tracing after
+ * returning from the function.
+ */
if (ftrace_graph_notrace_addr(trace->func)) {
trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
/*
@@ -156,16 +163,6 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
return 0;
/*
- * Do not trace a function if it's filtered by set_graph_notrace.
- * Make the index of ret stack negative to indicate that it should
- * ignore further functions. But it needs its own ret stack entry
- * to recover the original index in order to continue tracing after
- * returning from the function.
- */
- if (ftrace_graph_notrace_addr(trace->func))
- return 1;
-
- /*
* Stop here if tracing_threshold is set. We only write function return
* events to the ring buffer.
*/
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fafba1a923b..7fa97a8b5717 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -106,7 +106,6 @@ endchoice
config KASAN_STACK_ENABLE
bool "Enable stack instrumentation (unsafe)" if CC_IS_CLANG && !COMPILE_TEST
- default !(CLANG_VERSION < 90000)
depends on KASAN
help
The LLVM stack address sanitizer has a know problem that
@@ -115,11 +114,11 @@ config KASAN_STACK_ENABLE
Disabling asan-stack makes it safe to run kernels build
with clang-8 with KASAN enabled, though it loses some of
the functionality.
- This feature is always disabled when compile-testing with clang-8
- or earlier to avoid cluttering the output in stack overflow
- warnings, but clang-8 users can still enable it for builds without
- CONFIG_COMPILE_TEST. On gcc and later clang versions it is
- assumed to always be safe to use and enabled by default.
+ This feature is always disabled when compile-testing with clang
+ to avoid cluttering the output in stack overflow warnings,
+ but clang users can still enable it for builds without
+ CONFIG_COMPILE_TEST. On gcc it is assumed to always be safe
+ to use and enabled by default.
config KASAN_STACK
int
diff --git a/lib/Makefile b/lib/Makefile
index 095601ce371d..29c02a924973 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -279,7 +279,8 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o
obj-$(CONFIG_UBSAN) += ubsan.o
UBSAN_SANITIZE_ubsan.o := n
-CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+KASAN_SANITIZE_ubsan.o := n
+CFLAGS_ubsan.o := $(call cc-option, -fno-stack-protector) $(DISABLE_STACKLEAK_PLUGIN)
obj-$(CONFIG_SBITMAP) += sbitmap.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index 439d641ec796..38045d6d0538 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -74,8 +74,8 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
delta_us);
curr_stats->cpms = DIV_ROUND_UP(ncomps * USEC_PER_MSEC, delta_us);
if (curr_stats->epms != 0)
- curr_stats->cpe_ratio =
- (curr_stats->cpms * 100) / curr_stats->epms;
+ curr_stats->cpe_ratio = DIV_ROUND_DOWN_ULL(
+ curr_stats->cpms * 100, curr_stats->epms);
else
curr_stats->cpe_ratio = 0;
diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c
index 5bcc902c5388..a4db51c21266 100644
--- a/lib/dim/net_dim.c
+++ b/lib/dim/net_dim.c
@@ -5,6 +5,62 @@
#include <linux/dim.h>
+/*
+ * Net DIM profiles:
+ * There is a different set of profiles for each CQ period mode.
+ * There are separate sets of profiles for RX and TX CQs.
+ * Each profile must have NET_DIM_PARAMS_NUM_PROFILES entries.
+ */
+#define NET_DIM_PARAMS_NUM_PROFILES 5
+#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE 128
+#define NET_DIM_DEF_PROFILE_CQE 1
+#define NET_DIM_DEF_PROFILE_EQE 1
+
+#define NET_DIM_RX_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define NET_DIM_RX_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+#define NET_DIM_TX_EQE_PROFILES { \
+ {1, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {32, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, NET_DIM_DEFAULT_TX_CQ_MODERATION_PKTS_FROM_EQE} \
+}
+
+#define NET_DIM_TX_CQE_PROFILES { \
+ {5, 128}, \
+ {8, 64}, \
+ {16, 32}, \
+ {32, 32}, \
+ {64, 32} \
+}
+
+static const struct dim_cq_moder
+rx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_RX_EQE_PROFILES,
+ NET_DIM_RX_CQE_PROFILES,
+};
+
+static const struct dim_cq_moder
+tx_profile[DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
+ NET_DIM_TX_EQE_PROFILES,
+ NET_DIM_TX_CQE_PROFILES,
+};
+
struct dim_cq_moder
net_dim_get_rx_moderation(u8 cq_period_mode, int ix)
{
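The new tables are indexed as profile[cq_period_mode][ix], with one row per CQ period mode (EQE, CQE) and NET_DIM_PARAMS_NUM_PROFILES columns. A small standalone sketch of such a lookup, using the RX values from the tables above (the bounds clamp is an illustrative safety net, not something the kernel helper is shown to do):

        struct cq_moder { int usec; int pkts; };

        #define NUM_MODES       2
        #define NUM_PROFILES    5

        static const struct cq_moder rx_prof[NUM_MODES][NUM_PROFILES] = {
                { {1, 256}, {8, 256}, {64, 256}, {128, 256}, {256, 256} },  /* EQE */
                { {2, 256}, {8, 128}, {16, 64},  {32, 64},   {64, 64}   },  /* CQE */
        };

        static struct cq_moder get_rx_moderation(unsigned int mode, unsigned int ix)
        {
                if (mode >= NUM_MODES)
                        mode = 0;
                if (ix >= NUM_PROFILES)
                        ix = NUM_PROFILES - 1;
                return rx_prof[mode][ix];
        }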
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 42695bc8d451..0083b5cc646c 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -66,7 +66,7 @@ CFLAGS_vpermxor1.o += $(altivec_flags)
CFLAGS_vpermxor2.o += $(altivec_flags)
CFLAGS_vpermxor4.o += $(altivec_flags)
CFLAGS_vpermxor8.o += $(altivec_flags)
-targets += vpermxor1.o vpermxor2.o vpermxor4.o vpermxor8.o
+targets += vpermxor1.c vpermxor2.c vpermxor4.c vpermxor8.c
$(obj)/vpermxor%.c: $(src)/vpermxor.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 83ea6c4e623c..6ca97a63b3d6 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -886,8 +886,11 @@ static int __init test_firmware_init(void)
return -ENOMEM;
rc = __test_firmware_config_init();
- if (rc)
+ if (rc) {
+ kfree(test_fw_config);
+ pr_err("could not init firmware test config: %d\n", rc);
return rc;
+ }
rc = misc_register(&test_fw_misc_device);
if (rc) {
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 62d19f270cad..9729f271d150 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -222,7 +222,7 @@ static int __init do_kmem_cache_size(size_t size, bool want_ctor,
* Copy the buffer to check that it's not wiped on
* free().
*/
- buf_copy = kmalloc(size, GFP_KERNEL);
+ buf_copy = kmalloc(size, GFP_ATOMIC);
if (buf_copy)
memcpy(buf_copy, buf, size);
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 2d1c1f241fd9..e630e7ff57f1 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -51,7 +51,7 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
ns = vdso_ts->nsec;
last = vd->cycle_last;
if (unlikely((s64)cycles < 0))
- return clock_gettime_fallback(clk, ts);
+ return -1;
ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
ns >>= vd->shift;
@@ -82,14 +82,14 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
}
static __maybe_unused int
-__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
const struct vdso_data *vd = __arch_get_vdso_data();
u32 msk;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
/*
* Convert the clockid to a bitmask and use it to check which
@@ -104,9 +104,17 @@ __cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
} else if (msk & VDSO_RAW) {
return do_hres(&vd[CS_RAW], clock, ts);
}
+ return -1;
+}
+
+static __maybe_unused int
+__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ int ret = __cvdso_clock_gettime_common(clock, ts);
-fallback:
- return clock_gettime_fallback(clock, ts);
+ if (unlikely(ret))
+ return clock_gettime_fallback(clock, ts);
+ return 0;
}
static __maybe_unused int
@@ -115,20 +123,21 @@ __cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_gettime_common(clock, &ts);
- ret = __cvdso_clock_gettime(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_gettime32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_gettime_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_gettime_fallback(clock, (struct __kernel_timespec *)res);
}
static __maybe_unused int
@@ -169,17 +178,18 @@ static __maybe_unused time_t __cvdso_time(time_t *time)
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
-int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
const struct vdso_data *vd = __arch_get_vdso_data();
- u64 ns;
+ u64 hrtimer_res;
u32 msk;
- u64 hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
+ u64 ns;
/* Check for negative values or invalid clocks */
if (unlikely((u32) clock >= MAX_CLOCKS))
- goto fallback;
+ return -1;
+ hrtimer_res = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
/*
* Convert the clockid to a bitmask and use it to check which
* clocks are handled in the VDSO directly.
@@ -201,18 +211,22 @@ int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
*/
ns = hrtimer_res;
} else {
- goto fallback;
+ return -1;
}
- if (res) {
- res->tv_sec = 0;
- res->tv_nsec = ns;
- }
+ res->tv_sec = 0;
+ res->tv_nsec = ns;
return 0;
+}
+
+int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
+{
+ int ret = __cvdso_clock_getres_common(clock, res);
-fallback:
- return clock_getres_fallback(clock, res);
+ if (unlikely(ret))
+ return clock_getres_fallback(clock, res);
+ return 0;
}
static __maybe_unused int
@@ -221,19 +235,20 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
struct __kernel_timespec ts;
int ret;
- if (res == NULL)
- goto fallback;
+ ret = __cvdso_clock_getres_common(clock, &ts);
- ret = __cvdso_clock_getres(clock, &ts);
+#ifdef VDSO_HAS_32BIT_FALLBACK
+ if (unlikely(ret))
+ return clock_getres32_fallback(clock, res);
+#else
+ if (unlikely(ret))
+ ret = clock_getres_fallback(clock, &ts);
+#endif
- if (ret == 0) {
+ if (likely(!ret)) {
res->tv_sec = ts.tv_sec;
res->tv_nsec = ts.tv_nsec;
}
-
return ret;
-
-fallback:
- return clock_getres_fallback(clock, (struct __kernel_timespec *)res);
}
#endif /* VDSO_HAS_CLOCK_GETRES */
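The refactor above follows one pattern throughout: each vDSO entry point is split into a *_common() helper that returns -1 for anything it cannot handle, plus thin wrappers that decide which fallback syscall to issue (so the time32 variants can use clock_gettime32_fallback()/clock_getres32_fallback() when VDSO_HAS_32BIT_FALLBACK is defined). A minimal standalone sketch of that shape, with made-up function names:

        /* pretend fast path: handles only clock 0, like the vDSO hot path */
        static int read_fast(int clock, long long *ns)
        {
                if (clock != 0)
                        return -1;
                *ns = 42;                       /* stand-in for the real read */
                return 0;
        }

        /* stand-in for the syscall fallback, which always succeeds */
        static int read_syscall(int clock, long long *ns)
        {
                *ns = 1000 + clock;
                return 0;
        }

        static int clock_read_common(int clock, long long *ns)
        {
                if (clock < 0)
                        return -1;              /* let the caller pick the fallback */
                return read_fast(clock, ns);
        }

        int clock_read(int clock, long long *ns)
        {
                if (clock_read_common(clock, ns))
                        return read_syscall(clock, ns); /* single fallback point */
                return 0;
        }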
diff --git a/mm/Makefile b/mm/Makefile
index 338e528ad436..d0b295c3b764 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -102,5 +102,6 @@ obj-$(CONFIG_FRAME_VECTOR) += frame_vector.o
obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o
obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o
obj-$(CONFIG_PERCPU_STATS) += percpu-stats.o
+obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 83a7b614061f..798275a51887 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -21,7 +21,6 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
* memory corruption is possible and we should stop execution.
*/
BUG_ON(!trylock_page(page));
- list_del(&page->lru);
balloon_page_insert(b_dev_info, page);
unlock_page(page);
__count_vm_event(BALLOON_INFLATE);
@@ -33,8 +32,8 @@ static void balloon_page_enqueue_one(struct balloon_dev_info *b_dev_info,
* @b_dev_info: balloon device descriptor where we will insert a new page to
* @pages: pages to enqueue - allocated using balloon_page_alloc.
*
- * Driver must call it to properly enqueue a balloon pages before definitively
- * removing it from the guest system.
+ * Driver must call this function to properly enqueue balloon pages before
+ * definitively removing them from the guest system.
*
* Return: number of pages that were enqueued.
*/
@@ -47,6 +46,7 @@ size_t balloon_page_list_enqueue(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
list_for_each_entry_safe(page, tmp, pages, lru) {
+ list_del(&page->lru);
balloon_page_enqueue_one(b_dev_info, page);
n_pages++;
}
@@ -63,12 +63,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_enqueue);
* @n_req_pages: number of requested pages.
*
* Driver must call this function to properly de-allocate a previous enlisted
- * balloon pages before definetively releasing it back to the guest system.
+ * balloon pages before definitively releasing it back to the guest system.
* This function tries to remove @n_req_pages from the ballooned pages and
* return them to the caller in the @pages list.
*
- * Note that this function may fail to dequeue some pages temporarily empty due
- * to compaction isolated pages.
+ * Note that this function may fail to dequeue some pages even if the balloon
+ * isn't empty - since the page list can be temporarily empty due to compaction
+ * of isolated pages.
*
* Return: number of pages that were added to the @pages list.
*/
@@ -112,12 +113,13 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
/*
* balloon_page_alloc - allocates a new page for insertion into the balloon
- * page list.
+ * page list.
+ *
+ * Driver must call this function to properly allocate a new balloon page.
+ * Driver must call balloon_page_enqueue before definitively removing the page
+ * from the guest system.
*
- * Driver must call it to properly allocate a new enlisted balloon page.
- * Driver must call balloon_page_enqueue before definitively removing it from
- * the guest system. This function returns the page address for the recently
- * allocated page or NULL in the case we fail to allocate a new page this turn.
+ * Return: struct page for the allocated page or NULL on allocation failure.
*/
struct page *balloon_page_alloc(void)
{
@@ -128,15 +130,17 @@ struct page *balloon_page_alloc(void)
EXPORT_SYMBOL_GPL(balloon_page_alloc);
/*
- * balloon_page_enqueue - allocates a new page and inserts it into the balloon
- * page list.
- * @b_dev_info: balloon device descriptor where we will insert a new page to
+ * balloon_page_enqueue - inserts a new page into the balloon page list.
+ *
+ * @b_dev_info: balloon device descriptor where we will insert a new page
* @page: new page to enqueue - allocated using balloon_page_alloc.
*
- * Driver must call it to properly enqueue a new allocated balloon page
- * before definitively removing it from the guest system.
- * This function returns the page address for the recently enqueued page or
- * NULL in the case we fail to allocate a new page this turn.
+ * Drivers must call this function to properly enqueue a new allocated balloon
+ * page before definitively removing the page from the guest system.
+ *
+ * Drivers must not call balloon_page_enqueue on pages that have been pushed to
+ * a list with balloon_page_push before removing them with balloon_page_pop. To
+ * enqueue a list of pages, use balloon_page_list_enqueue instead.
*/
void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
struct page *page)
@@ -151,14 +155,23 @@ EXPORT_SYMBOL_GPL(balloon_page_enqueue);
/*
* balloon_page_dequeue - removes a page from balloon's page list and returns
- * the its address to allow the driver release the page.
+ * its address to allow the driver to release the page.
* @b_dev_info: balloon device decriptor where we will grab a page from.
*
- * Driver must call it to properly de-allocate a previous enlisted balloon page
- * before definetively releasing it back to the guest system.
- * This function returns the page address for the recently dequeued page or
- * NULL in the case we find balloon's page list temporarily empty due to
- * compaction isolated pages.
+ * Driver must call this function to properly dequeue a previously enqueued page
+ * before definitively releasing it back to the guest system.
+ *
+ * Caller must perform its own accounting to ensure that this
+ * function is called only if some pages are actually enqueued.
+ *
+ * Note that this function may fail to dequeue some pages even if there are
+ * some enqueued pages - since the page list can be temporarily empty due to
+ * the compaction of isolated pages.
+ *
+ * TODO: remove the caller accounting requirements, and allow caller to wait
+ * until all pages can be dequeued.
+ *
+ * Return: struct page for the dequeued page, or NULL if no page was dequeued.
*/
struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
{
@@ -171,9 +184,9 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
if (n_pages != 1) {
/*
* If we are unable to dequeue a balloon page because the page
- * list is empty and there is no isolated pages, then something
+ * list is empty and there are no isolated pages, then something
* went out of track and some balloon pages are lost.
- * BUG() here, otherwise the balloon driver may get stuck into
+ * BUG() here, otherwise the balloon driver may get stuck in
* an infinite loop while attempting to release all its pages.
*/
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
@@ -224,8 +237,8 @@ int balloon_page_migrate(struct address_space *mapping,
/*
* We can not easily support the no copy case here so ignore it as it
- * is unlikely to be use with ballon pages. See include/linux/hmm.h for
- * user of the MIGRATE_SYNC_NO_COPY mode.
+ * is unlikely to be used with balloon pages. See include/linux/hmm.h
+ * for a user of the MIGRATE_SYNC_NO_COPY mode.
*/
if (mode == MIGRATE_SYNC_NO_COPY)
return -EINVAL;
diff --git a/mm/compaction.c b/mm/compaction.c
index 9e1b9acb116b..952dc2fb24e5 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -842,13 +842,15 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/*
* Periodically drop the lock (if held) regardless of its
- * contention, to give chance to IRQs. Abort async compaction
- * if contended.
+ * contention, to give chance to IRQs. Abort completely if
+ * a fatal signal is pending.
*/
if (!(low_pfn % SWAP_CLUSTER_MAX)
&& compact_unlock_should_abort(&pgdat->lru_lock,
- flags, &locked, cc))
- break;
+ flags, &locked, cc)) {
+ low_pfn = 0;
+ goto fatal_pending;
+ }
if (!pfn_valid_within(low_pfn))
goto isolate_fail;
@@ -1060,6 +1062,7 @@ isolate_abort:
trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
nr_scanned, nr_isolated);
+fatal_pending:
cc->total_migrate_scanned += nr_scanned;
if (nr_isolated)
count_compact_events(COMPACTISOLATED, nr_isolated);
diff --git a/mm/hmm.c b/mm/hmm.c
index e1eedef129cf..16b6731a34db 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -946,7 +946,7 @@ EXPORT_SYMBOL(hmm_range_unregister);
* @range: range
* Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
* permission (for instance asking for write and range is read only),
- * -EAGAIN if you need to retry, -EFAULT invalid (ie either no valid
+ * -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
* vma or it is illegal to access that range), number of valid pages
* in range->pfns[] (from range start address).
*
@@ -967,7 +967,7 @@ long hmm_range_snapshot(struct hmm_range *range)
do {
/* If range is no longer valid force retry. */
if (!range->valid)
- return -EAGAIN;
+ return -EBUSY;
vma = find_vma(hmm->mm, start);
if (vma == NULL || (vma->vm_flags & device_vma))
@@ -1062,10 +1062,8 @@ long hmm_range_fault(struct hmm_range *range, bool block)
do {
/* If range is no longer valid force retry. */
- if (!range->valid) {
- up_read(&hmm->mm->mmap_sem);
- return -EAGAIN;
- }
+ if (!range->valid)
+ return -EBUSY;
vma = find_vma(hmm->mm, start);
if (vma == NULL || (vma->vm_flags & device_vma))
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index dbbd518fb6b3..6e9e8cca663e 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -114,7 +114,7 @@
/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
- __GFP_NOWARN | __GFP_NOFAIL)
+ __GFP_NOWARN)
/* scanning area inside a memory block */
struct kmemleak_scan_area {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2a9bbddb0e55..c73f09913165 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -132,7 +132,6 @@ static void release_memory_resource(struct resource *res)
return;
release_resource(res);
kfree(res);
- return;
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
@@ -979,7 +978,6 @@ static void rollback_node_hotadd(int nid)
arch_refresh_nodedata(nid, NULL);
free_percpu(pgdat->per_cpu_nodestats);
arch_free_nodedata(pgdat);
- return;
}
diff --git a/kernel/memremap.c b/mm/memremap.c
index 6ee03a816d67..6ee03a816d67 100644
--- a/kernel/memremap.c
+++ b/mm/memremap.c
diff --git a/mm/migrate.c b/mm/migrate.c
index 8992741f10aa..a42858d8e00b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -767,12 +767,12 @@ recheck_buffers:
}
bh = bh->b_this_page;
} while (bh != head);
- spin_unlock(&mapping->private_lock);
if (busy) {
if (invalidated) {
rc = -EAGAIN;
goto unlock_buffers;
}
+ spin_unlock(&mapping->private_lock);
invalidate_bh_lrus();
invalidated = true;
goto recheck_buffers;
@@ -805,6 +805,8 @@ recheck_buffers:
rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
+ if (check_refs)
+ spin_unlock(&mapping->private_lock);
bh = head;
do {
unlock_buffer(bh);
@@ -2338,16 +2340,13 @@ next:
static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
- struct mm_walk mm_walk;
-
- mm_walk.pmd_entry = migrate_vma_collect_pmd;
- mm_walk.pte_entry = NULL;
- mm_walk.pte_hole = migrate_vma_collect_hole;
- mm_walk.hugetlb_entry = NULL;
- mm_walk.test_walk = NULL;
- mm_walk.vma = migrate->vma;
- mm_walk.mm = migrate->vma->vm_mm;
- mm_walk.private = migrate;
+ struct mm_walk mm_walk = {
+ .pmd_entry = migrate_vma_collect_pmd,
+ .pte_hole = migrate_vma_collect_hole,
+ .vma = migrate->vma,
+ .mm = migrate->vma->vm_mm,
+ .private = migrate,
+ };
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm_walk.mm,
migrate->start,
diff --git a/mm/slub.c b/mm/slub.c
index e6c030e47364..8834563cdb4b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1432,7 +1432,9 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
void *old_tail = *tail ? *tail : *head;
int rsize;
- if (slab_want_init_on_free(s))
+ if (slab_want_init_on_free(s)) {
+ void *p = NULL;
+
do {
object = next;
next = get_freepointer(s, object);
@@ -1445,8 +1447,10 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
: 0;
memset((char *)object + s->inuse, 0,
s->size - s->inuse - rsize);
- set_freepointer(s, object, next);
+ set_freepointer(s, object, p);
+ p = object;
} while (object != old_tail);
+ }
/*
* Compiler cannot detect this function can be removed if slab_free_hook()
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4fa8d84599b0..e0fc963acc41 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1259,6 +1259,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
return false;
/*
+ * First make sure the mappings are removed from all page-tables
+ * before they are freed.
+ */
+ vmalloc_sync_all();
+
+ /*
* TODO: to calculate a flush range without looping.
* The list can be up to lazy_max_pages() elements.
*/
@@ -3038,6 +3044,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
*/
void __weak vmalloc_sync_all(void)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 44df66a98f2a..dbdc46a84f63 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -699,7 +699,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
unsigned long ret, freed = 0;
struct shrinker *shrinker;
- if (!mem_cgroup_is_root(memcg))
+ /*
+ * The root memcg might be allocated even though memcg is disabled
+ * via "cgroup_disable=memory" boot parameter. This could make
+ * mem_cgroup_is_root() return false, so only the memcg slab
+ * shrinkers would run while the global ones are skipped. This
+ * may result in a premature OOM.
+ */
+ if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
if (!down_read_trylock(&shrinker_rwsem))
diff --git a/net/bridge/br.c b/net/bridge/br.c
index d164f63a4345..8a8f9e5f264f 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -37,12 +37,15 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
int err;
if (dev->priv_flags & IFF_EBRIDGE) {
+ err = br_vlan_bridge_event(dev, event, ptr);
+ if (err)
+ return notifier_from_errno(err);
+
if (event == NETDEV_REGISTER) {
/* register of bridge completed, add sysfs entries */
br_sysfs_addbr(dev);
return NOTIFY_DONE;
}
- br_vlan_bridge_event(dev, event, ptr);
}
/* not a port of a bridge */
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 3d4b2817687f..9b379e110129 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1388,6 +1388,9 @@ br_multicast_leave_group(struct net_bridge *br,
if (!br_port_group_equal(p, port, src))
continue;
+ if (p->flags & MDB_PG_FLAGS_PERMANENT)
+ break;
+
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index c4fd307fbfdc..b7a4942ff1b3 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -895,8 +895,8 @@ int nbp_get_num_vlan_infos(struct net_bridge_port *p, u32 filter_mask);
void br_vlan_get_stats(const struct net_bridge_vlan *v,
struct br_vlan_stats *stats);
void br_vlan_port_event(struct net_bridge_port *p, unsigned long event);
-void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
- void *ptr);
+int br_vlan_bridge_event(struct net_device *dev, unsigned long event,
+ void *ptr);
static inline struct net_bridge_vlan_group *br_vlan_group(
const struct net_bridge *br)
@@ -1086,9 +1086,10 @@ static inline void br_vlan_port_event(struct net_bridge_port *p,
{
}
-static inline void br_vlan_bridge_event(struct net_device *dev,
- unsigned long event, void *ptr)
+static inline int br_vlan_bridge_event(struct net_device *dev,
+ unsigned long event, void *ptr)
{
+ return 0;
}
#endif
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 021cc9f66804..f5b2aeebbfe9 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -1053,7 +1053,6 @@ int br_vlan_init(struct net_bridge *br)
{
struct net_bridge_vlan_group *vg;
int ret = -ENOMEM;
- bool changed;
vg = kzalloc(sizeof(*vg), GFP_KERNEL);
if (!vg)
@@ -1068,17 +1067,10 @@ int br_vlan_init(struct net_bridge *br)
br->vlan_proto = htons(ETH_P_8021Q);
br->default_pvid = 1;
rcu_assign_pointer(br->vlgrp, vg);
- ret = br_vlan_add(br, 1,
- BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
- BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
- if (ret)
- goto err_vlan_add;
out:
return ret;
-err_vlan_add:
- vlan_tunnel_deinit(vg);
err_tunnel_init:
rhashtable_destroy(&vg->vlan_hash);
err_rhtbl:
@@ -1464,13 +1456,23 @@ static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
}
/* Must be protected by RTNL. */
-void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
- void *ptr)
+int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
- struct net_bridge *br;
+ struct net_bridge *br = netdev_priv(dev);
+ bool changed;
+ int ret = 0;
switch (event) {
+ case NETDEV_REGISTER:
+ ret = br_vlan_add(br, br->default_pvid,
+ BRIDGE_VLAN_INFO_PVID |
+ BRIDGE_VLAN_INFO_UNTAGGED |
+ BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
+ break;
+ case NETDEV_UNREGISTER:
+ br_vlan_delete(br, br->default_pvid);
+ break;
case NETDEV_CHANGEUPPER:
info = ptr;
br_vlan_upper_change(dev, info->upper_dev, info->linking);
@@ -1478,12 +1480,13 @@ void br_vlan_bridge_event(struct net_device *dev, unsigned long event,
case NETDEV_CHANGE:
case NETDEV_UP:
- br = netdev_priv(dev);
if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
- return;
+ break;
br_vlan_link_state_change(dev, br);
break;
}
+
+ return ret;
}
/* Must be protected by RTNL. */
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 963dfdc14827..c8177a89f52c 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1770,20 +1770,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
return 0;
}
+static int ebt_compat_init_offsets(unsigned int number)
+{
+ if (number > INT_MAX)
+ return -EINVAL;
+
+ /* also count the base chain policies */
+ number += NF_BR_NUMHOOKS;
+
+ return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
+}
static int compat_table_info(const struct ebt_table_info *info,
struct compat_ebt_replace *newinfo)
{
unsigned int size = info->entries_size;
const void *entries = info->entries;
+ int ret;
newinfo->entries_size = size;
- if (info->nentries) {
- int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
- info->nentries);
- if (ret)
- return ret;
- }
+ ret = ebt_compat_init_offsets(info->nentries);
+ if (ret)
+ return ret;
return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
entries, newinfo);
@@ -2234,11 +2242,9 @@ static int compat_do_replace(struct net *net, void __user *user,
xt_compat_lock(NFPROTO_BRIDGE);
- if (tmp.nentries) {
- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
- if (ret < 0)
- goto out_unlock;
- }
+ ret = ebt_compat_init_offsets(tmp.nentries);
+ if (ret < 0)
+ goto out_unlock;
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
if (ret < 0)
@@ -2261,8 +2267,10 @@ static int compat_do_replace(struct net *net, void __user *user,
state.buf_kern_len = size64;
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
- if (WARN_ON(ret < 0))
+ if (WARN_ON(ret < 0)) {
+ vfree(entries_tmp);
goto out_unlock;
+ }
vfree(entries_tmp);
tmp.entries_size = size64;
diff --git a/net/bridge/netfilter/nft_meta_bridge.c b/net/bridge/netfilter/nft_meta_bridge.c
index bed66f536b34..1804e867f715 100644
--- a/net/bridge/netfilter/nft_meta_bridge.c
+++ b/net/bridge/netfilter/nft_meta_bridge.c
@@ -30,13 +30,9 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
switch (priv->key) {
case NFT_META_BRI_IIFNAME:
br_dev = nft_meta_get_bridge(in);
- if (!br_dev)
- goto err;
break;
case NFT_META_BRI_OIFNAME:
br_dev = nft_meta_get_bridge(out);
- if (!br_dev)
- goto err;
break;
case NFT_META_BRI_IIFPVID: {
u16 p_pvid;
@@ -61,13 +57,11 @@ static void nft_meta_bridge_get_eval(const struct nft_expr *expr,
return;
}
default:
- goto out;
+ return nft_meta_get_eval(expr, regs, pkt);
}
- strncpy((char *)dest, br_dev->name, IFNAMSIZ);
+ strncpy((char *)dest, br_dev ? br_dev->name : "", IFNAMSIZ);
return;
-out:
- return nft_meta_get_eval(expr, regs, pkt);
err:
regs->verdict.code = NFT_BREAK;
}
diff --git a/net/can/gw.c b/net/can/gw.c
index 8abae841d504..ce17f836262b 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1047,32 +1047,50 @@ static __init int cgw_module_init(void)
pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
max_hops);
- register_pernet_subsys(&cangw_pernet_ops);
+ ret = register_pernet_subsys(&cangw_pernet_ops);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
0, 0, NULL);
-
if (!cgw_cache)
- return -ENOMEM;
+ goto out_cache_create;
/* set notifier */
notifier.notifier_call = cgw_notifier;
- register_netdevice_notifier(&notifier);
+ ret = register_netdevice_notifier(&notifier);
+ if (ret)
+ goto out_register_notifier;
ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
NULL, cgw_dump_jobs, 0);
- if (ret) {
- unregister_netdevice_notifier(&notifier);
- kmem_cache_destroy(cgw_cache);
- return -ENOBUFS;
- }
-
- /* Only the first call to rtnl_register_module can fail */
- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
- cgw_create_job, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
- cgw_remove_job, NULL, 0);
+ if (ret)
+ goto out_rtnl_register1;
+
+ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+ cgw_create_job, NULL, 0);
+ if (ret)
+ goto out_rtnl_register2;
+ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+ cgw_remove_job, NULL, 0);
+ if (ret)
+ goto out_rtnl_register3;
return 0;
+
+out_rtnl_register3:
+ rtnl_unregister(PF_CAN, RTM_NEWROUTE);
+out_rtnl_register2:
+ rtnl_unregister(PF_CAN, RTM_GETROUTE);
+out_rtnl_register1:
+ unregister_netdevice_notifier(&notifier);
+out_register_notifier:
+ kmem_cache_destroy(cgw_cache);
+out_cache_create:
+ unregister_pernet_subsys(&cangw_pernet_ops);
+
+ return ret;
}
static __exit void cgw_module_exit(void)
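The reworked cgw_module_init() above checks every registration step and, on failure, unwinds the steps that already succeeded in reverse order through a chain of goto labels. A small runnable sketch of that init/unwind ladder (the step_*()/undo_*() functions are invented placeholders):

        #include <stdio.h>

        static int  step_a(void) { return 0; }
        static void undo_a(void) { }
        static int  step_b(void) { return 0; }
        static void undo_b(void) { }
        static int  step_c(void) { return -1; }         /* pretend the last step fails */

        static int module_init_sketch(void)
        {
                int ret;

                ret = step_a();
                if (ret)
                        return ret;

                ret = step_b();
                if (ret)
                        goto out_a;

                ret = step_c();
                if (ret)
                        goto out_b;

                return 0;

        out_b:
                undo_b();
        out_a:
                undo_a();
                return ret;
        }

        int main(void)
        {
                printf("init: %d\n", module_init_sketch());
                return 0;
        }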
diff --git a/net/core/dev.c b/net/core/dev.c
index e2a11c62197b..af071b0ce88e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4374,12 +4374,17 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
act = bpf_prog_run_xdp(xdp_prog, xdp);
+ /* check if bpf_xdp_adjust_head was used */
off = xdp->data - orig_data;
- if (off > 0)
- __skb_pull(skb, off);
- else if (off < 0)
- __skb_push(skb, -off);
- skb->mac_header += off;
+ if (off) {
+ if (off > 0)
+ __skb_pull(skb, off);
+ else if (off < 0)
+ __skb_push(skb, -off);
+
+ skb->mac_header += off;
+ skb_reset_network_header(skb);
+ }
/* check if bpf_xdp_adjust_tail was used. it can only "shrink"
* pckt.
@@ -9701,6 +9706,8 @@ static void __net_exit default_device_exit(struct net *net)
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ if (__dev_get_by_name(&init_net, fb_name))
+ snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
pr_emerg("%s: failed to move %s to init_net: %d\n",
diff --git a/net/core/filter.c b/net/core/filter.c
index 4e2a79b2fd77..7878f918b8c0 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7455,12 +7455,12 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct __sk_buff, gso_segs):
/* si->dst_reg = skb_shinfo(SKB); */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
- *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
- si->dst_reg, si->src_reg,
- offsetof(struct sk_buff, head));
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
BPF_REG_AX, si->src_reg,
offsetof(struct sk_buff, end));
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
+ si->dst_reg, si->src_reg,
+ offsetof(struct sk_buff, head));
*insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
#else
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 93bffaad2135..6832eeb4b785 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -585,12 +585,12 @@ EXPORT_SYMBOL_GPL(sk_psock_destroy);
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
- rcu_assign_sk_user_data(sk, NULL);
sk_psock_cork_free(psock);
sk_psock_zap_ingress(psock);
- sk_psock_restore_proto(sk, psock);
write_lock_bh(&sk->sk_callback_lock);
+ sk_psock_restore_proto(sk, psock);
+ rcu_assign_sk_user_data(sk, NULL);
if (psock->progs.skb_parser)
sk_psock_stop_strp(sk, psock);
write_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 52d4faeee18b..1330a7442e5b 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -247,6 +247,8 @@ static void sock_map_free(struct bpf_map *map)
raw_spin_unlock_bh(&stab->lock);
rcu_read_unlock();
+ synchronize_rcu();
+
bpf_map_area_free(stab->sks);
kfree(stab);
}
@@ -276,16 +278,20 @@ static int __sock_map_delete(struct bpf_stab *stab, struct sock *sk_test,
struct sock **psk)
{
struct sock *sk;
+ int err = 0;
raw_spin_lock_bh(&stab->lock);
sk = *psk;
if (!sk_test || sk_test == sk)
- *psk = NULL;
+ sk = xchg(psk, NULL);
+
+ if (likely(sk))
+ sock_map_unref(sk, psk);
+ else
+ err = -EINVAL;
+
raw_spin_unlock_bh(&stab->lock);
- if (unlikely(!sk))
- return -EINVAL;
- sock_map_unref(sk, psk);
- return 0;
+ return err;
}
static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
@@ -328,6 +334,7 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
struct sock *sk, u64 flags)
{
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
+ struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_psock_link *link;
struct sk_psock *psock;
struct sock *osk;
@@ -338,6 +345,8 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
return -EINVAL;
if (unlikely(idx >= map->max_entries))
return -E2BIG;
+ if (unlikely(icsk->icsk_ulp_data))
+ return -EINVAL;
link = sk_psock_init_link();
if (!link)
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 26363d72d25b..47ee88163a9d 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -165,6 +165,7 @@ static struct sk_buff
"Expected meta frame, is %12llx "
"in the DSA master multicast filter?\n",
SJA1105_META_DMAC);
+ kfree_skb(sp->data->stampable_skb);
}
/* Hold a reference to avoid dsa_switch_rcv
@@ -211,17 +212,8 @@ static struct sk_buff
* for further processing up the network stack.
*/
kfree_skb(skb);
-
- skb = skb_copy(stampable_skb, GFP_ATOMIC);
- if (!skb) {
- dev_err_ratelimited(dp->ds->dev,
- "Failed to copy stampable skb\n");
- spin_unlock(&sp->data->meta_lock);
- return NULL;
- }
+ skb = stampable_skb;
sja1105_transfer_meta(skb, meta);
- /* The cached copy will be freed now */
- skb_unref(stampable_skb);
spin_unlock(&sp->data->meta_lock);
}
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index d666756be5f1..a999451345f9 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -331,7 +331,7 @@ struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
if (!prev)
fq = inet_frag_create(fqdir, key, &prev);
- if (prev && !IS_ERR(prev)) {
+ if (!IS_ERR_OR_NULL(prev)) {
fq = prev;
if (!refcount_inc_not_zero(&fq->refcnt))
fq = NULL;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 43adfc1641ba..2f01cf6fa0de 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -275,6 +275,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
const struct iphdr *tiph = &tunnel->parms.iph;
u8 ipproto;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_error;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
ipproto = IPPROTO_IPIP;
diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
index 3d8a1d835471..4849edb62d52 100644
--- a/net/ipv4/tcp_ulp.c
+++ b/net/ipv4/tcp_ulp.c
@@ -96,6 +96,19 @@ void tcp_get_available_ulp(char *buf, size_t maxlen)
rcu_read_unlock();
}
+void tcp_update_ulp(struct sock *sk, struct proto *proto)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ if (!icsk->icsk_ulp_ops) {
+ sk->sk_prot = proto;
+ return;
+ }
+
+ if (icsk->icsk_ulp_ops->update)
+ icsk->icsk_ulp_ops->update(sk, proto);
+}
+
void tcp_cleanup_ulp(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c2049c72f3e5..dd2d0b963260 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -660,12 +660,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
struct flowi6 *fl6, __u8 *dsfield,
int *encap_limit)
{
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
struct ip6_tnl *t = netdev_priv(dev);
__u16 offset;
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 3134fbb65d7f..754a484d35df 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1278,12 +1278,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
}
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
- dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
-
skb_set_inner_ipproto(skb, IPPROTO_IPIP);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1367,12 +1366,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
}
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
- dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
-
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 3c5c331b50f1..7a5d331cdefa 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1951,7 +1951,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
if (!arg.match)
- return;
+ goto unlock;
fib6_nh = arg.match;
} else {
fib6_nh = from->fib6_nh;
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 09e1694b6d34..ebb62a4ebe30 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -512,7 +512,9 @@ static void iucv_sock_close(struct sock *sk)
sk->sk_state = IUCV_DISCONN;
sk->sk_state_change(sk);
}
- case IUCV_DISCONN: /* fall through */
+ /* fall through */
+
+ case IUCV_DISCONN:
sk->sk_state = IUCV_CLOSING;
sk->sk_state_change(sk);
@@ -525,8 +527,9 @@ static void iucv_sock_close(struct sock *sk)
iucv_sock_in_state(sk, IUCV_CLOSED, 0),
timeo);
}
+ /* fall through */
- case IUCV_CLOSING: /* fall through */
+ case IUCV_CLOSING:
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
@@ -535,8 +538,9 @@ static void iucv_sock_close(struct sock *sk)
skb_queue_purge(&iucv->send_skb_q);
skb_queue_purge(&iucv->backlog_skb_q);
+ /* fall through */
- default: /* fall through */
+ default:
iucv_sever_path(sk, 1);
}
@@ -2247,10 +2251,10 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
kfree_skb(skb);
break;
}
- /* fall through and receive non-zero length data */
+ /* fall through - and receive non-zero length data */
case (AF_IUCV_FLAG_SHT):
/* shutdown request */
- /* fall through and receive zero length data */
+ /* fall through - and receive zero length data */
case 0:
/* plain data frame */
IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;
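
The af_iucv hunks above only move the fall-through annotation so it sits on its own line directly before the next label, the placement that compilers honouring -Wimplicit-fallthrough recognise as intentional. A small stand-alone sketch of that style, with hypothetical state names:

#include <stdio.h>

enum conn_state { CONN_CLOSING, CONN_CLOSED, CONN_IDLE };

static void step(enum conn_state s)
{
	switch (s) {
	case CONN_CLOSING:
		printf("flushing queues\n");
		/* fall through */

	case CONN_CLOSED:
		printf("tearing down\n");
		break;

	default:
		printf("nothing to do\n");
	}
}

int main(void)
{
	step(CONN_CLOSING);	/* prints both messages: the fall through is intentional */
	return 0;
}
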
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1d0e5904dedf..c54cb59593ef 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1681,6 +1681,9 @@ static const struct proto_ops pppol2tp_ops = {
.recvmsg = pppol2tp_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppol2tp_proto = {
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 06aac0aaae64..8dc6580e1787 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1222,7 +1222,6 @@ static void ieee80211_if_setup(struct net_device *dev)
static void ieee80211_if_setup_no_queue(struct net_device *dev)
{
ieee80211_if_setup(dev);
- dev->features |= NETIF_F_LLTX;
dev->priv_flags |= IFF_NO_QUEUE;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e89ed800f012..641876982ab9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2042,6 +2042,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
}
+ /* WMM specification requires all 4 ACIs. */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (params[ac].cw_min == 0) {
+ sdata_info(sdata,
+ "AP has invalid WMM params (missing AC %d), using defaults\n",
+ ac);
+ return false;
+ }
+ }
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
mlme_dbg(sdata,
"WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index caa317faee3c..286c7ee35e63 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -3856,9 +3856,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
}
/* Always allow software iftypes */
- if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
- (iftype == NL80211_IFTYPE_AP_VLAN &&
- local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
+ if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
if (radar_detect)
return -EINVAL;
return 0;
@@ -3893,7 +3891,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
if (sdata_iter == sdata ||
!ieee80211_sdata_running(sdata_iter) ||
- local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
+ cfg80211_iftype_allowed(local->hw.wiphy,
+ wdev_iter->iftype, 0, 1))
continue;
params.iftype_num[wdev_iter->iftype]++;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index ca7ac4a25ada..1d4e63326e68 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -226,7 +226,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
e.id = ip_to_id(map, ip);
- if (opt->flags & IPSET_DIM_ONE_SRC)
+ if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 2e151856ad99..e64d5f9a89dd 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1161,7 +1161,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
return -ENOENT;
write_lock_bh(&ip_set_ref_lock);
- if (set->ref != 0) {
+ if (set->ref != 0 || set->ref_netlink != 0) {
ret = -IPSET_ERR_REFERENCED;
goto out;
}
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index faf59b6a998f..24d8f4df4230 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -89,15 +89,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
- /* MAC can be src only */
- if (!(opt->flags & IPSET_DIM_TWO_SRC))
- return 0;
-
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
- if (opt->flags & IPSET_DIM_ONE_SRC)
+ if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index f1b1d948c07b..f69afb9ff3cb 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -60,24 +60,16 @@ void nft_meta_get_eval(const struct nft_expr *expr,
*dest = skb->mark;
break;
case NFT_META_IIF:
- if (in == NULL)
- goto err;
- *dest = in->ifindex;
+ *dest = in ? in->ifindex : 0;
break;
case NFT_META_OIF:
- if (out == NULL)
- goto err;
- *dest = out->ifindex;
+ *dest = out ? out->ifindex : 0;
break;
case NFT_META_IIFNAME:
- if (in == NULL)
- goto err;
- strncpy((char *)dest, in->name, IFNAMSIZ);
+ strncpy((char *)dest, in ? in->name : "", IFNAMSIZ);
break;
case NFT_META_OIFNAME:
- if (out == NULL)
- goto err;
- strncpy((char *)dest, out->name, IFNAMSIZ);
+ strncpy((char *)dest, out ? out->name : "", IFNAMSIZ);
break;
case NFT_META_IIFTYPE:
if (in == NULL)
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 96740d389377..c4f54ad2b98a 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -967,6 +967,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
window = skb->data[20];
+ sock_hold(make);
skb->sk = make;
skb->destructor = sock_efree;
make->sk_state = TCP_ESTABLISHED;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 12d985029eb1..65122bbccd27 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1050,7 +1050,7 @@ error:
}
/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
-static struct sw_flow_actions *get_flow_actions(struct net *net,
+static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
const struct nlattr *a,
const struct sw_flow_key *key,
const struct sw_flow_mask *mask,
@@ -1084,12 +1084,13 @@ static struct sw_flow_actions *get_flow_actions(struct net *net,
* we should not return match object with dangling reference
* to mask.
* */
-static int ovs_nla_init_match_and_action(struct net *net,
- struct sw_flow_match *match,
- struct sw_flow_key *key,
- struct nlattr **a,
- struct sw_flow_actions **acts,
- bool log)
+static noinline_for_stack int
+ovs_nla_init_match_and_action(struct net *net,
+ struct sw_flow_match *match,
+ struct sw_flow_key *key,
+ struct nlattr **a,
+ struct sw_flow_actions **acts,
+ bool log)
{
struct sw_flow_mask mask;
int error = 0;
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index ff74c4bbb9fc..9986d6065c4d 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -105,7 +105,8 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_ESTABLISHED:
- trans->cm_connect_complete(conn, event);
+ if (conn)
+ trans->cm_connect_complete(conn, event);
break;
case RDMA_CM_EVENT_REJECTED:
@@ -137,6 +138,8 @@ static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,
break;
case RDMA_CM_EVENT_DISCONNECTED:
+ if (!conn)
+ break;
rdsdebug("DISCONNECT event - dropping connection "
"%pI6c->%pI6c\n", &conn->c_laddr,
&conn->c_faddr);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index bea2a02850af..63b26baa108a 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -1065,6 +1065,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
/*
* proc.c
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index 9f2f45c09e58..7666ec72d37e 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -378,7 +378,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
- rxrpc_put_peer(peer);
+ rxrpc_put_peer_locked(peer);
}
spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 9d3ce81cf8ae..9c3ac96f71cb 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -437,6 +437,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
}
/*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ n = atomic_dec_return(&peer->usage);
+ trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+ if (n == 0) {
+ hash_del_rcu(&peer->hash_link);
+ list_del_init(&peer->keepalive_link);
+ kfree_rcu(peer, rcu);
+ }
+}
+
+/*
* Make sure all peer records have been discarded.
*/
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
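
rxrpc_put_peer_locked() above exists so the keepalive dispatcher can drop its reference without re-taking peer_hash_lock, which it already holds. A rough pthread-based sketch of that pattern (hypothetical names, not the rxrpc data structures): the final-unref cleanup unlinks from structures the caller's lock protects, so it must not try to acquire that lock itself. Build with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct peer {
	atomic_int usage;
	struct peer *next;		/* singly linked bucket, guarded by hash_lock */
};

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static struct peer *bucket;

static void unlink_peer(struct peer *p)	/* caller must hold hash_lock */
{
	struct peer **pp;

	for (pp = &bucket; *pp; pp = &(*pp)->next)
		if (*pp == p) {
			*pp = p->next;
			break;
		}
}

/* Drop a reference while hash_lock is already held; no re-lock on last put. */
static void put_peer_locked(struct peer *p)
{
	if (atomic_fetch_sub(&p->usage, 1) == 1) {
		unlink_peer(p);
		free(p);
	}
}

int main(void)
{
	struct peer *p = calloc(1, sizeof(*p));

	atomic_store(&p->usage, 1);
	p->next = bucket;
	bucket = p;

	pthread_mutex_lock(&hash_lock);
	put_peer_locked(p);		/* safe: cleanup happens under the held lock */
	pthread_mutex_unlock(&hash_lock);
	return 0;
}
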
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 5d3f33ce6d41..bae14438f869 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -226,6 +226,7 @@ static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_set_call_completion(call,
RXRPC_CALL_LOCAL_ERROR,
0, ret);
+ rxrpc_notify_socket(call);
goto out;
}
_debug("need instant resend %d", ret);
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 8126b26f125e..fd1f7e799e23 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -285,6 +285,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct tcf_bpf *prog;
bool is_bpf, is_ebpf;
int ret, res = 0;
+ u32 index;
if (!nla)
return -EINVAL;
@@ -298,13 +299,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
-
- ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
+ index = parm->index;
+ ret = tcf_idr_check_alloc(tn, &index, act, bind);
if (!ret) {
- ret = tcf_idr_create(tn, parm->index, est, act,
+ ret = tcf_idr_create(tn, index, est, act,
&act_bpf_ops, bind, true);
if (ret < 0) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
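
The same change repeats across the act_* files below: parm->index is copied into a local u32 before tcf_idr_check_alloc(), because the helper can write an allocated index back through the pointer it is given, while parm points into the received netlink message, which should stay untouched. A user-space sketch of that copy-before-passing pattern, with hypothetical names standing in for the tc helpers:

#include <stdint.h>
#include <stdio.h>

struct params { uint32_t index; };		/* stands in for the netlink payload */

static int check_alloc(uint32_t *index)		/* stands in for tcf_idr_check_alloc() */
{
	if (*index == 0)
		*index = 42;			/* allocator picks an id and writes it back */
	return 0;
}

int main(void)
{
	struct params parm = { .index = 0 };
	uint32_t index = parm.index;		/* copy first ... */

	check_alloc(&index);			/* ... so the message buffer is never modified */
	printf("allocated id %u, message still says %u\n", index, parm.index);
	return 0;
}
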
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index ce36b0f7e1dc..32ac04d77a45 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -103,6 +103,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct tcf_connmark_info *ci;
struct tc_connmark *parm;
int ret = 0, err;
+ u32 index;
if (!nla)
return -EINVAL;
@@ -116,13 +117,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
-
- ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ ret = tcf_idr_check_alloc(tn, &index, a, bind);
if (!ret) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_connmark_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 621fb22ce2a9..9b9288267a54 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -52,6 +52,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
struct tc_csum *parm;
struct tcf_csum *p;
int ret = 0, err;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -64,13 +65,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
if (tb[TCA_CSUM_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_CSUM_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_csum_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index b501ce0cf116..33a1a7406e87 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -666,6 +666,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
struct tc_ct *parm;
struct tcf_ct *c;
int err, res = 0;
+ u32 index;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
@@ -681,16 +682,16 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
return -EINVAL;
}
parm = nla_data(tb[TCA_CT_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
if (!err) {
- err = tcf_idr_create(tn, parm->index, est, a,
+ err = tcf_idr_create(tn, index, est, a,
&act_ct_ops, bind, true);
if (err) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return err;
}
res = ACT_P_CREATED;
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 10eb2bb99861..06ef74b74911 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -157,10 +157,10 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_net *tn = net_generic(net, ctinfo_net_id);
+ u32 dscpmask = 0, dscpstatemask, index;
struct nlattr *tb[TCA_CTINFO_MAX + 1];
struct tcf_ctinfo_params *cp_new;
struct tcf_chain *goto_ch = NULL;
- u32 dscpmask = 0, dscpstatemask;
struct tc_ctinfo *actparm;
struct tcf_ctinfo *ci;
u8 dscpmaskshift;
@@ -206,12 +206,13 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
}
/* done the validation:now to the actual action allocation */
- err = tcf_idr_check_alloc(tn, &actparm->index, a, bind);
+ index = actparm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, actparm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_ctinfo_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, actparm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b2380c5284e6..8f0140c6ca58 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -61,6 +61,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct tc_gact *parm;
struct tcf_gact *gact;
int ret = 0;
+ u32 index;
int err;
#ifdef CONFIG_GACT_PROB
struct tc_gact_p *p_parm = NULL;
@@ -77,6 +78,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
if (tb[TCA_GACT_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_GACT_PARMS]);
+ index = parm->index;
#ifndef CONFIG_GACT_PROB
if (tb[TCA_GACT_PROB] != NULL)
@@ -94,12 +96,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
}
#endif
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_gact_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 41d5398dd2f2..92ee853d43e6 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -479,8 +479,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
u8 *saddr = NULL;
bool exists = false;
int ret = 0;
+ u32 index;
int err;
+ if (!nla) {
+ NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
+ return -EINVAL;
+ }
+
err = nla_parse_nested_deprecated(tb, TCA_IFE_MAX, nla, ife_policy,
NULL);
if (err < 0)
@@ -502,7 +508,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!p)
return -ENOMEM;
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0) {
kfree(p);
return err;
@@ -514,10 +521,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
+ ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
kfree(p);
return ret;
}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 055faa298c8e..be3f88dfc37e 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -104,6 +104,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct net_device *dev;
bool exists = false;
int ret, err;
+ u32 index;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -118,8 +119,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
return -EINVAL;
}
parm = nla_data(tb[TCA_MIRRED_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -136,21 +137,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
return -EINVAL;
}
if (!exists) {
if (!parm->ifindex) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
return -EINVAL;
}
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_mirred_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index ca2597ce4ac9..0f299e3b618c 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -138,6 +138,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
struct tcf_mpls *m;
int ret = 0, err;
u8 mpls_ttl = 0;
+ u32 index;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes");
@@ -153,6 +154,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
return -EINVAL;
}
parm = nla_data(tb[TCA_MPLS_PARMS]);
+ index = parm->index;
/* Verify parameters against action type. */
switch (parm->m_action) {
@@ -209,7 +211,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
return -EINVAL;
}
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -217,10 +219,10 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
return 0;
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_mpls_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 45923ebb7a4f..7b858c11b1b5 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -44,6 +44,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_nat *parm;
int ret = 0, err;
struct tcf_nat *p;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
if (tb[TCA_NAT_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_NAT_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_nat_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 45e9d6bfddb3..17360c6faeaa 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct tcf_pedit *p;
int ret = 0, err;
int ksize;
+ u32 index;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
@@ -179,18 +180,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (IS_ERR(keys_ex))
return PTR_ERR(keys_ex);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
if (!parm->nkeys) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
ret = -EINVAL;
goto out_free;
}
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_pedit_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
goto out_free;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index a065f62fa79c..49cec3e64a4d 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -57,6 +57,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, police_net_id);
struct tcf_police_params *new;
bool exists = false;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -73,7 +74,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_POLICE_TBF]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -81,10 +83,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
return 0;
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, NULL, a,
+ ret = tcf_idr_create(tn, index, NULL, a,
&act_police_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 274d7a0c0e25..595308d60133 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -41,8 +41,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, sample_net_id);
struct nlattr *tb[TCA_SAMPLE_MAX + 1];
struct psample_group *psample_group;
+ u32 psample_group_num, rate, index;
struct tcf_chain *goto_ch = NULL;
- u32 psample_group_num, rate;
struct tc_sample *parm;
struct tcf_sample *s;
bool exists = false;
@@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_SAMPLE_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
return 0;
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_sample_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index f28ddbabff76..33aefa25b545 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -95,6 +95,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct tcf_defact *d;
bool exists = false;
int ret = 0, err;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -108,7 +109,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_DEF_PARMS]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -119,15 +121,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_simp_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 215a06705cef..b100870f02a6 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -99,6 +99,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
u16 *queue_mapping = NULL, *ptype = NULL;
bool exists = false;
int ret = 0, err;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -146,8 +147,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -158,15 +159,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_skbedit_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 4f07706eff07..7da3518e18ef 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -87,12 +87,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
struct tcf_skbmod_params *p, *p_old;
struct tcf_chain *goto_ch = NULL;
struct tc_skbmod *parm;
+ u32 lflags = 0, index;
struct tcf_skbmod *d;
bool exists = false;
u8 *daddr = NULL;
u8 *saddr = NULL;
u16 eth_type = 0;
- u32 lflags = 0;
int ret = 0, err;
if (!nla)
@@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(tb[TCA_SKBMOD_PARMS]);
+ index = parm->index;
if (parm->flags & SKBMOD_F_SWAPMAC)
lflags = SKBMOD_F_SWAPMAC;
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_skbmod_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 10dffda1d5cc..6d0debdc9b97 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -225,6 +225,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
__be16 flags = 0;
u8 tos, ttl;
int ret = 0;
+ u32 index;
int err;
if (!nla) {
@@ -245,7 +246,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -345,7 +347,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_tunnel_key_ops, bind, true);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
@@ -403,7 +405,7 @@ err_out:
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 9269d350fb8a..a3c9eea1ee8a 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -116,6 +116,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
u8 push_prio = 0;
bool exists = false;
int ret = 0, err;
+ u32 index;
if (!nla)
return -EINVAL;
@@ -128,7 +129,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (!tb[TCA_VLAN_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_VLAN_PARMS]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -144,7 +146,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
@@ -152,7 +154,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -ERANGE;
}
@@ -166,7 +168,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EPROTONOSUPPORT;
}
} else {
@@ -180,16 +182,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
action = parm->v_action;
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_vlan_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
@@ -306,6 +308,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index)
return tcf_idr_search(tn, a, index);
}
+static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
+{
+ return nla_total_size(sizeof(struct tc_vlan))
+ + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
+ + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
+ + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
+}
+
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.id = TCA_ID_VLAN,
@@ -315,6 +325,7 @@ static struct tc_action_ops act_vlan_ops = {
.init = tcf_vlan_init,
.cleanup = tcf_vlan_cleanup,
.walk = tcf_vlan_walker,
+ .get_fill_size = tcf_vlan_get_fill_size,
.lookup = tcf_vlan_search,
.size = sizeof(struct tcf_vlan),
};
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 25ef172c23df..30169b3adbbb 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
struct Qdisc *sch = ctx;
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
- if (skb)
+ if (skb) {
sch->qstats.backlog -= qdisc_pkt_len(skb);
-
- prefetch(&skb->end); /* we'll need skb_shinfo() */
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ }
return skb;
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2f7e88c46dd2..12503e16fa96 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -985,7 +985,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
return -EINVAL;
kaddrs = memdup_user(addrs, addrs_size);
- if (unlikely(IS_ERR(kaddrs)))
+ if (IS_ERR(kaddrs))
return PTR_ERR(kaddrs);
/* Walk through the addrs buffer and count the number of addresses. */
@@ -1304,7 +1304,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
return -EINVAL;
kaddrs = memdup_user(addrs, addrs_size);
- if (unlikely(IS_ERR(kaddrs)))
+ if (IS_ERR(kaddrs))
return PTR_ERR(kaddrs);
/* Allow security module to validate connectx addresses. */
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 302e355f2ebc..5b932583e407 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -263,7 +263,7 @@ static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
/* Check if socket is already active */
rc = -EINVAL;
- if (sk->sk_state != SMC_INIT)
+ if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
goto out_rel;
smc->clcsock->sk->sk_reuse = sk->sk_reuse;
@@ -1390,7 +1390,8 @@ static int smc_listen(struct socket *sock, int backlog)
lock_sock(sk);
rc = -EINVAL;
- if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
+ if ((sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) ||
+ smc->connect_nonblock)
goto out;
rc = 0;
@@ -1518,7 +1519,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
goto out;
if (msg->msg_flags & MSG_FASTOPEN) {
- if (sk->sk_state == SMC_INIT) {
+ if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
smc_switch_to_fallback(smc);
smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
} else {
@@ -1732,14 +1733,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
}
break;
case TCP_NODELAY:
- if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+ if (sk->sk_state != SMC_INIT &&
+ sk->sk_state != SMC_LISTEN &&
+ sk->sk_state != SMC_CLOSED) {
if (val && !smc->use_fallback)
mod_delayed_work(system_wq, &smc->conn.tx_work,
0);
}
break;
case TCP_CORK:
- if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+ if (sk->sk_state != SMC_INIT &&
+ sk->sk_state != SMC_LISTEN &&
+ sk->sk_state != SMC_CLOSED) {
if (!val && !smc->use_fallback)
mod_delayed_work(system_wq, &smc->conn.tx_work,
0);
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index d86030ef1232..e135d4e11231 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
int rep_type;
int rep_size;
int req_type;
+ int req_size;
struct net *net;
struct sk_buff *rep;
struct tlv_desc *req;
@@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
int err;
struct sk_buff *arg;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
{
int err;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
err = __tipc_nl_compat_doit(cmd, msg);
@@ -1278,8 +1281,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
goto send;
}
- len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (!len || !TLV_OK(msg.req, len)) {
+ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index dd8537f988c4..83ae41d7e554 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -485,9 +485,8 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
tsk_set_unreturnable(tsk, true);
if (sock->type == SOCK_DGRAM)
tsk_set_unreliable(tsk, true);
- __skb_queue_head_init(&tsk->mc_method.deferredq);
}
-
+ __skb_queue_head_init(&tsk->mc_method.deferredq);
trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
return 0;
}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 4674e57e66b0..9cbbae606ced 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -261,24 +261,9 @@ void tls_ctx_free(struct tls_context *ctx)
kfree(ctx);
}
-static void tls_sk_proto_close(struct sock *sk, long timeout)
+static void tls_sk_proto_cleanup(struct sock *sk,
+ struct tls_context *ctx, long timeo)
{
- struct tls_context *ctx = tls_get_ctx(sk);
- long timeo = sock_sndtimeo(sk, 0);
- void (*sk_proto_close)(struct sock *sk, long timeout);
- bool free_ctx = false;
-
- lock_sock(sk);
- sk_proto_close = ctx->sk_proto_close;
-
- if (ctx->tx_conf == TLS_HW_RECORD && ctx->rx_conf == TLS_HW_RECORD)
- goto skip_tx_cleanup;
-
- if (ctx->tx_conf == TLS_BASE && ctx->rx_conf == TLS_BASE) {
- free_ctx = true;
- goto skip_tx_cleanup;
- }
-
if (unlikely(sk->sk_write_pending) &&
!wait_on_pending_writer(sk, &timeo))
tls_handle_open_record(sk, 0);
@@ -287,7 +272,7 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
if (ctx->tx_conf == TLS_SW) {
kfree(ctx->tx.rec_seq);
kfree(ctx->tx.iv);
- tls_sw_free_resources_tx(sk);
+ tls_sw_release_resources_tx(sk);
#ifdef CONFIG_TLS_DEVICE
} else if (ctx->tx_conf == TLS_HW) {
tls_device_free_resources_tx(sk);
@@ -295,26 +280,44 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
}
if (ctx->rx_conf == TLS_SW)
- tls_sw_free_resources_rx(sk);
+ tls_sw_release_resources_rx(sk);
#ifdef CONFIG_TLS_DEVICE
if (ctx->rx_conf == TLS_HW)
tls_device_offload_cleanup_rx(sk);
-
- if (ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW) {
-#else
- {
#endif
- tls_ctx_free(ctx);
- ctx = NULL;
- }
+}
-skip_tx_cleanup:
+static void tls_sk_proto_close(struct sock *sk, long timeout)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tls_context *ctx = tls_get_ctx(sk);
+ long timeo = sock_sndtimeo(sk, 0);
+ bool free_ctx;
+
+ if (ctx->tx_conf == TLS_SW)
+ tls_sw_cancel_work_tx(ctx);
+
+ lock_sock(sk);
+ free_ctx = ctx->tx_conf != TLS_HW && ctx->rx_conf != TLS_HW;
+
+ if (ctx->tx_conf != TLS_BASE || ctx->rx_conf != TLS_BASE)
+ tls_sk_proto_cleanup(sk, ctx, timeo);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ if (free_ctx)
+ icsk->icsk_ulp_data = NULL;
+ sk->sk_prot = ctx->sk_proto;
+ write_unlock_bh(&sk->sk_callback_lock);
release_sock(sk);
- sk_proto_close(sk, timeout);
- /* free ctx for TLS_HW_RECORD, used by tcp_set_state
- * for sk->sk_prot->unhash [tls_hw_unhash]
- */
+ if (ctx->tx_conf == TLS_SW)
+ tls_sw_free_ctx_tx(ctx);
+ if (ctx->rx_conf == TLS_SW || ctx->rx_conf == TLS_HW)
+ tls_sw_strparser_done(ctx);
+ if (ctx->rx_conf == TLS_SW)
+ tls_sw_free_ctx_rx(ctx);
+ ctx->sk_proto_close(sk, timeout);
+
if (free_ctx)
tls_ctx_free(ctx);
}
@@ -526,6 +529,8 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
{
#endif
rc = tls_set_sw_offload(sk, ctx, 1);
+ if (rc)
+ goto err_crypto_info;
conf = TLS_SW;
}
} else {
@@ -537,13 +542,13 @@ static int do_tls_setsockopt_conf(struct sock *sk, char __user *optval,
{
#endif
rc = tls_set_sw_offload(sk, ctx, 0);
+ if (rc)
+ goto err_crypto_info;
conf = TLS_SW;
}
+ tls_sw_strparser_arm(sk, ctx);
}
- if (rc)
- goto err_crypto_info;
-
if (tx)
ctx->tx_conf = conf;
else
@@ -607,6 +612,7 @@ static struct tls_context *create_ctx(struct sock *sk)
ctx->setsockopt = sk->sk_prot->setsockopt;
ctx->getsockopt = sk->sk_prot->getsockopt;
ctx->sk_proto_close = sk->sk_prot->close;
+ ctx->unhash = sk->sk_prot->unhash;
return ctx;
}
@@ -764,7 +770,6 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG],
prot[TLS_HW_RECORD][TLS_HW_RECORD] = *base;
prot[TLS_HW_RECORD][TLS_HW_RECORD].hash = tls_hw_hash;
prot[TLS_HW_RECORD][TLS_HW_RECORD].unhash = tls_hw_unhash;
- prot[TLS_HW_RECORD][TLS_HW_RECORD].close = tls_sk_proto_close;
}
static int tls_init(struct sock *sk)
@@ -773,7 +778,7 @@ static int tls_init(struct sock *sk)
int rc = 0;
if (tls_hw_prot(sk))
- goto out;
+ return 0;
/* The TLS ulp is currently supported only for TCP sockets
* in ESTABLISHED state.
@@ -784,21 +789,38 @@ static int tls_init(struct sock *sk)
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTSUPP;
+ tls_build_proto(sk);
+
/* allocate tls context */
+ write_lock_bh(&sk->sk_callback_lock);
ctx = create_ctx(sk);
if (!ctx) {
rc = -ENOMEM;
goto out;
}
- tls_build_proto(sk);
ctx->tx_conf = TLS_BASE;
ctx->rx_conf = TLS_BASE;
+ ctx->sk_proto = sk->sk_prot;
update_sk_prot(sk, ctx);
out:
+ write_unlock_bh(&sk->sk_callback_lock);
return rc;
}
+static void tls_update(struct sock *sk, struct proto *p)
+{
+ struct tls_context *ctx;
+
+ ctx = tls_get_ctx(sk);
+ if (likely(ctx)) {
+ ctx->sk_proto_close = p->close;
+ ctx->sk_proto = p;
+ } else {
+ sk->sk_prot = p;
+ }
+}
+
void tls_register_device(struct tls_device *device)
{
spin_lock_bh(&device_spinlock);
@@ -819,6 +841,7 @@ static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = {
.name = "tls",
.owner = THIS_MODULE,
.init = tls_init,
+ .update = tls_update,
};
static int __init tls_register(void)
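
tcp_update_ulp() (added in net/ipv4/tcp_ulp.c above) and the tls .update callback work as a pair: a protocol swap on a socket with a ULP attached is routed through the ULP so it can refresh its own cached proto pointer instead of being silently overwritten. A compact user-space sketch of that hand-off, using hypothetical structures rather than the kernel types:

#include <stdio.h>

struct proto { const char *name; };
struct sock;

struct ulp_ops {
	void (*update)(struct sock *sk, struct proto *p);
};

struct sock {
	struct proto *sk_prot;
	const struct ulp_ops *ulp;	/* NULL when no ULP is attached */
	struct proto *ulp_cached;	/* the ULP's own view of the base proto */
};

static void demo_ulp_update(struct sock *sk, struct proto *p)
{
	sk->ulp_cached = p;		/* keep the ULP's copy coherent */
	sk->sk_prot = p;
}

static const struct ulp_ops demo_ulp = { .update = demo_ulp_update };

/* Shaped like tcp_update_ulp(): bypass only when no ULP is attached. */
static void update_proto(struct sock *sk, struct proto *p)
{
	if (!sk->ulp) {
		sk->sk_prot = p;
		return;
	}
	if (sk->ulp->update)
		sk->ulp->update(sk, p);
}

int main(void)
{
	struct proto base = { "base" }, swapped = { "swapped" };
	struct sock sk = { .sk_prot = &base, .ulp = &demo_ulp, .ulp_cached = &base };

	update_proto(&sk, &swapped);
	printf("sk_prot=%s, ulp's cached proto=%s\n",
	       sk.sk_prot->name, sk.ulp_cached->name);
	return 0;
}
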
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 53b4ad94e74a..91d21b048a9b 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2054,7 +2054,16 @@ static void tls_data_ready(struct sock *sk)
}
}
-void tls_sw_free_resources_tx(struct sock *sk)
+void tls_sw_cancel_work_tx(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+
+ set_bit(BIT_TX_CLOSING, &ctx->tx_bitmask);
+ set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask);
+ cancel_delayed_work_sync(&ctx->tx_work.work);
+}
+
+void tls_sw_release_resources_tx(struct sock *sk)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
@@ -2065,11 +2074,6 @@ void tls_sw_free_resources_tx(struct sock *sk)
if (atomic_read(&ctx->encrypt_pending))
crypto_wait_req(-EINPROGRESS, &ctx->async_wait);
- release_sock(sk);
- cancel_delayed_work_sync(&ctx->tx_work.work);
- lock_sock(sk);
-
- /* Tx whatever records we can transmit and abandon the rest */
tls_tx_records(sk, -1);
/* Free up un-sent records in tx_list. First, free
@@ -2092,6 +2096,11 @@ void tls_sw_free_resources_tx(struct sock *sk)
crypto_free_aead(ctx->aead_send);
tls_free_open_rec(sk);
+}
+
+void tls_sw_free_ctx_tx(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
kfree(ctx);
}
@@ -2110,25 +2119,40 @@ void tls_sw_release_resources_rx(struct sock *sk)
skb_queue_purge(&ctx->rx_list);
crypto_free_aead(ctx->aead_recv);
strp_stop(&ctx->strp);
- write_lock_bh(&sk->sk_callback_lock);
- sk->sk_data_ready = ctx->saved_data_ready;
- write_unlock_bh(&sk->sk_callback_lock);
- release_sock(sk);
- strp_done(&ctx->strp);
- lock_sock(sk);
+ /* If tls_sw_strparser_arm() was not called (cleanup paths)
+ * we still want to strp_stop(), but sk->sk_data_ready was
+ * never swapped.
+ */
+ if (ctx->saved_data_ready) {
+ write_lock_bh(&sk->sk_callback_lock);
+ sk->sk_data_ready = ctx->saved_data_ready;
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
}
}
-void tls_sw_free_resources_rx(struct sock *sk)
+void tls_sw_strparser_done(struct tls_context *tls_ctx)
{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- tls_sw_release_resources_rx(sk);
+ strp_done(&ctx->strp);
+}
+
+void tls_sw_free_ctx_rx(struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
kfree(ctx);
}
+void tls_sw_free_resources_rx(struct sock *sk)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+
+ tls_sw_release_resources_rx(sk);
+ tls_sw_free_ctx_rx(tls_ctx);
+}
+
/* The work handler to transmit the encrypted records in tx_list */
static void tx_work_handler(struct work_struct *work)
{
@@ -2137,11 +2161,17 @@ static void tx_work_handler(struct work_struct *work)
struct tx_work, work);
struct sock *sk = tx_work->sk;
struct tls_context *tls_ctx = tls_get_ctx(sk);
- struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
+ struct tls_sw_context_tx *ctx;
- if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ if (unlikely(!tls_ctx))
return;
+ ctx = tls_sw_ctx_tx(tls_ctx);
+ if (test_bit(BIT_TX_CLOSING, &ctx->tx_bitmask))
+ return;
+
+ if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
+ return;
lock_sock(sk);
tls_tx_records(sk, -1);
release_sock(sk);
@@ -2160,6 +2190,18 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
}
}
+void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
+{
+ struct tls_sw_context_rx *rx_ctx = tls_sw_ctx_rx(tls_ctx);
+
+ write_lock_bh(&sk->sk_callback_lock);
+ rx_ctx->saved_data_ready = sk->sk_data_ready;
+ sk->sk_data_ready = tls_data_ready;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ strp_check_rcv(&rx_ctx->strp);
+}
+
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
@@ -2357,13 +2399,6 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
cb.parse_msg = tls_read_size;
strp_init(&sw_ctx_rx->strp, sk, &cb);
-
- write_lock_bh(&sk->sk_callback_lock);
- sw_ctx_rx->saved_data_ready = sk->sk_data_ready;
- sk->sk_data_ready = tls_data_ready;
- write_unlock_bh(&sk->sk_callback_lock);
-
- strp_check_rcv(&sw_ctx_rx->strp);
}
goto out;
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 2a1719c0f8d2..261521d286d6 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -312,6 +312,11 @@ static void hvs_close_connection(struct vmbus_channel *chan)
lock_sock(sk);
hvs_do_close_lock_held(vsock_sk(sk), true);
release_sock(sk);
+
+ /* Release the refcnt for the channel that's opened in
+ * hvs_open_connection().
+ */
+ sock_put(sk);
}
static void hvs_open_connection(struct vmbus_channel *chan)
@@ -407,6 +412,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
}
set_per_channel_state(chan, conn_from_host ? new : sk);
+
+ /* This reference will be dropped by hvs_close_connection(). */
+ sock_hold(conn_from_host ? new : sk);
vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
/* Set the pending send size to max packet size to always get
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 742986c73490..a599469b8157 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1405,10 +1405,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
}
break;
case NETDEV_PRE_UP:
- if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
- !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
- wdev->use_4addr))
+ if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
+ wdev->use_4addr, 0))
return notifier_from_errno(-EOPNOTSUPP);
if (rfkill_blocked(rdev->rfkill))
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1a107f29016b..92e06482563c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3538,9 +3538,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
return err;
}
- if (!(rdev->wiphy.interface_modes & (1 << type)) &&
- !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
+ if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
return -EOPNOTSUPP;
err = nl80211_parse_mon_options(rdev, type, info, &params);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 1c39d6a2e850..d0e35b7b9e35 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1697,7 +1697,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
num_interfaces += params->iftype_num[iftype];
if (params->iftype_num[iftype] > 0 &&
- !(wiphy->software_iftypes & BIT(iftype)))
+ !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
used_iftypes |= BIT(iftype);
}
@@ -1719,7 +1719,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
return -ENOMEM;
for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
- if (wiphy->software_iftypes & BIT(iftype))
+ if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
continue;
for (j = 0; j < c->n_limits; j++) {
all_iftypes |= limits[j].types;
@@ -2072,3 +2072,26 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap,
return max_vht_nss;
}
EXPORT_SYMBOL(ieee80211_get_vht_max_nss);
+
+bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
+ bool is_4addr, u8 check_swif)
+
+{
+ bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
+
+ switch (check_swif) {
+ case 0:
+ if (is_vlan && is_4addr)
+ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
+ return wiphy->interface_modes & BIT(iftype);
+ case 1:
+ if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
+ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
+ return wiphy->software_iftypes & BIT(iftype);
+ default:
+ break;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(cfg80211_iftype_allowed);
diff --git a/samples/vfio-mdev/mdpy-defs.h b/samples/vfio-mdev/mdpy-defs.h
index 96b3b1b49d34..eb26421b6429 100644
--- a/samples/vfio-mdev/mdpy-defs.h
+++ b/samples/vfio-mdev/mdpy-defs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Simple pci display device.
*
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 77c742fa4fb1..4b0432e095ae 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -190,9 +190,6 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\
# printing commands
cmd = @set -e; $(echo-cmd) $(cmd_$(1))
-# Add $(obj)/ for paths that are not absolute
-objectify = $(foreach o,$(1),$(if $(filter /%,$(o)),$(o),$(obj)/$(o)))
-
###
# if_changed - execute command if any prerequisite is newer than
# target, or command line has changed
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index 8a5c4d645eb1..4bbf4fc163a2 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -25,7 +25,7 @@ failure = $(if-success,$(1),n,y)
# $(cc-option,<flag>)
# Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
# $(ld-option,<flag>)
# Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 5241d0751eb0..41c50f9461e5 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -45,7 +45,6 @@ subdir-ym := $(sort $(subdir-y) $(subdir-m))
multi-used-y := $(sort $(foreach m,$(obj-y), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))), $(m))))
multi-used-m := $(sort $(foreach m,$(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))), $(m))))
multi-used := $(multi-used-y) $(multi-used-m)
-single-used-m := $(sort $(filter-out $(multi-used-m),$(obj-m)))
# $(subdir-obj-y) is the list of objects in $(obj-y) which uses dir/ to
# tell kbuild to descend
@@ -91,7 +90,6 @@ lib-y := $(addprefix $(obj)/,$(lib-y))
subdir-obj-y := $(addprefix $(obj)/,$(subdir-obj-y))
real-obj-y := $(addprefix $(obj)/,$(real-obj-y))
real-obj-m := $(addprefix $(obj)/,$(real-obj-m))
-single-used-m := $(addprefix $(obj)/,$(single-used-m))
multi-used-m := $(addprefix $(obj)/,$(multi-used-m))
subdir-ym := $(addprefix $(obj)/,$(subdir-ym))
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 6b19c1a4eae5..92ed02d7cd5e 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -38,12 +38,39 @@
# symbols in the final module linking stage
# KBUILD_MODPOST_NOFINAL can be set to skip the final link of modules.
# This is solely useful to speed up test compiles
-PHONY := _modpost
-_modpost: __modpost
+
+PHONY := __modpost
+__modpost:
include include/config/auto.conf
include scripts/Kbuild.include
+kernelsymfile := $(objtree)/Module.symvers
+modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
+
+MODPOST = scripts/mod/modpost \
+ $(if $(CONFIG_MODVERSIONS),-m) \
+ $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a) \
+ $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
+ $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
+ $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
+ $(if $(KBUILD_MODPOST_WARN),-w)
+
+ifdef MODPOST_VMLINUX
+
+__modpost: vmlinux.o
+
+quiet_cmd_modpost = MODPOST $@
+ cmd_modpost = $(MODPOST) $@
+
+PHONY += vmlinux.o
+vmlinux.o:
+ $(call cmd,modpost)
+
+else
+
# When building external modules load the Kbuild file to retrieve EXTRA_SYMBOLS info
ifneq ($(KBUILD_EXTMOD),)
@@ -58,50 +85,27 @@ endif
include scripts/Makefile.lib
-kernelsymfile := $(objtree)/Module.symvers
-modulesymfile := $(firstword $(KBUILD_EXTMOD))/Module.symvers
-
modorder := $(if $(KBUILD_EXTMOD),$(KBUILD_EXTMOD)/)modules.order
-# Step 1), find all modules listed in modules.order
-ifdef CONFIG_MODULES
+# find all modules listed in modules.order
modules := $(sort $(shell cat $(modorder)))
-endif
# Stop after building .o files if NOFINAL is set. Makes compile tests quicker
-_modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
-
-# Step 2), invoke modpost
-# Includes step 3,4
-modpost = scripts/mod/modpost \
- $(if $(CONFIG_MODVERSIONS),-m) \
- $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
- $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
- $(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
- $(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
- $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
- $(if $(KBUILD_MODPOST_WARN),-w)
-
-MODPOST_OPT=$(subst -i,-n,$(filter -i,$(MAKEFLAGS)))
+__modpost: $(if $(KBUILD_MODPOST_NOFINAL), $(modules:.ko:.o),$(modules))
+ @:
-# We can go over command line length here, so be careful.
-quiet_cmd_modpost = MODPOST $(words $(filter-out vmlinux FORCE, $^)) modules
- cmd_modpost = sed 's/ko$$/o/' $(modorder) | $(modpost) $(MODPOST_OPT) -s -T -
+MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux)
-PHONY += __modpost
-__modpost: $(modules:.ko=.o) FORCE
- $(call cmd,modpost) $(wildcard vmlinux)
-
-quiet_cmd_kernel-mod = MODPOST $@
- cmd_kernel-mod = $(modpost) $@
+# We can go over command line length here, so be careful.
+quiet_cmd_modpost = MODPOST $(words $(modules)) modules
+ cmd_modpost = sed 's/ko$$/o/' $(modorder) | $(MODPOST)
-vmlinux.o: FORCE
- $(call cmd,kernel-mod)
+PHONY += modules-modpost
+modules-modpost:
+ $(call cmd,modpost)
# Declare generated files as targets for modpost
-$(modules:.ko=.mod.c): __modpost ;
-
+$(modules:.ko=.mod.c): modules-modpost
# Step 5), compile all *.mod.c files
@@ -145,10 +149,10 @@ FORCE:
# optimization, we don't need to read them if the target does not
# exist, we will rebuild anyway in that case.
-cmd_files := $(wildcard $(foreach f,$(sort $(targets)),$(dir $(f)).$(notdir $(f)).cmd))
+existing-targets := $(wildcard $(sort $(targets)))
+
+-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd)
-ifneq ($(cmd_files),)
- include $(cmd_files)
endif
.PHONY: $(PHONY)
diff --git a/scripts/gen_compile_commands.py b/scripts/gen_compile_commands.py
index 7915823b92a5..c458696ef3a7 100755
--- a/scripts/gen_compile_commands.py
+++ b/scripts/gen_compile_commands.py
@@ -21,9 +21,9 @@ _LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$'
_VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
# A kernel build generally has over 2000 entries in its compile_commands.json
-# database. If this code finds 500 or fewer, then warn the user that they might
+# database. If this code finds 300 or fewer, then warn the user that they might
# not have all the .cmd files, and they might need to compile the kernel.
-_LOW_COUNT_THRESHOLD = 500
+_LOW_COUNT_THRESHOLD = 300
def parse_arguments():
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 47f6f3ea0771..bbaf29386995 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -23,6 +23,12 @@ TMPFILE=$OUTFILE.tmp
trap 'rm -f $OUTFILE $TMPFILE' EXIT
+# SPDX-License-Identifier with GPL variants must have "WITH Linux-syscall-note"
+if [ -n "$(sed -n -e "/SPDX-License-Identifier:.*GPL-/{/WITH Linux-syscall-note/!p}" $INFILE)" ]; then
+ echo "error: $INFILE: missing \"WITH Linux-syscall-note\" for SPDX-License-Identifier" >&2
+ exit 1
+fi
+
sed -E -e '
s/([[:space:](])(__user|__force|__iomem)[[:space:]]/\1/g
s/__attribute_const__([[:space:]]|$)/\1/g
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 1134892599da..3569d2dec37c 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -848,6 +848,7 @@ int conf_write(const char *name)
const char *str;
char tmpname[PATH_MAX + 1], oldname[PATH_MAX + 1];
char *env;
+ int i;
bool need_newline = false;
if (!name)
@@ -930,6 +931,9 @@ next:
}
fclose(out);
+ for_all_symbols(i, sym)
+ sym->flags &= ~SYMBOL_WRITTEN;
+
if (*tmpname) {
if (is_same(name, tmpname)) {
conf_message("No change to %s", name);
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index a7124f895b24..915775eb2921 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -210,7 +210,7 @@ info LD vmlinux.o
modpost_link vmlinux.o
# modpost vmlinux.o to check for section mismatches
-${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
+${MAKE} -f "${srctree}/scripts/Makefile.modpost" MODPOST_VMLINUX=1
info MODINFO modules.builtin.modinfo
${OBJCOPY} -j .modinfo -O binary vmlinux.o modules.builtin.modinfo
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index f230e65329a2..3b638c0e1a4f 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -83,6 +83,17 @@ sub check_missing(%)
foreach my $prog (sort keys %missing) {
my $is_optional = $missing{$prog};
+ # At least on some LTS distros like CentOS 7, texlive doesn't
+ # provide all packages we need. When such distros are
+ # detected, we have to disable PDF output.
+ #
+ # So, we need to ignore the packages that distros would
+ # need for LaTeX to work
+ if ($is_optional == 2 && !$pdf) {
+ $optional--;
+ next;
+ }
+
if ($is_optional) {
print "Warning: better to also install \"$prog\".\n";
} else {
@@ -333,10 +344,13 @@ sub give_debian_hints()
if ($pdf) {
check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
- "fonts-dejavu", 1);
+ "fonts-dejavu", 2);
+
+ check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ "fonts-noto-cjk", 2);
}
- check_program("dvipng", 1) if ($pdf);
+ check_program("dvipng", 2) if ($pdf);
check_missing(\%map);
return if (!$need && !$optional);
@@ -363,6 +377,7 @@ sub give_redhat_hints()
my @fedora_tex_pkgs = (
"texlive-collection-fontsrecommended",
"texlive-collection-latex",
+ "texlive-xecjk",
"dejavu-sans-fonts",
"dejavu-serif-fonts",
"dejavu-sans-mono-fonts",
@@ -371,22 +386,45 @@ sub give_redhat_hints()
#
# Checks valid for RHEL/CentOS version 7.x.
#
- if (! $system_release =~ /Fedora/) {
+ my $old = 0;
+ my $rel;
+ $rel = $1 if ($system_release =~ /release\s+(\d+)/);
+
+ if (!($system_release =~ /Fedora/)) {
$map{"virtualenv"} = "python-virtualenv";
- }
- my $release;
+ if ($rel && $rel < 8) {
+ $old = 1;
+ $pdf = 0;
+
+ printf("Note: texlive packages on RHEL/CENTOS <= 7 are incomplete. Can't support PDF output\n");
+ printf("If you want to build PDF, please read:\n");
+ printf("\thttps://www.systutorials.com/241660/how-to-install-tex-live-on-centos-7-linux/\n");
+ }
+ } else {
+ if ($rel && $rel < 26) {
+ $old = 1;
+ }
+ }
+ if (!$rel) {
+ printf("Couldn't identify release number\n");
+ $old = 1;
+ $pdf = 0;
+ }
- $release = $1 if ($system_release =~ /Fedora\s+release\s+(\d+)/);
+ if ($pdf) {
+ check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "google-noto-sans-cjk-ttc-fonts", 2);
+ }
- check_rpm_missing(\@fedora26_opt_pkgs, 1) if ($pdf && $release >= 26);
- check_rpm_missing(\@fedora_tex_pkgs, 1) if ($pdf);
- check_missing_tex(1) if ($pdf);
+ check_rpm_missing(\@fedora26_opt_pkgs, 2) if ($pdf && !$old);
+ check_rpm_missing(\@fedora_tex_pkgs, 2) if ($pdf);
+ check_missing_tex(2) if ($pdf);
check_missing(\%map);
return if (!$need && !$optional);
- if ($release >= 18) {
+ if (!$old) {
# dnf, for Fedora 18+
printf("You should run:\n\n\tsudo dnf install -y $install\n");
} else {
@@ -425,8 +463,15 @@ sub give_opensuse_hints()
"texlive-zapfding",
);
- check_rpm_missing(\@suse_tex_pkgs, 1) if ($pdf);
- check_missing_tex(1) if ($pdf);
+ $map{"latexmk"} = "texlive-latexmk-bin";
+
+ # FIXME: add support for installing CJK fonts
+ #
+ # I tried hard, but was unable to find a way to install
+ # "Noto Sans CJK SC" on openSUSE
+
+ check_rpm_missing(\@suse_tex_pkgs, 2) if ($pdf);
+ check_missing_tex(2) if ($pdf);
check_missing(\%map);
return if (!$need && !$optional);
@@ -450,7 +495,14 @@ sub give_mageia_hints()
"texlive-fontsextra",
);
- check_rpm_missing(\@tex_pkgs, 1) if ($pdf);
+ $map{"latexmk"} = "texlive-collection-basic";
+
+ if ($pdf) {
+ check_missing_file("/usr/share/fonts/google-noto-cjk/NotoSansCJK-Regular.ttc",
+ "google-noto-sans-cjk-ttc-fonts", 2);
+ }
+
+ check_rpm_missing(\@tex_pkgs, 2) if ($pdf);
check_missing(\%map);
return if (!$need && !$optional);
@@ -473,7 +525,13 @@ sub give_arch_linux_hints()
"texlive-latexextra",
"ttf-dejavu",
);
- check_pacman_missing(\@archlinux_tex_pkgs, 1) if ($pdf);
+ check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
+
+ if ($pdf) {
+ check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJK-Regular.ttc",
+ "noto-fonts-cjk", 2);
+ }
+
check_missing(\%map);
return if (!$need && !$optional);
@@ -492,15 +550,31 @@ sub give_gentoo_hints()
);
check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf",
- "media-fonts/dejavu", 1) if ($pdf);
+ "media-fonts/dejavu", 2) if ($pdf);
+
+ if ($pdf) {
+ check_missing_file("/usr/share/fonts/noto-cjk/NotoSansCJKsc-Regular.otf",
+ "media-fonts/noto-cjk", 2);
+ }
check_missing(\%map);
return if (!$need && !$optional);
printf("You should run:\n\n");
- printf("\tsudo su -c 'echo \"media-gfx/imagemagick svg png\" > /etc/portage/package.use/imagemagick'\n");
- printf("\tsudo su -c 'echo \"media-gfx/graphviz cairo pdf\" > /etc/portage/package.use/graphviz'\n");
+
+ my $imagemagick = "media-gfx/imagemagick svg png";
+ my $cairo = "media-gfx/graphviz cairo pdf";
+ my $portage_imagemagick = "/etc/portage/package.use/imagemagick";
+ my $portage_cairo = "/etc/portage/package.use/graphviz";
+
+ if (qx(cat $portage_imagemagick) ne "$imagemagick\n") {
+ printf("\tsudo su -c 'echo \"$imagemagick\" > $portage_imagemagick'\n")
+ }
+ if (qx(cat $portage_cairo) ne "$cairo\n") {
+ printf("\tsudo su -c 'echo \"$cairo\" > $portage_cairo'\n");
+ }
+
printf("\tsudo emerge --ask $install\n");
}
@@ -560,7 +634,7 @@ sub check_distros()
my %map = (
"sphinx-build" => "sphinx"
);
- check_missing_tex(1) if ($pdf);
+ check_missing_tex(2) if ($pdf);
check_missing(\%map);
print "I don't know distro $system_release.\n";
print "So, I can't provide you a hint with the install procedure.\n";
@@ -589,11 +663,13 @@ sub check_needs()
check_program("make", 0);
check_program("gcc", 0);
check_python_module("sphinx_rtd_theme", 1) if (!$virtualenv);
- check_program("xelatex", 1) if ($pdf);
check_program("dot", 1);
check_program("convert", 1);
- check_program("rsvg-convert", 1) if ($pdf);
- check_program("latexmk", 1) if ($pdf);
+
+ # Extra PDF files - should use 2 for is_optional
+ check_program("xelatex", 2) if ($pdf);
+ check_program("rsvg-convert", 2) if ($pdf);
+ check_program("latexmk", 2) if ($pdf);
check_distros();
diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening
index a1ffe2eb4d5f..af4c979b38ee 100644
--- a/security/Kconfig.hardening
+++ b/security/Kconfig.hardening
@@ -61,6 +61,7 @@ choice
config GCC_PLUGIN_STRUCTLEAK_BYREF
bool "zero-init structs passed by reference (strong)"
depends on GCC_PLUGINS
+ depends on !(KASAN && KASAN_STACK=1)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any structures on the stack that may
@@ -70,9 +71,15 @@ choice
exposures, like CVE-2017-1000410:
https://git.kernel.org/linus/06e7e776ca4d3654
+ As a side-effect, this keeps a lot of variables on the
+ stack that can otherwise be optimized out, so combining
+ this with CONFIG_KASAN_STACK can lead to a stack overflow
+ and is disallowed.
+
config GCC_PLUGIN_STRUCTLEAK_BYREF_ALL
bool "zero-init anything passed by reference (very strong)"
depends on GCC_PLUGINS
+ depends on !(KASAN && KASAN_STACK=1)
select GCC_PLUGIN_STRUCTLEAK
help
Zero-initialize any stack variables that may be passed
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 624ccc6ac744..f8efaa9f647c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -272,6 +272,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
return v;
}
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
/*
* Initialize a policy database structure.
*/
@@ -319,8 +321,10 @@ static int policydb_init(struct policydb *p)
out:
hashtab_destroy(p->filename_trans);
hashtab_destroy(p->range_tr);
- for (i = 0; i < SYM_NUM; i++)
+ for (i = 0; i < SYM_NUM; i++) {
+ hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
hashtab_destroy(p->symtab[i].table);
+ }
return rc;
}
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index e63a90ff2728..1f0a6eaa2d6a 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -286,6 +286,11 @@ static int sidtab_reverse_lookup(struct sidtab *s, struct context *context,
++count;
}
+ /* bail out if we already reached max entries */
+ rc = -EOVERFLOW;
+ if (count >= SIDTAB_MAX)
+ goto out_unlock;
+
/* insert context into new entry */
rc = -ENOMEM;
dst = sidtab_do_lookup(s, count, 1);
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index 7b977b753a03..7985dd8198b6 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -122,17 +122,12 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
vendor_id);
ret = device_add(&codec->dev);
- if (ret)
- goto err_free_codec;
+ if (ret) {
+ put_device(&codec->dev);
+ return ret;
+ }
return 0;
-err_free_codec:
- of_node_put(codec->dev.of_node);
- put_device(&codec->dev);
- kfree(codec);
- ac97_ctrl->codecs[idx] = NULL;
-
- return ret;
}
unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
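Note: the ac97 hunk above swaps a manual error path (of_node_put/kfree) for a single put_device() call. A hedged, generic sketch of the driver-core rule this relies on — not the ac97 code itself — is: once a device has been initialized, a failed device_add() is unwound by dropping the reference, and the release() callback does the freeing.

#include <linux/device.h>

/* Sketch, assuming 'dev' was prepared with device_initialize() and has a
 * release() callback that frees the enclosing structure. */
static int register_example_device(struct device *dev)
{
	int ret;

	ret = device_add(dev);
	if (ret) {
		/* Do not kfree() here: the reference owns the memory now. */
		put_device(dev);
		return ret;
	}
	return 0;
}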
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 99b882158705..41905afada63 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -574,10 +574,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
stream->metadata_set = false;
stream->next_track = false;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
} else {
return -EPERM;
}
@@ -693,8 +690,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_SETUP:
+ if (stream->direction != SND_COMPRESS_CAPTURE)
+ return -EPERM;
+ break;
+ case SNDRV_PCM_STATE_PREPARED:
+ break;
+ default:
return -EPERM;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
if (!retval)
stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -705,9 +711,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
return -EPERM;
+ default:
+ break;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
if (!retval) {
snd_compr_drain_notify(stream);
@@ -795,9 +807,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
return -EPERM;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
+ default:
+ break;
+ }
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
if (retval) {
@@ -817,6 +837,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
return -EPERM;
+ /* next track doesn't have any meaning for capture streams */
+ if (stream->direction == SND_COMPRESS_CAPTURE)
+ return -EPERM;
+
/* you can signal next track if this is intended to be a gapless stream
* and current track metadata is set
*/
@@ -834,9 +858,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
static int snd_compr_partial_drain(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
+ return -EPERM;
+ case SNDRV_PCM_STATE_XRUN:
+ return -EPIPE;
+ default:
+ break;
+ }
+
+ /* partial drain doesn't have any meaning for capture streams */
+ if (stream->direction == SND_COMPRESS_CAPTURE)
return -EPERM;
+
/* stream can be drained only when next track has been signalled */
if (stream->next_track == false)
return -EPERM;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 860543a4c840..703857aab00f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -77,7 +77,7 @@ void snd_pcm_group_init(struct snd_pcm_group *group)
spin_lock_init(&group->lock);
mutex_init(&group->mutex);
INIT_LIST_HEAD(&group->substreams);
- refcount_set(&group->refs, 0);
+ refcount_set(&group->refs, 1);
}
/* define group lock helpers */
@@ -1096,8 +1096,7 @@ static void snd_pcm_group_unref(struct snd_pcm_group *group,
if (!group)
return;
- do_free = refcount_dec_and_test(&group->refs) &&
- list_empty(&group->substreams);
+ do_free = refcount_dec_and_test(&group->refs);
snd_pcm_group_unlock(group, substream->pcm->nonatomic);
if (do_free)
kfree(group);
@@ -1874,6 +1873,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
if (!to_check)
break; /* all drained */
init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&to_check->sleep, &wait);
snd_pcm_stream_unlock_irq(substream);
if (runtime->no_period_wakeup)
@@ -1886,7 +1886,7 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
}
tout = msecs_to_jiffies(tout * 1000);
}
- tout = schedule_timeout_interruptible(tout);
+ tout = schedule_timeout(tout);
snd_pcm_stream_lock_irq(substream);
group = snd_pcm_stream_group_ref(substream);
@@ -2020,6 +2020,7 @@ static int snd_pcm_link(struct snd_pcm_substream *substream, int fd)
snd_pcm_group_lock_irq(target_group, nonatomic);
snd_pcm_stream_lock(substream1);
snd_pcm_group_assign(substream1, target_group);
+ refcount_inc(&target_group->refs);
snd_pcm_stream_unlock(substream1);
snd_pcm_group_unlock_irq(target_group, nonatomic);
_end:
@@ -2056,13 +2057,14 @@ static int snd_pcm_unlink(struct snd_pcm_substream *substream)
snd_pcm_group_lock_irq(group, nonatomic);
relink_to_local(substream);
+ refcount_dec(&group->refs);
/* detach the last stream, too */
if (list_is_singular(&group->substreams)) {
relink_to_local(list_first_entry(&group->substreams,
struct snd_pcm_substream,
link_list));
- do_free = !refcount_read(&group->refs);
+ do_free = refcount_dec_and_test(&group->refs);
}
snd_pcm_group_unlock_irq(group, nonatomic);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 1192c7561d62..3c2db3816029 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -136,10 +136,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
if (!acomp)
return -ENODEV;
if (!acomp->ops) {
- request_module("i915");
- /* 60s timeout */
- wait_for_completion_timeout(&bind_complete,
- msecs_to_jiffies(60 * 1000));
+ if (!IS_ENABLED(CONFIG_MODULES) ||
+ !request_module("i915")) {
+ /* 60s timeout */
+ wait_for_completion_timeout(&bind_complete,
+ msecs_to_jiffies(60 * 1000));
+ }
}
if (!acomp->ops) {
dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e30e86ca6b72..51f10ed9bc43 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2942,7 +2942,7 @@ static int hda_codec_runtime_resume(struct device *dev)
static int hda_codec_force_resume(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
- bool forced_resume = !codec->relaxed_resume;
+ bool forced_resume = !codec->relaxed_resume && codec->jacktbl.used;
int ret;
/* The get/put pair below enforces the runtime resume even if the
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cb8b0945547c..1e14d7270adf 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -313,11 +313,10 @@ enum {
#define AZX_DCAPS_INTEL_SKYLAKE \
(AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
+ AZX_DCAPS_SYNC_WRITE |\
AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
-#define AZX_DCAPS_INTEL_BROXTON \
- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
- AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT)
+#define AZX_DCAPS_INTEL_BROXTON AZX_DCAPS_INTEL_SKYLAKE
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4f8d0845ee1e..f299f137eaea 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -1083,6 +1083,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
*/
static const struct hda_device_id snd_hda_id_conexant[] = {
+ HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 71d5f540334a..4c12cc5b53fd 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -72,7 +72,7 @@ int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
struct usb_host_endpoint *ep;
ep = usb_pipe_endpoint(dev, pipe);
- if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
+ if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
return -EINVAL;
return 0;
}
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index f0662bd4e50f..27bf61c177c0 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -368,7 +368,7 @@ static const struct line6_properties podhd_properties_table[] = {
.name = "POD HD500",
.capabilities = LINE6_CAP_PCM
| LINE6_CAP_HWMON,
- .altsetting = 1,
+ .altsetting = 0,
.ep_ctrl_r = 0x81,
.ep_ctrl_w = 0x01,
.ep_audio_r = 0x86,
diff --git a/sound/usb/line6/variax.c b/sound/usb/line6/variax.c
index 0d24c72c155f..ed158f04de80 100644
--- a/sound/usb/line6/variax.c
+++ b/sound/usb/line6/variax.c
@@ -244,5 +244,5 @@ static struct usb_driver variax_driver = {
module_usb_driver(variax_driver);
-MODULE_DESCRIPTION("Vairax Workbench USB driver");
+MODULE_DESCRIPTION("Variax Workbench USB driver");
MODULE_LICENSE("GPL");
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index 4602464ebdfb..a4217c1a5d01 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -214,6 +214,18 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
+ /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
+ /* Higher values mean better protection. */
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
/* Device Control API: ARM VGIC */
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index d819a3e8b552..9a507716ae2f 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -229,6 +229,16 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
KVM_REG_ARM_FW | ((r) & 0xffff))
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
+#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
/* SVE registers */
#define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT)
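Note: the new SMCCC workaround IDs above are ordinary KVM firmware pseudo-registers. A minimal userspace sketch of reading one — assuming vcpu_fd is an open KVM vCPU descriptor; KVM_GET_ONE_REG is the standard ioctl from <linux/kvm.h>:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* On success, *state holds one of the *_NOT_AVAIL/_AVAIL/_NOT_REQUIRED
 * values defined in the hunk above. */
static int read_smccc_wa1(int vcpu_fd, __u64 *state)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
		.addr = (__u64)(unsigned long)state,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}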
diff --git a/tools/arch/powerpc/include/uapi/asm/mman.h b/tools/arch/powerpc/include/uapi/asm/mman.h
index f33105bc5ca6..8601d824a9c6 100644
--- a/tools/arch/powerpc/include/uapi/asm/mman.h
+++ b/tools/arch/powerpc/include/uapi/asm/mman.h
@@ -4,12 +4,8 @@
#define MAP_DENYWRITE 0x0800
#define MAP_EXECUTABLE 0x1000
#define MAP_GROWSDOWN 0x0100
-#define MAP_HUGETLB 0x40000
#define MAP_LOCKED 0x80
-#define MAP_NONBLOCK 0x10000
#define MAP_NORESERVE 0x40
-#define MAP_POPULATE 0x8000
-#define MAP_STACK 0x20000
#include <uapi/asm-generic/mman-common.h>
/* MAP_32BIT is undefined on powerpc, fix it for perf */
#define MAP_32BIT 0
diff --git a/tools/arch/sparc/include/uapi/asm/mman.h b/tools/arch/sparc/include/uapi/asm/mman.h
index 38920eed8cbf..7b94dccc843d 100644
--- a/tools/arch/sparc/include/uapi/asm/mman.h
+++ b/tools/arch/sparc/include/uapi/asm/mman.h
@@ -4,12 +4,8 @@
#define MAP_DENYWRITE 0x0800
#define MAP_EXECUTABLE 0x1000
#define MAP_GROWSDOWN 0x0200
-#define MAP_HUGETLB 0x40000
#define MAP_LOCKED 0x100
-#define MAP_NONBLOCK 0x10000
#define MAP_NORESERVE 0x40
-#define MAP_POPULATE 0x8000
-#define MAP_STACK 0x20000
#include <uapi/asm-generic/mman-common.h>
/* MAP_32BIT is undefined on sparc, fix it for perf */
#define MAP_32BIT 0
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index d6ab5b4d15e5..503d3f42da16 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -378,10 +378,11 @@ struct kvm_sync_regs {
struct kvm_vcpu_events events;
};
-#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
-#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
-#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
-#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
+#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1 /* unused */
@@ -432,4 +433,17 @@ struct kvm_nested_state {
} data;
};
+/* for KVM_CAP_PMU_EVENT_FILTER */
+struct kvm_pmu_event_filter {
+ __u32 action;
+ __u32 nevents;
+ __u32 fixed_counter_bitmap;
+ __u32 flags;
+ __u32 pad[4];
+ __u64 events[0];
+};
+
+#define KVM_PMU_EVENT_ALLOW 0
+#define KVM_PMU_EVENT_DENY 1
+
#endif /* _ASM_X86_KVM_H */
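Note: a rough userspace sketch of installing the new PMU event filter on a VM, assuming vm_fd is a KVM VM descriptor and that KVM_SET_PMU_EVENT_FILTER (see the tools/include/uapi/linux/kvm.h hunk further down) is available in the headers:

#include <linux/kvm.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

/* Allow only the listed raw PMU events; everything else is filtered out. */
static int set_pmu_allowlist(int vm_fd, const __u64 *events, __u32 nevents)
{
	struct kvm_pmu_event_filter *filter;
	size_t sz = sizeof(*filter) + nevents * sizeof(events[0]);
	int ret;

	filter = calloc(1, sz);
	if (!filter)
		return -1;

	filter->action = KVM_PMU_EVENT_ALLOW;
	filter->nevents = nevents;
	memcpy(filter->events, events, nevents * sizeof(events[0]));

	ret = ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, filter);
	free(filter);
	return ret;
}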
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h
index d213ec5c3766..f0b0c90dd398 100644
--- a/tools/arch/x86/include/uapi/asm/vmx.h
+++ b/tools/arch/x86/include/uapi/asm/vmx.h
@@ -146,7 +146,6 @@
#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_PDPTE_FAIL 2
-#define VMX_ABORT_VMCS_CORRUPTED 3
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4
#endif /* _UAPIVMX_H */
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index abd238d0f7a4..63b1f506ea67 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -19,15 +19,18 @@
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
-#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
-# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
-#else
-# define MAP_UNINITIALIZED 0x0 /* Don't support this flag */
-#endif
-/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */
+/* 0x0100 - 0x4000 flags are defined in asm-generic/mman.h */
+#define MAP_POPULATE 0x008000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x010000 /* do not block on IO */
+#define MAP_STACK 0x020000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x040000 /* create a huge page mapping */
+#define MAP_SYNC 0x080000 /* perform synchronous page faults for the mapping */
#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */
+#define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be
+ * uninitialized */
+
/*
* Flags for mlock
*/
diff --git a/tools/include/uapi/asm-generic/mman.h b/tools/include/uapi/asm-generic/mman.h
index 36c197fc44a0..406f7718f9ad 100644
--- a/tools/include/uapi/asm-generic/mman.h
+++ b/tools/include/uapi/asm-generic/mman.h
@@ -9,13 +9,11 @@
#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
#define MAP_LOCKED 0x2000 /* pages are locked */
#define MAP_NORESERVE 0x4000 /* don't check for reservations */
-#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
-#define MAP_NONBLOCK 0x10000 /* do not block on IO */
-#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-#define MAP_HUGETLB 0x40000 /* create a huge page mapping */
-#define MAP_SYNC 0x80000 /* perform synchronous page faults for the mapping */
-/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
+/*
+ * Bits [26:31] are reserved, see asm-generic/hugetlb_encode.h
+ * for MAP_HUGETLB usage
+ */
#define MCL_CURRENT 1 /* lock all current mappings */
#define MCL_FUTURE 2 /* lock all future mappings */
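Note: the two mman hunks only move flag definitions between the tools/ copies of the headers; their values and meaning are unchanged. As a small, hedged reminder of how these flags are consumed (plain mmap(2) usage, nothing specific to this patch):

#include <stddef.h>
#include <sys/mman.h>

/* Anonymous, prefaulted, huge-page-backed private mapping. */
static void *map_prefaulted_huge(size_t len)
{
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_POPULATE,
		       -1, 0);

	return p == MAP_FAILED ? NULL : p;
}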
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index a87904daf103..1be0e798e362 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -844,9 +844,15 @@ __SYSCALL(__NR_fsconfig, sys_fsconfig)
__SYSCALL(__NR_fsmount, sys_fsmount)
#define __NR_fspick 433
__SYSCALL(__NR_fspick, sys_fspick)
+#define __NR_pidfd_open 434
+__SYSCALL(__NR_pidfd_open, sys_pidfd_open)
+#ifdef __ARCH_WANT_SYS_CLONE3
+#define __NR_clone3 435
+__SYSCALL(__NR_clone3, sys_clone3)
+#endif
#undef __NR_syscalls
-#define __NR_syscalls 434
+#define __NR_syscalls 436
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 661d73f9a919..8a5b2f8f8eb9 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -50,6 +50,7 @@ typedef unsigned int drm_handle_t;
#else /* One of the BSDs */
+#include <stdint.h>
#include <sys/ioccom.h>
#include <sys/types.h>
typedef int8_t __s8;
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 3a73f5316766..328d05e77d9f 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -136,6 +136,8 @@ enum drm_i915_gem_engine_class {
struct i915_engine_class_instance {
__u16 engine_class; /* see enum drm_i915_gem_engine_class */
__u16 engine_instance;
+#define I915_ENGINE_CLASS_INVALID_NONE -1
+#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
};
/**
@@ -355,6 +357,8 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_PERF_ADD_CONFIG 0x37
#define DRM_I915_PERF_REMOVE_CONFIG 0x38
#define DRM_I915_QUERY 0x39
+#define DRM_I915_GEM_VM_CREATE 0x3a
+#define DRM_I915_GEM_VM_DESTROY 0x3b
/* Must be kept compact -- no holes */
#define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -415,6 +419,8 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
#define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
+#define DRM_IOCTL_I915_GEM_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
+#define DRM_IOCTL_I915_GEM_VM_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
@@ -598,6 +604,12 @@ typedef struct drm_i915_irq_wait {
*/
#define I915_PARAM_MMAP_GTT_COHERENT 52
+/*
+ * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
+ * execution through use of explicit fence support.
+ * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
+ */
+#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -1120,7 +1132,16 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_FENCE_ARRAY (1<<19)
-#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
+/*
+ * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_SUBMIT (1 << 20)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1))
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
@@ -1464,8 +1485,9 @@ struct drm_i915_gem_context_create_ext {
__u32 ctx_id; /* output: id of new context*/
__u32 flags;
#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1u << 1)
#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
- (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
+ (-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
__u64 extensions;
};
@@ -1507,6 +1529,41 @@ struct drm_i915_gem_context_param {
* On creation, all new contexts are marked as recoverable.
*/
#define I915_CONTEXT_PARAM_RECOVERABLE 0x8
+
+ /*
+ * The id of the associated virtual memory address space (ppGTT) of
+ * this context. Can be retrieved and passed to another context
+ * (on the same fd) for both to use the same ppGTT and so share
+ * address layouts, and avoid reloading the page tables on context
+ * switches between themselves.
+ *
+ * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
+ */
+#define I915_CONTEXT_PARAM_VM 0x9
+
+/*
+ * I915_CONTEXT_PARAM_ENGINES:
+ *
+ * Bind this context to operate on this subset of available engines. Henceforth,
+ * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
+ * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
+ * and upwards. Slots 0...N are filled in using the specified (class, instance).
+ * Use
+ * engine_class: I915_ENGINE_CLASS_INVALID,
+ * engine_instance: I915_ENGINE_CLASS_INVALID_NONE
+ * to specify a gap in the array that can be filled in later, e.g. by a
+ * virtual engine used for load balancing.
+ *
+ * Setting the number of engines bound to the context to 0, by passing a zero
+ * sized argument, will revert back to default settings.
+ *
+ * See struct i915_context_param_engines.
+ *
+ * Extensions:
+ * i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
+ * i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
+ */
+#define I915_CONTEXT_PARAM_ENGINES 0xa
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -1540,9 +1597,10 @@ struct drm_i915_gem_context_param_sseu {
struct i915_engine_class_instance engine;
/*
- * Unused for now. Must be cleared to zero.
+ * Unknown flags must be cleared to zero.
*/
__u32 flags;
+#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
/*
* Mask of slices to enable for the context. Valid values are a subset
@@ -1570,12 +1628,115 @@ struct drm_i915_gem_context_param_sseu {
__u32 rsvd;
};
+/*
+ * i915_context_engines_load_balance:
+ *
+ * Enable load balancing across this set of engines.
+ *
+ * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
+ * used will proxy the execbuffer request onto one of the set of engines
+ * in such a way as to distribute the load evenly across the set.
+ *
+ * The set of engines must be compatible (e.g. the same HW class) as they
+ * will share the same logical GPU context and ring.
+ *
+ * To intermix rendering with the virtual engine and direct rendering onto
+ * the backing engines (bypassing the load balancing proxy), the context must
+ * be defined to use a single timeline for all engines.
+ */
+struct i915_context_engines_load_balance {
+ struct i915_user_extension base;
+
+ __u16 engine_index;
+ __u16 num_siblings;
+ __u32 flags; /* all undefined flags must be zero */
+
+ __u64 mbz64; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
+ struct i915_user_extension base; \
+ __u16 engine_index; \
+ __u16 num_siblings; \
+ __u32 flags; \
+ __u64 mbz64; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+/*
+ * i915_context_engines_bond:
+ *
+ * Constructed bonded pairs for execution within a virtual engine.
+ *
+ * All engines are equal, but some are more equal than others. Given
+ * the distribution of resources in the HW, it may be preferable to run
+ * a request on a given subset of engines in parallel to a request on a
+ * specific engine. We enable this selection of engines within a virtual
+ * engine by specifying bonding pairs, for any given master engine we will
+ * only execute on one of the corresponding siblings within the virtual engine.
+ *
+ * To execute a request in parallel on the master engine and a sibling requires
+ * coordination with a I915_EXEC_FENCE_SUBMIT.
+ */
+struct i915_context_engines_bond {
+ struct i915_user_extension base;
+
+ struct i915_engine_class_instance master;
+
+ __u16 virtual_index; /* index of virtual engine in ctx->engines[] */
+ __u16 num_bonds;
+
+ __u64 flags; /* all undefined flags must be zero */
+ __u64 mbz64[4]; /* reserved for future use; must be zero */
+
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
+ struct i915_user_extension base; \
+ struct i915_engine_class_instance master; \
+ __u16 virtual_index; \
+ __u16 num_bonds; \
+ __u64 flags; \
+ __u64 mbz64[4]; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
+struct i915_context_param_engines {
+ __u64 extensions; /* linked chain of extension blocks, 0 terminates */
+#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
+#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
+ struct i915_engine_class_instance engines[0];
+} __attribute__((packed));
+
+#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
+ __u64 extensions; \
+ struct i915_engine_class_instance engines[N__]; \
+} __attribute__((packed)) name__
+
struct drm_i915_gem_context_create_ext_setparam {
#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
struct i915_user_extension base;
struct drm_i915_gem_context_param param;
};
+struct drm_i915_gem_context_create_ext_clone {
+#define I915_CONTEXT_CREATE_EXT_CLONE 1
+ struct i915_user_extension base;
+ __u32 clone_id;
+ __u32 flags;
+#define I915_CONTEXT_CLONE_ENGINES (1u << 0)
+#define I915_CONTEXT_CLONE_FLAGS (1u << 1)
+#define I915_CONTEXT_CLONE_SCHEDATTR (1u << 2)
+#define I915_CONTEXT_CLONE_SSEU (1u << 3)
+#define I915_CONTEXT_CLONE_TIMELINE (1u << 4)
+#define I915_CONTEXT_CLONE_VM (1u << 5)
+#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
+ __u64 rsvd;
+};
+
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
@@ -1821,6 +1982,7 @@ struct drm_i915_perf_oa_config {
struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
+#define DRM_I915_QUERY_ENGINE_INFO 2
/* Must be kept compact -- no holes and well documented */
/*
@@ -1919,6 +2081,47 @@ struct drm_i915_query_topology_info {
__u8 data[];
};
+/**
+ * struct drm_i915_engine_info
+ *
+ * Describes one engine and its capabilities as known to the driver.
+ */
+struct drm_i915_engine_info {
+ /** Engine class and instance. */
+ struct i915_engine_class_instance engine;
+
+ /** Reserved field. */
+ __u32 rsvd0;
+
+ /** Engine flags. */
+ __u64 flags;
+
+ /** Capabilities of this engine. */
+ __u64 capabilities;
+#define I915_VIDEO_CLASS_CAPABILITY_HEVC (1 << 0)
+#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC (1 << 1)
+
+ /** Reserved fields. */
+ __u64 rsvd1[4];
+};
+
+/**
+ * struct drm_i915_query_engine_info
+ *
+ * Engine info query enumerates all engines known to the driver by filling in
+ * an array of struct drm_i915_engine_info structures.
+ */
+struct drm_i915_query_engine_info {
+ /** Number of struct drm_i915_engine_info structs following. */
+ __u32 num_engines;
+
+ /** MBZ */
+ __u32 rsvd[3];
+
+ /** Marker for drm_i915_engine_info structures. */
+ struct drm_i915_engine_info engines[];
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 7d113a9602f0..4a8c02cafa9a 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -695,6 +695,7 @@ enum {
IFLA_VF_IB_NODE_GUID, /* VF Infiniband node GUID */
IFLA_VF_IB_PORT_GUID, /* VF Infiniband port GUID */
IFLA_VF_VLAN_LIST, /* nested list of vlans, option for QinQ */
+ IFLA_VF_BROADCAST, /* VF broadcast */
__IFLA_VF_MAX,
};
@@ -705,6 +706,10 @@ struct ifla_vf_mac {
__u8 mac[32]; /* MAX_ADDR_LEN */
};
+struct ifla_vf_broadcast {
+ __u8 broadcast[32];
+};
+
struct ifla_vf_vlan {
__u32 vf;
__u32 vlan; /* 0 - 4095, 0 disables VLAN filter */
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index c2152f3dd02d..5e3f12d5359e 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -116,7 +116,7 @@ struct kvm_irq_level {
* ACPI gsi notion of irq.
* For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
* For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
- * For ARM: See Documentation/virtual/kvm/api.txt
+ * For ARM: See Documentation/virt/kvm/api.txt
*/
union {
__u32 irq;
@@ -995,6 +995,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_ARM_SVE 170
#define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
#define KVM_CAP_ARM_PTRAUTH_GENERIC 172
+#define KVM_CAP_PMU_EVENT_FILTER 173
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1085,7 +1086,7 @@ struct kvm_xen_hvm_config {
*
* KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
* the irqfd to operate in resampling mode for level triggered interrupt
- * emulation. See Documentation/virtual/kvm/api.txt.
+ * emulation. See Documentation/virt/kvm/api.txt.
*/
#define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
@@ -1329,6 +1330,8 @@ struct kvm_s390_ucas_mapping {
#define KVM_PPC_GET_RMMU_INFO _IOW(KVMIO, 0xb0, struct kvm_ppc_rmmu_info)
/* Available with KVM_CAP_PPC_GET_CPU_CHAR */
#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
+/* Available with KVM_CAP_PMU_EVENT_FILTER */
+#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index ed4ee170bee2..b3105ac1381a 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -2,6 +2,8 @@
#ifndef _UAPI_LINUX_SCHED_H
#define _UAPI_LINUX_SCHED_H
+#include <linux/types.h>
+
/*
* cloning flags:
*/
@@ -32,6 +34,20 @@
#define CLONE_IO 0x80000000 /* Clone io context */
/*
+ * Arguments for the clone3 syscall
+ */
+struct clone_args {
+ __aligned_u64 flags;
+ __aligned_u64 pidfd;
+ __aligned_u64 child_tid;
+ __aligned_u64 parent_tid;
+ __aligned_u64 exit_signal;
+ __aligned_u64 stack;
+ __aligned_u64 stack_size;
+ __aligned_u64 tls;
+};
+
+/*
* Scheduling policies
*/
#define SCHED_NORMAL 0
@@ -51,9 +67,21 @@
#define SCHED_FLAG_RESET_ON_FORK 0x01
#define SCHED_FLAG_RECLAIM 0x02
#define SCHED_FLAG_DL_OVERRUN 0x04
+#define SCHED_FLAG_KEEP_POLICY 0x08
+#define SCHED_FLAG_KEEP_PARAMS 0x10
+#define SCHED_FLAG_UTIL_CLAMP_MIN 0x20
+#define SCHED_FLAG_UTIL_CLAMP_MAX 0x40
+
+#define SCHED_FLAG_KEEP_ALL (SCHED_FLAG_KEEP_POLICY | \
+ SCHED_FLAG_KEEP_PARAMS)
+
+#define SCHED_FLAG_UTIL_CLAMP (SCHED_FLAG_UTIL_CLAMP_MIN | \
+ SCHED_FLAG_UTIL_CLAMP_MAX)
#define SCHED_FLAG_ALL (SCHED_FLAG_RESET_ON_FORK | \
SCHED_FLAG_RECLAIM | \
- SCHED_FLAG_DL_OVERRUN)
+ SCHED_FLAG_DL_OVERRUN | \
+ SCHED_FLAG_KEEP_ALL | \
+ SCHED_FLAG_UTIL_CLAMP)
#endif /* _UAPI_LINUX_SCHED_H */
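Note: a minimal sketch of driving the new clone3 entry point from user space, assuming a kernel that provides it; the fallback syscall number 435 matches the x86-64 table hunk below and is only defined here for older headers.

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args, with new enough headers */
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_clone3
#define __NR_clone3 435		/* x86-64 number from the syscall table below */
#endif

/* Behaves like fork(): returns 0 in the child, the child's PID in the parent. */
static pid_t fork_via_clone3(void)
{
	struct clone_args args;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;

	return syscall(__NR_clone3, &args, sizeof(args));
}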
diff --git a/tools/include/uapi/linux/usbdevice_fs.h b/tools/include/uapi/linux/usbdevice_fs.h
index 964e87217be4..78efe870c2b7 100644
--- a/tools/include/uapi/linux/usbdevice_fs.h
+++ b/tools/include/uapi/linux/usbdevice_fs.h
@@ -76,6 +76,26 @@ struct usbdevfs_connectinfo {
unsigned char slow;
};
+struct usbdevfs_conninfo_ex {
+ __u32 size; /* Size of the structure from the kernel's */
+ /* point of view. Can be used by userspace */
+ /* to determine how much data can be */
+ /* used/trusted. */
+ __u32 busnum; /* USB bus number, as enumerated by the */
+ /* kernel, the device is connected to. */
+ __u32 devnum; /* Device address on the bus. */
+ __u32 speed; /* USB_SPEED_* constants from ch9.h */
+ __u8 num_ports; /* Number of ports the device is connected */
+ /* to on the way to the root hub. It may */
+ /* be bigger than size of 'ports' array so */
+ /* userspace can detect overflows. */
+ __u8 ports[7]; /* List of ports on the way from the root */
+ /* hub to the device. Current limit in */
+ /* USB specification is 7 tiers (root hub, */
+ /* 5 intermediate hubs, device), which */
+ /* gives at most 6 port entries. */
+};
+
#define USBDEVFS_URB_SHORT_NOT_OK 0x01
#define USBDEVFS_URB_ISO_ASAP 0x02
#define USBDEVFS_URB_BULK_CONTINUATION 0x04
@@ -137,6 +157,7 @@ struct usbdevfs_hub_portinfo {
#define USBDEVFS_CAP_REAP_AFTER_DISCONNECT 0x10
#define USBDEVFS_CAP_MMAP 0x20
#define USBDEVFS_CAP_DROP_PRIVILEGES 0x40
+#define USBDEVFS_CAP_CONNINFO_EX 0x80
/* USBDEVFS_DISCONNECT_CLAIM flags & struct */
@@ -197,5 +218,10 @@ struct usbdevfs_streams {
#define USBDEVFS_FREE_STREAMS _IOR('U', 29, struct usbdevfs_streams)
#define USBDEVFS_DROP_PRIVILEGES _IOW('U', 30, __u32)
#define USBDEVFS_GET_SPEED _IO('U', 31)
+/*
+ * Returns struct usbdevfs_conninfo_ex; length is variable to allow
+ * extending size of the data returned.
+ */
+#define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
#endif /* _UAPI_LINUX_USBDEVICE_FS_H */
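Note: a hedged sketch of the new ioctl from user space, assuming usbfs_fd refers to an open /dev/bus/usb/BBB/DDD node; the length argument lets the kernel fill only as much of the structure as both sides know about.

#include <linux/usbdevice_fs.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int print_usb_conninfo(int usbfs_fd)
{
	struct usbdevfs_conninfo_ex ci;

	if (ioctl(usbfs_fd, USBDEVFS_CONNINFO_EX(sizeof(ci)), &ci) < 0)
		return -1;

	printf("bus %u, devnum %u, speed %u, %u port(s) from the root hub\n",
	       ci.busnum, ci.devnum, ci.speed, (unsigned int)ci.num_ports);
	return 0;
}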
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 467224feb43b..d821107f55f9 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
+#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
@@ -419,9 +420,9 @@ done:
static bool btf_check_endianness(const GElf_Ehdr *ehdr)
{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#if __BYTE_ORDER == __LITTLE_ENDIAN
return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#elif __BYTE_ORDER == __BIG_ENDIAN
return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
#else
# error "Unrecognized __BYTE_ORDER__"
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index 03748a742146..bae8879cdf58 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -10,6 +10,11 @@
#include <stdbool.h>
#include <stddef.h>
+#ifdef __GLIBC__
+#include <bits/wordsize.h>
+#else
+#include <bits/reg.h>
+#endif
#include "libbpf_internal.h"
static inline size_t hash_bits(size_t h, int bits)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 794dd5064ae8..2586b6cb8f34 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -20,6 +20,7 @@
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
+#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <asm/unistd.h>
@@ -612,10 +613,10 @@ errout:
static int bpf_object__check_endianness(struct bpf_object *obj)
{
-#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#if __BYTE_ORDER == __LITTLE_ENDIAN
if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
return 0;
-#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#elif __BYTE_ORDER == __BIG_ENDIAN
if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
return 0;
#else
@@ -1377,8 +1378,13 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj)
if (!has_datasec && kind == BTF_KIND_VAR) {
/* replace VAR with INT */
t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
- t->size = sizeof(int);
- *(int *)(t+1) = BTF_INT_ENC(0, 0, 32);
+ /*
+ * using size = 1 is the safest choice, 4 will be too
+ * big and cause kernel BTF validation failure if
+ * original variable took less than 4 bytes
+ */
+ t->size = 1;
+ *(int *)(t+1) = BTF_INT_ENC(0, 0, 8);
} else if (!has_datasec && kind == BTF_KIND_DATASEC) {
/* replace DATASEC with STRUCT */
struct btf_var_secinfo *v = (void *)(t + 1);
@@ -1500,6 +1506,12 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
BTF_ELF_SEC, err);
btf__free(obj->btf);
obj->btf = NULL;
+ /* btf_ext can't exist without btf, so free it as well */
+ if (obj->btf_ext) {
+ btf_ext__free(obj->btf_ext);
+ obj->btf_ext = NULL;
+ }
+
if (bpf_object__is_btf_mandatory(obj))
return err;
}
@@ -4507,13 +4519,13 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
const struct perf_buffer_opts *opts)
{
struct perf_buffer_params p = {};
- struct perf_event_attr attr = {
- .config = PERF_COUNT_SW_BPF_OUTPUT,
- .type = PERF_TYPE_SOFTWARE,
- .sample_type = PERF_SAMPLE_RAW,
- .sample_period = 1,
- .wakeup_events = 1,
- };
+ struct perf_event_attr attr = { 0, };
+
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT,
+ attr.type = PERF_TYPE_SOFTWARE;
+ attr.sample_type = PERF_SAMPLE_RAW;
+ attr.sample_period = 1;
+ attr.wakeup_events = 1;
p.attr = &attr;
p.sample_cb = opts ? opts->sample_cb : NULL;
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index 5007b5d4fd2c..680e63066cf3 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -317,17 +317,16 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
- struct ethtool_channels channels;
- struct ifreq ifr;
+ struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
+ struct ifreq ifr = {};
int fd, err, ret;
fd = socket(AF_INET, SOCK_DGRAM, 0);
if (fd < 0)
return -errno;
- channels.cmd = ETHTOOL_GCHANNELS;
ifr.ifr_data = (void *)&channels;
- strncpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
+ memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
ifr.ifr_name[IFNAMSIZ - 1] = '\0';
err = ioctl(fd, SIOCETHTOOL, &ifr);
if (err && errno != EOPNOTSUPP) {
@@ -335,7 +334,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
goto out;
}
- if (channels.max_combined == 0 || errno == EOPNOTSUPP)
+ if (err || channels.max_combined == 0)
/* If the device says it has no channels, then all traffic
* is sent to a single stream, so max queues = 1.
*/
@@ -517,7 +516,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
err = -errno;
goto out_socket;
}
- strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
+ memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
xsk->ifname[IFNAMSIZ - 1] = '\0';
err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 5f26620f13f5..176f2f084060 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1946,6 +1946,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
struct alternative *alt;
struct instruction *insn, *next_insn;
struct section *sec;
+ u8 visited;
int ret;
insn = first;
@@ -1972,12 +1973,12 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
return 1;
}
+ visited = 1 << state.uaccess;
if (insn->visited) {
if (!insn->hint && !insn_state_match(insn, &state))
return 1;
- /* If we were here with AC=0, but now have AC=1, go again */
- if (insn->state.uaccess || !state.uaccess)
+ if (insn->visited & visited)
return 0;
}
@@ -2024,7 +2025,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
} else
insn->state = state;
- insn->visited = true;
+ insn->visited |= visited;
if (!insn->ignore_alts) {
bool skip_orig = false;
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index b881fafcf55d..6d875ca6fce0 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -33,8 +33,9 @@ struct instruction {
unsigned int len;
enum insn_type type;
unsigned long immediate;
- bool alt_group, visited, dead_end, ignore, hint, save, restore, ignore_alts;
+ bool alt_group, dead_end, ignore, hint, save, restore, ignore_alts;
bool retpoline_safe;
+ u8 visited;
struct symbol *call_dest;
struct instruction *jump_dest;
struct instruction *first_jump_src;
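Note: the objtool change above replaces the boolean 'visited' flag with a small bitmask so each instruction is validated once per uaccess (STAC/CLAC) state. A standalone illustration of that bookkeeping, with hypothetical names rather than the objtool code itself:

#include <stdbool.h>
#include <stdint.h>

struct insn_sim {
	uint8_t visited;	/* bit 0: seen with AC=0, bit 1: seen with AC=1 */
};

/* Returns true if this branch still needs to be walked in this uaccess state. */
static bool needs_visit(struct insn_sim *insn, bool uaccess)
{
	uint8_t mask = (uint8_t)(1u << uaccess);

	if (insn->visited & mask)
		return false;
	insn->visited |= mask;
	return true;
}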
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index d4e2e18a5881..caaab28f8400 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -228,11 +228,11 @@ OPTIONS
With the metric option perf script can compute metrics for
sampling periods, similar to perf stat. This requires
- specifying a group with multiple metrics with the :S option
+ specifying a group with multiple events defining metrics with the :S option
for perf record. perf will sample on the first event, and
- compute metrics for all the events in the group. Please note
+ print computed metrics for all the events in the group. Please note
that the metric computed is averaged over the whole sampling
- period, not just for the sample point.
+ period (since the last sample), not just for the sample point.
For sample events it's possible to display misc field with -F +misc option,
following letters are displayed for each bit:
@@ -384,7 +384,7 @@ include::itrace.txt[]
perf script --time 0%-10%,30%-40%
--max-blocks::
- Set the maximum number of program blocks to print with brstackasm for
+ Set the maximum number of program blocks to print with brstackinsn for
each sample.
--reltime::
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index 5f54feb19977..d030c87ed9f5 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -126,7 +126,7 @@ vendor,family,model,stepping. For example: GenuineIntel,6,69,1
HEADER_TOTAL_MEM = 10,
-An uint64_t with the total memory in bytes.
+An uint64_t with the total memory in kilobytes.
HEADER_CMDLINE = 11,
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index b4e6f9e6204a..c29976eca4a8 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -355,6 +355,8 @@
431 common fsconfig __x64_sys_fsconfig
432 common fsmount __x64_sys_fsmount
433 common fspick __x64_sys_fspick
+434 common pidfd_open __x64_sys_pidfd_open
+435 common clone3 __x64_sys_clone3/ptregs
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 6418782951a4..3d0ffd41fb55 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -698,6 +698,16 @@ __cmd_probe(int argc, const char **argv)
ret = perf_add_probe_events(params.events, params.nevents);
if (ret < 0) {
+
+ /*
+ * When perf_add_probe_events() fails it calls
+ * cleanup_perf_probe_events(pevs, npevs), i.e.
+ * cleanup_perf_probe_events(params.events, params.nevents), which
+ * will call clear_perf_probe_event(), so set nevents to zero
+ * to avoid cleanup_params() to call clear_perf_probe_event() again
+ * on the same pevs.
+ */
+ params.nevents = 0;
pr_err_with_code(" Error: Failed to add events.", ret);
return ret;
}
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 8f24865596af..0140ddb8dd0b 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1059,7 +1059,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
if (ip == end) {
- printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp,
+ printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, ++insn, fp,
&total_cycles);
if (PRINT_FIELD(SRCCODE))
printed += print_srccode(thread, x.cpumode, ip);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index b55a534b4de0..352cf39d7c2f 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -607,7 +607,13 @@ try_again:
* group leaders.
*/
read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
- perf_evlist__close(evsel_list);
+
+ /*
+ * We need to keep evsel_list alive, because it's processed
+ * later the evsel_list will be closed after.
+ */
+ if (!STAT_RECORD)
+ perf_evlist__close(evsel_list);
return WEXITSTATUS(status);
}
@@ -1997,6 +2003,7 @@ int cmd_stat(int argc, const char **argv)
perf_session__write_header(perf_stat.session, evsel_list, fd, true);
}
+ perf_evlist__close(evsel_list);
perf_session__delete(perf_stat.session);
}
diff --git a/tools/perf/trace/beauty/usbdevfs_ioctl.sh b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
index 930b80f422e8..aa597ae53747 100755
--- a/tools/perf/trace/beauty/usbdevfs_ioctl.sh
+++ b/tools/perf/trace/beauty/usbdevfs_ioctl.sh
@@ -3,10 +3,13 @@
[ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/
+# also as:
+# #define USBDEVFS_CONNINFO_EX(len) _IOC(_IOC_READ, 'U', 32, len)
+
printf "static const char *usbdevfs_ioctl_cmds[] = {\n"
-regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)[[:space:]]+_IO[WR]{0,2}\([[:space:]]*'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
-egrep $regex ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
- sed -r "s/$regex/\2 \1/g" | \
+regex="^#[[:space:]]*define[[:space:]]+USBDEVFS_(\w+)(\(\w+\))?[[:space:]]+_IO[CWR]{0,2}\([[:space:]]*(_IOC_\w+,[[:space:]]*)?'U'[[:space:]]*,[[:space:]]*([[:digit:]]+).*"
+egrep "$regex" ${header_dir}/usbdevice_fs.h | egrep -v 'USBDEVFS_\w+32[[:space:]]' | \
+ sed -r "s/$regex/\4 \1/g" | \
sort | xargs printf "\t[%s] = \"%s\",\n"
printf "};\n\n"
printf "#if 0\n"
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index ebb46da4dfe5..52459dd5ad0c 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1291,6 +1291,7 @@ static void perf_evsel__free_id(struct perf_evsel *evsel)
xyarray__delete(evsel->sample_id);
evsel->sample_id = NULL;
zfree(&evsel->id);
+ evsel->ids = 0;
}
static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
@@ -2077,6 +2078,7 @@ void perf_evsel__close(struct perf_evsel *evsel)
perf_evsel__close_fd(evsel);
perf_evsel__free_fd(evsel);
+ perf_evsel__free_id(evsel);
}
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index c24db7f4909c..1903d7ec9797 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3559,6 +3559,13 @@ int perf_session__read_header(struct perf_session *session)
data->file.path);
}
+ if (f_header.attr_size == 0) {
+ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+ "Was the 'perf record' command properly terminated?\n",
+ data->file.path);
+ return -EINVAL;
+ }
+
nr_attrs = f_header.attrs.size / f_header.attr_size;
lseek(fd, f_header.attrs.offset, SEEK_SET);
@@ -3639,7 +3646,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
size += sizeof(struct perf_event_header);
size += ids * sizeof(u64);
- ev = malloc(size);
+ ev = zalloc(size);
if (ev == NULL)
return -ENOMEM;
@@ -3747,7 +3754,7 @@ int perf_event__process_feature(struct perf_session *session,
return 0;
ff.buf = (void *)fe->data;
- ff.size = event->header.size - sizeof(event->header);
+ ff.size = event->header.size - sizeof(*fe);
ff.ph = &session->header;
if (feat_ops[feat].process(&ff, NULL))
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index cd1eb73cfe83..8394d48f8b32 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2230,6 +2230,7 @@ void clear_perf_probe_event(struct perf_probe_event *pev)
field = next;
}
}
+ pev->nargs = 0;
zfree(&pev->args);
}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index d0fd6c614e68..37efa1f43d8b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -36,10 +36,16 @@ static int perf_session__process_compressed_event(struct perf_session *session,
void *src;
size_t decomp_size, src_size;
u64 decomp_last_rem = 0;
- size_t decomp_len = session->header.env.comp_mmap_len;
+ size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
struct decomp *decomp, *decomp_last = session->decomp_last;
- decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
+ if (decomp_last) {
+ decomp_last_rem = decomp_last->size - decomp_last->head;
+ decomp_len += decomp_last_rem;
+ }
+
+ mmap_len = sizeof(struct decomp) + decomp_len;
+ decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
if (decomp == MAP_FAILED) {
pr_err("Couldn't allocate memory for decompression\n");
@@ -47,10 +53,10 @@ static int perf_session__process_compressed_event(struct perf_session *session,
}
decomp->file_pos = file_offset;
+ decomp->mmap_len = mmap_len;
decomp->head = 0;
- if (decomp_last) {
- decomp_last_rem = decomp_last->size - decomp_last->head;
+ if (decomp_last_rem) {
memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
decomp->size = decomp_last_rem;
}
@@ -61,7 +67,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
if (!decomp_size) {
- munmap(decomp, sizeof(struct decomp) + decomp_len);
+ munmap(decomp, mmap_len);
pr_err("Couldn't decompress data\n");
return -1;
}
@@ -255,15 +261,15 @@ static void perf_session__delete_threads(struct perf_session *session)
static void perf_session__release_decomp_events(struct perf_session *session)
{
struct decomp *next, *decomp;
- size_t decomp_len;
+ size_t mmap_len;
next = session->decomp;
- decomp_len = session->header.env.comp_mmap_len;
do {
decomp = next;
if (decomp == NULL)
break;
next = decomp->next;
- munmap(decomp, decomp_len + sizeof(struct decomp));
+ mmap_len = decomp->mmap_len;
+ munmap(decomp, mmap_len);
} while (1);
}
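
The session.c and session.h changes work together: decompression buffers are no longer all comp_mmap_len bytes, because a buffer that carries over the undecompressed remainder of the previous one is mapped larger, so each struct decomp now records the exact length to hand back to munmap(). A minimal sketch of that "remember your own mapped length" pattern, with illustrative names (struct region, region_new, region_free) rather than the perf code itself:

  #include <stddef.h>
  #include <sys/mman.h>

  struct region {
          size_t mmap_len;   /* exact length passed to mmap() */
          char   data[];     /* payload follows the header */
  };

  static struct region *region_new(size_t payload)
  {
          size_t len = sizeof(struct region) + payload;
          struct region *r = mmap(NULL, len, PROT_READ|PROT_WRITE,
                                  MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);

          if (r == MAP_FAILED)
                  return NULL;
          r->mmap_len = len;
          return r;
  }

  static void region_free(struct region *r)
  {
          munmap(r, r->mmap_len);   /* no guessing from global state */
  }
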
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index dd8920b745bc..863dbad87849 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -46,6 +46,7 @@ struct perf_session {
struct decomp {
struct decomp *next;
u64 file_pos;
+ size_t mmap_len;
u64 head;
size_t size;
char data[];
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 656065af4971..accb1bf1cfd8 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -819,7 +819,8 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
"stalled cycles per insn",
ratio);
} else if (have_frontend_stalled) {
- print_metric(config, ctxp, NULL, NULL,
+ out->new_line(config, ctxp);
+ print_metric(config, ctxp, NULL, "%7.2f ",
"stalled cycles per insn", 0);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
index 23bdb9884576..d2202392ffdb 100644
--- a/tools/perf/util/zstd.c
+++ b/tools/perf/util/zstd.c
@@ -99,8 +99,8 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
while (input.pos < input.size) {
ret = ZSTD_decompressStream(data->dstream, &output, &input);
if (ZSTD_isError(ret)) {
- pr_err("failed to decompress (B): %ld -> %ld : %s\n",
- src_size, output.size, ZSTD_getErrorName(ret));
+ pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n",
+ src_size, output.size, dst_size, ZSTD_getErrorName(ret));
break;
}
output.dst = dst + output.pos;
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
index 495066bafbe3..ded7a950dc40 100644
--- a/tools/scripts/Makefile.include
+++ b/tools/scripts/Makefile.include
@@ -32,7 +32,6 @@ EXTRA_WARNINGS += -Wno-system-headers
EXTRA_WARNINGS += -Wold-style-definition
EXTRA_WARNINGS += -Wpacked
EXTRA_WARNINGS += -Wredundant-decls
-EXTRA_WARNINGS += -Wshadow
EXTRA_WARNINGS += -Wstrict-prototypes
EXTRA_WARNINGS += -Wswitch-default
EXTRA_WARNINGS += -Wswitch-enum
@@ -69,8 +68,16 @@ endif
# will do for now and keep the above -Wstrict-aliasing=3 in place
# in newer systems.
# Needed for the __raw_cmpxchg in tools/arch/x86/include/asm/cmpxchg.h
+#
+# See https://lkml.org/lkml/2006/11/28/253 and https://gcc.gnu.org/gcc-4.8/changes.html,
+# which take Linus's comments into account (search for Wshadow), for the reasoning on why
+# -Wshadow is not interesting before gcc 4.8.
+
ifneq ($(filter 3.%,$(MAKE_VERSION)),) # make-3
EXTRA_WARNINGS += -fno-strict-aliasing
+EXTRA_WARNINGS += -Wno-shadow
+else
+EXTRA_WARNINGS += -Wshadow
endif
ifneq ($(findstring $(MAKEFLAGS), w),w)
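
For reference, a small stand-alone example of what -Wshadow complains about, which is the class of warning the comment above argues is only worth enabling on newer compilers (this snippet is not part of the patch, just an illustration):

  #include <stdio.h>

  int main(void)
  {
          int ret = 0;

          for (int i = 0; i < 3; i++) {
                  int ret = i * 2;   /* -Wshadow: declaration shadows the outer 'ret' */
                  printf("%d\n", ret);
          }
          return ret;
  }
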
diff --git a/tools/testing/ktest/config-bisect.pl b/tools/testing/ktest/config-bisect.pl
index 72525426654b..6fd864935319 100755
--- a/tools/testing/ktest/config-bisect.pl
+++ b/tools/testing/ktest/config-bisect.pl
@@ -663,7 +663,7 @@ while ($#ARGV >= 0) {
}
else {
- die "Unknow option $opt\n";
+ die "Unknown option $opt\n";
}
}
@@ -732,7 +732,7 @@ if ($start) {
}
}
run_command "cp $good_start $good" or die "failed to copy to $good\n";
- run_command "cp $bad_start $bad" or die "faield to copy to $bad\n";
+ run_command "cp $bad_start $bad" or die "failed to copy to $bad\n";
} else {
if ( ! -f $good ) {
die "Can not find file $good\n";
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 11c9c62c3362..c085964e1d05 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -57,7 +57,8 @@ TEST_PROGS := test_kmod.sh \
test_lirc_mode2.sh \
test_skb_cgroup_id.sh \
test_flow_dissector.sh \
- test_xdp_vlan.sh \
+ test_xdp_vlan_mode_generic.sh \
+ test_xdp_vlan_mode_native.sh \
test_lwt_ip_encap.sh \
test_tcp_check_syncookie.sh \
test_tc_tunnel.sh \
diff --git a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
index 5aeaa284fc47..a68062820410 100644
--- a/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/progs/sendmsg6_prog.c
@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
}
/* Rewrite destination. */
- if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
- ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
+ if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
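
The simplified check above compares the whole first 32-bit word of the destination address against bpf_htonl(0xFACEB00C) instead of splitting it into two host-order halves. A quick host-side demonstration (ordinary socket headers, not BPF) of why the one-word form is the portable one: on a little-endian machine all three comparisons below print 1, while on a big-endian one only the first does, since the halves of the host-order word no longer line up with the htons() constants:

  #include <arpa/inet.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* what user_ip6[0] holds for an address starting fa:ce:b0:0c */
          uint32_t w = htonl(0xFACEB00CU);

          printf("full word == htonl(0xFACEB00C): %d\n", w == htonl(0xFACEB00CU));
          printf("low half  == htons(0xFACE):     %d\n", (w & 0xFFFF) == htons(0xFACE));
          printf("high half == htons(0xB00C):     %d\n", (w >> 16) == htons(0xB00C));
          return 0;
  }
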
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan.sh b/tools/testing/selftests/bpf/test_xdp_vlan.sh
index 51a3a31d1aac..bb8b0da91686 100755
--- a/tools/testing/selftests/bpf/test_xdp_vlan.sh
+++ b/tools/testing/selftests/bpf/test_xdp_vlan.sh
@@ -1,6 +1,14 @@
#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Author: Jesper Dangaard Brouer <hawk@kernel.org>
-TESTNAME=xdp_vlan
+# Allow wrapper scripts to name test
+if [ -z "$TESTNAME" ]; then
+ TESTNAME=xdp_vlan
+fi
+
+# Default XDP mode
+XDP_MODE=xdpgeneric
usage() {
echo "Testing XDP + TC eBPF VLAN manipulations: $TESTNAME"
@@ -9,9 +17,23 @@ usage() {
echo " -v | --verbose : Verbose"
echo " --flush : Flush before starting (e.g. after --interactive)"
echo " --interactive : Keep netns setup running after test-run"
+ echo " --mode=XXX : Choose XDP mode (xdp | xdpgeneric | xdpdrv)"
echo ""
}
+valid_xdp_mode()
+{
+ local mode=$1
+
+ case "$mode" in
+ xdpgeneric | xdpdrv | xdp)
+ return 0
+ ;;
+ *)
+ return 1
+ esac
+}
+
cleanup()
{
local status=$?
@@ -37,7 +59,7 @@ cleanup()
# Using external program "getopt" to get --long-options
OPTIONS=$(getopt -o hvfi: \
- --long verbose,flush,help,interactive,debug -- "$@")
+ --long verbose,flush,help,interactive,debug,mode: -- "$@")
if (( $? != 0 )); then
usage
echo "selftests: $TESTNAME [FAILED] Error calling getopt, unknown option?"
@@ -60,6 +82,11 @@ while true; do
cleanup
shift
;;
+ --mode )
+ shift
+ XDP_MODE=$1
+ shift
+ ;;
-- )
shift
break
@@ -81,8 +108,14 @@ if [ "$EUID" -ne 0 ]; then
exit 1
fi
-ip link set dev lo xdp off 2>/dev/null > /dev/null
-if [ $? -ne 0 ];then
+valid_xdp_mode $XDP_MODE
+if [ $? -ne 0 ]; then
+ echo "selftests: $TESTNAME [FAILED] unknown XDP mode ($XDP_MODE)"
+ exit 1
+fi
+
+ip link set dev lo xdpgeneric off 2>/dev/null > /dev/null
+if [ $? -ne 0 ]; then
echo "selftests: $TESTNAME [SKIP] need ip xdp support"
exit 0
fi
@@ -155,7 +188,7 @@ ip netns exec ns2 ip link set lo up
# At this point, the hosts cannot reach each other,
# because ns2 is using VLAN tags on the packets.
-ip netns exec ns2 sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Okay ping fails"'
+ip netns exec ns2 sh -c 'ping -W 1 -c 1 100.64.41.1 || echo "Success: First ping must fail"'
# Now we can use the test_xdp_vlan.c program to pop/push these VLAN tags
@@ -166,7 +199,7 @@ export FILE=test_xdp_vlan.o
# First test: Remove VLAN by setting VLAN ID 0, using "xdp_vlan_change"
export XDP_PROG=xdp_vlan_change
-ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG
+ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
# In ns1: egress use TC to add back VLAN tag 4011
# (del cmd)
@@ -177,8 +210,8 @@ ip netns exec ns1 tc filter add dev $DEVNS1 egress \
prio 1 handle 1 bpf da obj $FILE sec tc_vlan_push
# Now the namespaces can reach each other; test with ping:
-ip netns exec ns2 ping -W 2 -c 3 $IPADDR1
-ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
+ip netns exec ns2 ping -i 0.2 -W 2 -c 2 $IPADDR1
+ip netns exec ns1 ping -i 0.2 -W 2 -c 2 $IPADDR2
# Second test: Replace xdp prog, that fully remove vlan header
#
@@ -187,9 +220,9 @@ ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
# ETH_P_8021Q indication, and this cause overwriting of our changes.
#
export XDP_PROG=xdp_vlan_remove_outer2
-ip netns exec ns1 ip link set $DEVNS1 xdp off
-ip netns exec ns1 ip link set $DEVNS1 xdp object $FILE section $XDP_PROG
+ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE off
+ip netns exec ns1 ip link set $DEVNS1 $XDP_MODE object $FILE section $XDP_PROG
# Now the namespaces should still be able to reach each other; test with ping:
-ip netns exec ns2 ping -W 2 -c 3 $IPADDR1
-ip netns exec ns1 ping -W 2 -c 3 $IPADDR2
+ip netns exec ns2 ping -i 0.2 -W 2 -c 2 $IPADDR1
+ip netns exec ns1 ping -i 0.2 -W 2 -c 2 $IPADDR2
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
new file mode 100755
index 000000000000..c515326d6d59
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_vlan_mode_generic.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Exit on failure
+set -e
+
+# Wrapper script to test generic-XDP
+export TESTNAME=xdp_vlan_mode_generic
+./test_xdp_vlan.sh --mode=xdpgeneric
diff --git a/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh b/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
new file mode 100755
index 000000000000..5cf7ce1f16c1
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_xdp_vlan_mode_native.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# Exit on failure
+set -e
+
+# Wrapper script to test native-XDP
+export TESTNAME=xdp_vlan_mode_native
+./test_xdp_vlan.sh --mode=xdpdrv
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index b0fda2877119..d438193804b2 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -975,6 +975,17 @@
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
+ "read gso_segs from CGROUP_SKB",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+ offsetof(struct __sk_buff, gso_segs)),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+},
+{
"write gso_segs from CGROUP_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
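
The new verifier test above is written as raw instructions; in restricted C it corresponds, roughly, to a cgroup/skb program that just loads gso_segs and returns 0. A hedged sketch of that equivalent (illustrative only, not part of the selftest; SEC() is defined locally instead of pulling in a helpers header):

  #include <linux/bpf.h>

  #define SEC(name) __attribute__((section(name), used))

  SEC("cgroup_skb/ingress")
  int read_gso_segs(struct __sk_buff *skb)
  {
          /* the 32-bit load the verifier must now ACCEPT for CGROUP_SKB */
          volatile __u32 segs = skb->gso_segs;

          (void)segs;
          return 0;   /* BPF_MOV64_IMM(BPF_REG_0, 0); BPF_EXIT_INSN() */
  }
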
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 4c223266299a..bdb69599c4bd 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -191,8 +191,7 @@ int cg_find_unified_root(char *root, size_t len)
strtok(NULL, delim);
strtok(NULL, delim);
- if (strcmp(fs, "cgroup") == 0 &&
- strcmp(type, "cgroup2") == 0) {
+ if (strcmp(type, "cgroup2") == 0) {
strncpy(root, mount, len);
return 0;
}
diff --git a/tools/testing/selftests/kmod/kmod.sh b/tools/testing/selftests/kmod/kmod.sh
index 0a76314b4414..8b944cf042f6 100755
--- a/tools/testing/selftests/kmod/kmod.sh
+++ b/tools/testing/selftests/kmod/kmod.sh
@@ -28,7 +28,7 @@
# override by exporting to your environment prior running this script.
# For instance this script assumes you do not have xfs loaded upon boot.
# If this is false, export DEFAULT_KMOD_FS="ext4" prior to running this
-# script if the filesyste module you don't have loaded upon bootup
+# script if the filesystem module you don't have loaded upon bootup
# is ext4 instead. Refer to allow_user_defaults() for a list of user
# override variables possible.
#
@@ -263,7 +263,7 @@ config_get_test_result()
config_reset()
{
if ! echo -n "1" >"$DIR"/reset; then
- echo "$0: reset shuld have worked" >&2
+ echo "$0: reset should have worked" >&2
exit 1
fi
}
@@ -488,7 +488,7 @@ usage()
echo Example uses:
echo
echo "${TEST_NAME}.sh -- executes all tests"
- echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recomended"
+ echo "${TEST_NAME}.sh -t 0008 -- Executes test ID 0008 number of times is recommended"
echo "${TEST_NAME}.sh -w 0008 -- Watch test ID 0008 run until an error occurs"
echo "${TEST_NAME}.sh -s 0008 -- Run test ID 0008 once"
echo "${TEST_NAME}.sh -c 0008 3 -- Run test ID 0008 three times"
diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h
index ec15c4f6af55..0ac49d91a260 100644
--- a/tools/testing/selftests/kselftest.h
+++ b/tools/testing/selftests/kselftest.h
@@ -10,6 +10,7 @@
#ifndef __KSELFTEST_H
#define __KSELFTEST_H
+#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
@@ -81,58 +82,68 @@ static inline void ksft_print_cnts(void)
static inline void ksft_print_msg(const char *msg, ...)
{
+ int saved_errno = errno;
va_list args;
va_start(args, msg);
printf("# ");
+ errno = saved_errno;
vprintf(msg, args);
va_end(args);
}
static inline void ksft_test_result_pass(const char *msg, ...)
{
+ int saved_errno = errno;
va_list args;
ksft_cnt.ksft_pass++;
va_start(args, msg);
printf("ok %d ", ksft_test_num());
+ errno = saved_errno;
vprintf(msg, args);
va_end(args);
}
static inline void ksft_test_result_fail(const char *msg, ...)
{
+ int saved_errno = errno;
va_list args;
ksft_cnt.ksft_fail++;
va_start(args, msg);
printf("not ok %d ", ksft_test_num());
+ errno = saved_errno;
vprintf(msg, args);
va_end(args);
}
static inline void ksft_test_result_skip(const char *msg, ...)
{
+ int saved_errno = errno;
va_list args;
ksft_cnt.ksft_xskip++;
va_start(args, msg);
printf("not ok %d # SKIP ", ksft_test_num());
+ errno = saved_errno;
vprintf(msg, args);
va_end(args);
}
static inline void ksft_test_result_error(const char *msg, ...)
{
+ int saved_errno = errno;
va_list args;
ksft_cnt.ksft_error++;
va_start(args, msg);
printf("not ok %d # error ", ksft_test_num());
+ errno = saved_errno;
vprintf(msg, args);
va_end(args);
}
@@ -152,10 +163,12 @@ static inline int ksft_exit_fail(void)
static inline int ksft_exit_fail_msg(const char *msg, ...)
{
+ int saved_errno = errno;
va_list args;
va_start(args, msg);
printf("Bail out! ");
+ errno = saved_errno;
vprintf(msg, args);
va_end(args);
@@ -178,10 +191,12 @@ static inline int ksft_exit_xpass(void)
static inline int ksft_exit_skip(const char *msg, ...)
{
if (msg) {
+ int saved_errno = errno;
va_list args;
va_start(args, msg);
printf("not ok %d # SKIP ", 1 + ksft_test_num());
+ errno = saved_errno;
vprintf(msg, args);
va_end(args);
} else {
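
The errno save/restore added to the ksft_*() helpers above means the helpers' own leading printf() calls can no longer clobber errno before the caller's format string is expanded, so a message using glibc's %m conversion still reports the original failure. An illustrative caller, assuming the usual "../kselftest.h" include path of a test subdirectory:

  #include <fcntl.h>
  #include "../kselftest.h"

  int main(void)
  {
          if (open("/nonexistent", O_RDONLY) < 0)
                  ksft_exit_skip("open: %m\n");   /* %m expands open()'s errno */
          return ksft_exit_pass();
  }
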
diff --git a/tools/testing/selftests/livepatch/functions.sh b/tools/testing/selftests/livepatch/functions.sh
index 30195449c63c..79b0affd21fb 100644
--- a/tools/testing/selftests/livepatch/functions.sh
+++ b/tools/testing/selftests/livepatch/functions.sh
@@ -13,6 +13,14 @@ function log() {
echo "$1" > /dev/kmsg
}
+# skip(msg) - testing can't proceed
+# msg - explanation
+function skip() {
+ log "SKIP: $1"
+ echo "SKIP: $1" >&2
+ exit 4
+}
+
# die(msg) - game over, man
# msg - dying words
function die() {
@@ -21,13 +29,27 @@ function die() {
exit 1
}
-# set_dynamic_debug() - setup kernel dynamic debug
-# TODO - push and pop this config?
+function push_dynamic_debug() {
+ DYNAMIC_DEBUG=$(grep '^kernel/livepatch' /sys/kernel/debug/dynamic_debug/control | \
+ awk -F'[: ]' '{print "file " $1 " line " $2 " " $4}')
+}
+
+function pop_dynamic_debug() {
+ if [[ -n "$DYNAMIC_DEBUG" ]]; then
+ echo -n "$DYNAMIC_DEBUG" > /sys/kernel/debug/dynamic_debug/control
+ fi
+}
+
+# set_dynamic_debug() - save the current dynamic debug config and tweak
+# it for the self-tests. Set a script exit trap
+# that restores the original config.
function set_dynamic_debug() {
- cat << EOF > /sys/kernel/debug/dynamic_debug/control
-file kernel/livepatch/* +p
-func klp_try_switch_task -p
-EOF
+ push_dynamic_debug
+ trap pop_dynamic_debug EXIT INT TERM HUP
+ cat <<-EOF > /sys/kernel/debug/dynamic_debug/control
+ file kernel/livepatch/* +p
+ func klp_try_switch_task -p
+ EOF
}
# loop_until(cmd) - loop a command until it is successful or $MAX_RETRIES,
@@ -43,6 +65,12 @@ function loop_until() {
done
}
+function assert_mod() {
+ local mod="$1"
+
+ modprobe --dry-run "$mod" &>/dev/null
+}
+
function is_livepatch_mod() {
local mod="$1"
@@ -75,6 +103,9 @@ function __load_mod() {
function load_mod() {
local mod="$1"; shift
+ assert_mod "$mod" ||
+ skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
is_livepatch_mod "$mod" &&
die "use load_lp() to load the livepatch module $mod"
@@ -88,6 +119,9 @@ function load_mod() {
function load_lp_nowait() {
local mod="$1"; shift
+ assert_mod "$mod" ||
+ skip "unable to load module ${mod}, verify CONFIG_TEST_LIVEPATCH=m and run self-tests as root"
+
is_livepatch_mod "$mod" ||
die "module $mod is not a livepatch"
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 4ce0bc1612f5..c7cced739c34 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -17,7 +17,7 @@ tcp_inq
tls
txring_overwrite
ip_defrag
+ipv6_flowlabel
+ipv6_flowlabel_mgr
so_txtime
-flowlabel
-flowlabel_mgr
tcp_fastopen_backup_key
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
index cca2baa03fb8..a8d8e8b3dc81 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -93,18 +93,10 @@ sw1_create()
ip route add vrf v$ol1 192.0.2.16/28 \
nexthop dev g1a \
nexthop dev g1b
-
- tc qdisc add dev $ul1 clsact
- tc filter add dev $ul1 egress pref 111 prot ipv4 \
- flower dst_ip 192.0.2.66 action pass
- tc filter add dev $ul1 egress pref 222 prot ipv4 \
- flower dst_ip 192.0.2.82 action pass
}
sw1_destroy()
{
- tc qdisc del dev $ul1 clsact
-
ip route del vrf v$ol1 192.0.2.16/28
ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
@@ -139,10 +131,18 @@ sw2_create()
ip route add vrf v$ol2 192.0.2.0/28 \
nexthop dev g2a \
nexthop dev g2b
+
+ tc qdisc add dev $ul2 clsact
+ tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
+ flower vlan_id 222 action pass
}
sw2_destroy()
{
+ tc qdisc del dev $ul2 clsact
+
ip route del vrf v$ol2 192.0.2.0/28
ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
@@ -187,12 +187,16 @@ setup_prepare()
sw1_create
sw2_create
h2_create
+
+ forwarding_enable
}
cleanup()
{
pre_cleanup
+ forwarding_restore
+
h2_destroy
sw2_destroy
sw1_destroy
@@ -211,15 +215,15 @@ multipath4_test()
nexthop dev g1a weight $weight1 \
nexthop dev g1b weight $weight2
- local t0_111=$(tc_rule_stats_get $ul1 111 egress)
- local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
-d 1msec -t udp "sp=1024,dp=0-32768"
- local t1_111=$(tc_rule_stats_get $ul1 111 egress)
- local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
local d111=$((t1_111 - t0_111))
local d222=$((t1_222 - t0_222))
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 090fff9dbc48..4c285b6e1db8 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -25,6 +25,80 @@
#define TLS_PAYLOAD_MAX_LEN 16384
#define SOL_TLS 282
+#ifndef ENOTSUPP
+#define ENOTSUPP 524
+#endif
+
+FIXTURE(tls_basic)
+{
+ int fd, cfd;
+ bool notls;
+};
+
+FIXTURE_SETUP(tls_basic)
+{
+ struct sockaddr_in addr;
+ socklen_t len;
+ int sfd, ret;
+
+ self->notls = false;
+ len = sizeof(addr);
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ self->fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(self->fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ self->cfd = accept(sfd, &addr, &len);
+ ASSERT_GE(self->cfd, 0);
+
+ close(sfd);
+
+ ret = setsockopt(self->fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ if (ret != 0) {
+ ASSERT_EQ(errno, ENOENT);
+ self->notls = true;
+ printf("Failure setting TCP_ULP, testing without tls\n");
+ return;
+ }
+
+ ret = setsockopt(self->cfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ ASSERT_EQ(ret, 0);
+}
+
+FIXTURE_TEARDOWN(tls_basic)
+{
+ close(self->fd);
+ close(self->cfd);
+}
+
+/* Send some data through with ULP but no keys */
+TEST_F(tls_basic, base_base)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ ASSERT_EQ(strlen(test_str) + 1, send_len);
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+};
+
FIXTURE(tls)
{
int fd, cfd;
@@ -165,6 +239,16 @@ TEST_F(tls, msg_more)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST_F(tls, msg_more_unsent)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
+ EXPECT_EQ(recv(self->cfd, buf, send_len, MSG_DONTWAIT), -1);
+}
+
TEST_F(tls, sendmsg_single)
{
struct msghdr msg;
@@ -610,6 +694,42 @@ TEST_F(tls, recv_lowat)
EXPECT_EQ(memcmp(send_mem, recv_mem + 10, 5), 0);
}
+TEST_F(tls, bidir)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+ int ret;
+
+ if (!self->notls) {
+ struct tls12_crypto_info_aes_gcm_128 tls12;
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_3_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_128;
+
+ ret = setsockopt(self->fd, SOL_TLS, TLS_RX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(self->cfd, SOL_TLS, TLS_TX, &tls12,
+ sizeof(tls12));
+ ASSERT_EQ(ret, 0);
+ }
+
+ ASSERT_EQ(strlen(test_str) + 1, send_len);
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ memset(buf, 0, sizeof(buf));
+
+ EXPECT_EQ(send(self->cfd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->fd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+};
+
TEST_F(tls, pollin)
{
char const *test_str = "test_poll";
@@ -837,6 +957,109 @@ TEST_F(tls, control_msg)
EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
}
+TEST_F(tls, shutdown)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+ char buf[10];
+
+ ASSERT_EQ(strlen(test_str) + 1, send_len);
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, 0), send_len);
+ EXPECT_NE(recv(self->cfd, buf, send_len, 0), -1);
+ EXPECT_EQ(memcmp(buf, test_str, send_len), 0);
+
+ shutdown(self->fd, SHUT_RDWR);
+ shutdown(self->cfd, SHUT_RDWR);
+}
+
+TEST_F(tls, shutdown_unsent)
+{
+ char const *test_str = "test_read";
+ int send_len = 10;
+
+ EXPECT_EQ(send(self->fd, test_str, send_len, MSG_MORE), send_len);
+
+ shutdown(self->fd, SHUT_RDWR);
+ shutdown(self->cfd, SHUT_RDWR);
+}
+
+TEST_F(tls, shutdown_reuse)
+{
+ struct sockaddr_in addr;
+ int ret;
+
+ shutdown(self->fd, SHUT_RDWR);
+ shutdown(self->cfd, SHUT_RDWR);
+ close(self->cfd);
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ ret = bind(self->fd, &addr, sizeof(addr));
+ EXPECT_EQ(ret, 0);
+ ret = listen(self->fd, 10);
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno, EINVAL);
+
+ ret = connect(self->fd, &addr, sizeof(addr));
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno, EISCONN);
+}
+
+TEST(non_established) {
+ struct tls12_crypto_info_aes_gcm_256 tls12;
+ struct sockaddr_in addr;
+ int sfd, ret, fd;
+ socklen_t len;
+
+ len = sizeof(addr);
+
+ memset(&tls12, 0, sizeof(tls12));
+ tls12.info.version = TLS_1_2_VERSION;
+ tls12.info.cipher_type = TLS_CIPHER_AES_GCM_256;
+
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = htonl(INADDR_ANY);
+ addr.sin_port = 0;
+
+ fd = socket(AF_INET, SOCK_STREAM, 0);
+ sfd = socket(AF_INET, SOCK_STREAM, 0);
+
+ ret = bind(sfd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+ ret = listen(sfd, 10);
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ EXPECT_EQ(ret, -1);
+ /* TLS ULP not supported */
+ if (errno == ENOENT)
+ return;
+ EXPECT_EQ(errno, ENOTSUPP);
+
+ ret = setsockopt(sfd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno, ENOTSUPP);
+
+ ret = getsockname(sfd, &addr, &len);
+ ASSERT_EQ(ret, 0);
+
+ ret = connect(fd, &addr, sizeof(addr));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ ASSERT_EQ(ret, 0);
+
+ ret = setsockopt(fd, IPPROTO_TCP, TCP_ULP, "tls", sizeof("tls"));
+ EXPECT_EQ(ret, -1);
+ EXPECT_EQ(errno, EEXIST);
+
+ close(fd);
+ close(sfd);
+}
+
TEST(keysizes) {
struct tls12_crypto_info_aes_gcm_256 tls12;
struct sockaddr_in addr;
diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c
index 7eaa8a3de262..b632965e60eb 100644
--- a/tools/testing/selftests/pidfd/pidfd_test.c
+++ b/tools/testing/selftests/pidfd/pidfd_test.c
@@ -339,13 +339,9 @@ static int test_pidfd_send_signal_syscall_support(void)
ret = sys_pidfd_send_signal(pidfd, 0, NULL, 0);
if (ret < 0) {
- /*
- * pidfd_send_signal() will currently return ENOSYS when
- * CONFIG_PROC_FS is not set.
- */
if (errno == ENOSYS)
ksft_exit_skip(
- "%s test: pidfd_send_signal() syscall not supported (Ensure that CONFIG_PROC_FS=y is set)\n",
+ "%s test: pidfd_send_signal() syscall not supported\n",
test_name);
ksft_exit_fail_msg("%s test: Failed to send signal\n",
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
index cc7c7d758008..6503b1ce091f 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json
@@ -713,5 +713,99 @@
"teardown": [
"$TC actions flush action vlan"
]
+ },
+ {
+ "id": "294e",
+ "name": "Add batch of 32 vlan push actions with cookie",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan push protocol 802.1q id 4094 priority 7 pipe index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action vlan",
+ "matchPattern": "^[ \t]+index [0-9]+ ref",
+ "matchCount": "32",
+ "teardown": [
+ "$TC actions flush action vlan"
+ ]
+ },
+ {
+ "id": "56f7",
+ "name": "Delete batch of 32 vlan push actions",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ],
+ "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan push protocol 802.1q id 4094 priority 7 pipe index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
+ ],
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action vlan",
+ "matchPattern": "^[ \t]+index [0-9]+ ref",
+ "matchCount": "0",
+ "teardown": []
+ },
+ {
+ "id": "759f",
+ "name": "Add batch of 32 vlan pop actions with cookie",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ]
+ ],
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan pop continue index \\$i cookie aabbccddeeff112233445566778800a1 \\\"; args=\"\\$args\\$cmd\"; done && $TC actions add \\$args\"",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action vlan",
+ "matchPattern": "^[ \t]+index [0-9]+ ref",
+ "matchCount": "32",
+ "teardown": [
+ "$TC actions flush action vlan"
+ ]
+ },
+ {
+ "id": "c84a",
+ "name": "Delete batch of 32 vlan pop actions",
+ "category": [
+ "actions",
+ "vlan"
+ ],
+ "setup": [
+ [
+ "$TC actions flush action vlan",
+ 0,
+ 1,
+ 255
+ ],
+ "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan pop index \\$i \\\"; args=\\\"\\$args\\$cmd\\\"; done && $TC actions add \\$args\""
+ ],
+ "cmdUnderTest": "bash -c \"for i in \\`seq 1 32\\`; do cmd=\\\"action vlan index \\$i \\\"; args=\"\\$args\\$cmd\"; done && $TC actions del \\$args\"",
+ "expExitCode": "0",
+ "verifyCmd": "$TC actions list action vlan",
+ "matchPattern": "^[ \t]+index [0-9]+ ref",
+ "matchCount": "0",
+ "teardown": []
}
]
diff --git a/tools/testing/selftests/x86/test_vsyscall.c b/tools/testing/selftests/x86/test_vsyscall.c
index 4602326b8f5b..a4f4d4cf22c3 100644
--- a/tools/testing/selftests/x86/test_vsyscall.c
+++ b/tools/testing/selftests/x86/test_vsyscall.c
@@ -451,7 +451,7 @@ static int test_vsys_x(void)
printf("[OK]\tExecuting the vsyscall page failed: #PF(0x%lx)\n",
segv_err);
} else {
- printf("[FAILT]\tExecution failed with the wrong error: #PF(0x%lx)\n",
+ printf("[FAIL]\tExecution failed with the wrong error: #PF(0x%lx)\n",
segv_err);
return 1;
}
diff --git a/usr/include/Makefile b/usr/include/Makefile
index aa316d99e035..1fb6abe29b2f 100644
--- a/usr/include/Makefile
+++ b/usr/include/Makefile
@@ -101,10 +101,6 @@ ifeq ($(SRCARCH),riscv)
header-test- += linux/bpf_perf_event.h
endif
-ifeq ($(SRCARCH),s390)
-header-test- += asm/zcrypt.h
-endif
-
ifeq ($(SRCARCH),sparc)
header-test- += asm/stat.h
header-test- += asm/uctx.h
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index f645c0fbf7ec..acc43242a310 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -727,7 +727,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
* Ensure we set mode to IN_GUEST_MODE after we disable
* interrupts and before the final VCPU requests check.
* See the comment in kvm_vcpu_exiting_guest_mode() and
- * Documentation/virtual/kvm/vcpu-requests.rst
+ * Documentation/virt/kvm/vcpu-requests.rst
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 936962abc38d..c45e2d7e942f 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -250,7 +250,7 @@ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
* pending state of interrupt is latched in pending_latch variable.
* Userspace will save and restore pending state and line_level
* separately.
- * Refer to Documentation/virtual/kvm/devices/arm-vgic-v3.txt
+ * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.txt
* for handling of ISPENDR and ICPENDR.
*/
for (i = 0; i < len * 8; i++) {
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index 57205beaa981..3b7525deec80 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -42,7 +42,7 @@
VGIC_AFFINITY_LEVEL(val, 3))
/*
- * As per Documentation/virtual/kvm/devices/arm-vgic-v3.txt,
+ * As per Documentation/virt/kvm/devices/arm-vgic-v3.txt,
* below macros are defined for CPUREG encoding.
*/
#define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK 0x000000000000c000
@@ -63,7 +63,7 @@
KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
/*
- * As per Documentation/virtual/kvm/devices/arm-vgic-its.txt,
+ * As per Documentation/virt/kvm/devices/arm-vgic-its.txt,
* below macros are defined for ITS table entry encoding.
*/
#define KVM_ITS_CTE_VALID_SHIFT 63