255 files changed, 2384 insertions, 1083 deletions
diff --git a/Documentation/ABI/testing/sysfs-ata b/Documentation/ABI/testing/sysfs-ata index 2f726c914752..3daecac48964 100644 --- a/Documentation/ABI/testing/sysfs-ata +++ b/Documentation/ABI/testing/sysfs-ata @@ -107,13 +107,14 @@ Description: described in ATA8 7.16 and 7.17. Only valid if the device is not a PM. - pio_mode: (RO) Transfer modes supported by the device when - in PIO mode. Mostly used by PATA device. + pio_mode: (RO) PIO transfer mode used by the device. + Mostly used by PATA devices. - xfer_mode: (RO) Current transfer mode + xfer_mode: (RO) Current transfer mode. Mostly used by + PATA devices. - dma_mode: (RO) Transfer modes supported by the device when - in DMA mode. Mostly used by PATA device. + dma_mode: (RO) DMA transfer mode used by the device. + Mostly used by PATA devices. class: (RO) Device class. Can be "ata" for disk, "atapi" for packet device, "pmp" for PM, or diff --git a/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml b/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml new file mode 100644 index 000000000000..be1539d234f9 --- /dev/null +++ b/Documentation/devicetree/bindings/iommu/xen,grant-dma.yaml @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iommu/xen,grant-dma.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Xen specific IOMMU for virtualized devices (e.g. virtio) + +maintainers: + - Stefano Stabellini <sstabellini@kernel.org> + +description: + The Xen IOMMU represents the Xen grant table interface. Grant mappings + are to be used with devices connected to the Xen IOMMU using the "iommus" + property, which also specifies the ID of the backend domain. + The binding is required to restrict memory access using Xen grant mappings. + +properties: + compatible: + const: xen,grant-dma + + '#iommu-cells': + const: 1 + description: + The single cell is the domid (domain ID) of the domain where the backend + is running. 
+ +required: + - compatible + - "#iommu-cells" + +additionalProperties: false + +examples: + - | + iommu { + compatible = "xen,grant-dma"; + #iommu-cells = <1>; + }; diff --git a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml index b672202fff4e..5ecdac9de484 100644 --- a/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml +++ b/Documentation/devicetree/bindings/mmc/brcm,sdhci-brcmstb.yaml @@ -75,7 +75,6 @@ examples: sd-uhs-sdr104; sdhci,auto-cmd12; interrupts = <0x0 0x26 0x4>; - interrupt-names = "sdio0_0"; clocks = <&scmi_clk 245>; clock-names = "sw_sdio"; }; @@ -94,7 +93,6 @@ examples: non-removable; bus-width = <0x8>; interrupts = <0x0 0x27 0x4>; - interrupt-names = "sdio1_0"; clocks = <&scmi_clk 245>; clock-names = "sw_sdio"; }; diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml index c79639e9027e..aca1a4a8daea 100644 --- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml +++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.yaml @@ -56,6 +56,9 @@ properties: - const: core - const: axi + interrupts: + maxItems: 1 + marvell,xenon-sdhc-id: $ref: /schemas/types.yaml#/definitions/uint32 minimum: 0 diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index a80a59941d2f..3276c3d55142 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -37,30 +37,31 @@ The network filesystem helper library needs a place to store a bit of state for its use on each netfs inode it is helping to manage. To this end, a context structure is defined:: - struct netfs_i_context { + struct netfs_inode { + struct inode inode; const struct netfs_request_ops *ops; - struct fscache_cookie *cache; + struct fscache_cookie *cache; }; -A network filesystem that wants to use netfs lib must place one of these -directly after the VFS ``struct inode`` it allocates, usually as part of its -own struct. This can be done in a way similar to the following:: +A network filesystem that wants to use netfs lib must place one of these in its +inode wrapper struct instead of the VFS ``struct inode``. This can be done in +a way similar to the following:: struct my_inode { - struct { - /* These must be contiguous */ - struct inode vfs_inode; - struct netfs_i_context netfs_ctx; - }; + struct netfs_inode netfs; /* Netfslib context and vfs inode */ ... }; -This allows netfslib to find its state by simple offset from the inode pointer, -thereby allowing the netfslib helper functions to be pointed to directly by the -VFS/VM operation tables. +This allows netfslib to find its state by using ``container_of()`` from the +inode pointer, thereby allowing the netfslib helper functions to be pointed to +directly by the VFS/VM operation tables. The structure contains the following fields: + * ``inode`` + + The VFS inode structure. + * ``ops`` The set of operations provided by the network filesystem to netfslib. @@ -78,14 +79,12 @@ To help deal with the per-inode context, a number helper functions are provided. 
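Before those helpers are enumerated, the wrapper layout that the rewritten documentation describes can be condensed into a short, self-contained sketch. This is editorial illustration, not the kernel's exact source: the structures paraphrase the documentation hunk above, the helper mirrors the cast function the hunk declares (rendered there as ``netfs_node()``, evidently a typo for ``netfs_inode()``), and a kernel build context is assumed for ``struct inode`` and ``container_of()``:

    #include <linux/fs.h>           /* struct inode */
    #include <linux/container_of.h> /* container_of() */

    struct netfs_request_ops;
    struct fscache_cookie;

    /* The netfs context embeds the VFS inode as its first field. */
    struct netfs_inode {
            struct inode inode;     /* the VFS inode */
            const struct netfs_request_ops *ops;
            struct fscache_cookie *cache;
    };

    /* A filesystem embeds the context in its own inode wrapper. */
    struct my_inode {
            struct netfs_inode netfs; /* netfs context and VFS inode */
            /* ... filesystem-private fields ... */
    };

    /* The VFS-inode-to-context cast is then a plain container_of(). */
    static inline struct netfs_inode *my_netfs_inode(struct inode *inode)
    {
            return container_of(inode, struct netfs_inode, inode);
    }

Because the context now wraps the inode rather than trailing it at a fixed offset, netfslib no longer depends on the two structures being contiguous in the filesystem's allocation.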
Firstly, a function to perform basic initialisation on a context and set the operations table pointer:: - void netfs_i_context_init(struct inode *inode, - const struct netfs_request_ops *ops); + void netfs_inode_init(struct inode *inode, + const struct netfs_request_ops *ops); -then two functions to cast between the VFS inode structure and the netfs -context:: +then a function to cast from the VFS inode structure to the netfs context:: - struct netfs_i_context *netfs_i_context(struct inode *inode); - struct inode *netfs_inode(struct netfs_i_context *ctx); + struct netfs_inode *netfs_node(struct inode *inode); and finally, a function to get the cache cookie pointer from the context attached to an inode (or NULL if fscache is disabled):: diff --git a/MAINTAINERS b/MAINTAINERS index a6d3bd9d2a8d..5453ad01514c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3757,6 +3757,13 @@ F: include/linux/bpf_lsm.h F: kernel/bpf/bpf_lsm.c F: security/bpf/ +BPFTOOL +M: Quentin Monnet <quentin@isovalent.com> +L: bpf@vger.kernel.org +S: Maintained +F: kernel/bpf/disasm.* +F: tools/bpf/bpftool/ + BROADCOM B44 10/100 ETHERNET DRIVER M: Michael Chan <michael.chan@broadcom.com> L: netdev@vger.kernel.org @@ -11257,6 +11264,7 @@ M: Damien Le Moal <damien.lemoal@opensource.wdc.com> L: linux-ide@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/libata.git +F: Documentation/ABI/testing/sysfs-ata F: Documentation/devicetree/bindings/ata/ F: drivers/ata/ F: include/linux/ata.h @@ -12696,7 +12704,6 @@ L: netdev@vger.kernel.org S: Supported W: http://www.mellanox.com Q: https://patchwork.kernel.org/project/netdevbpf/list/ -F: drivers/net/ethernet/mellanox/mlx5/core/accel/* F: drivers/net/ethernet/mellanox/mlx5/core/en_accel/* F: drivers/net/ethernet/mellanox/mlx5/core/fpga/* F: include/linux/mlx5/mlx5_ifc_fpga.h @@ -15824,6 +15831,14 @@ S: Maintained F: Documentation/devicetree/bindings/iio/chemical/plantower,pms7003.yaml F: drivers/iio/chemical/pms7003.c +PLATFORM FEATURE INFRASTRUCTURE +M: Juergen Gross <jgross@suse.com> +S: Maintained +F: arch/*/include/asm/platform-feature.h +F: include/asm-generic/platform-feature.h +F: include/linux/platform-feature.h +F: kernel/platform-feature.c + PLDMFW LIBRARY M: Jacob Keller <jacob.e.keller@intel.com> S: Maintained @@ -788,6 +788,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong KBUILD_CFLAGS += $(stackp-flags-y) KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror +KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH) ifdef CONFIG_CC_IS_CLANG @@ -805,6 +806,9 @@ endif KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable) KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable) +# These result in bogus false positives +KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer) + ifdef CONFIG_FRAME_POINTER KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls else diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h new file mode 100644 index 000000000000..7ebb7eb0bd93 --- /dev/null +++ b/arch/arm/include/asm/xen/xen-ops.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <xen/arm/xen-ops.h> diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82ffac621854..059cce018570 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -33,7 +33,7 @@ #include <asm/dma-iommu.h> #include <asm/mach/map.h> #include 
<asm/system_info.h> -#include <xen/swiotlb-xen.h> +#include <asm/xen/xen-ops.h> #include "dma.h" #include "mm.h" @@ -2287,10 +2287,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, set_dma_ops(dev, dma_ops); -#ifdef CONFIG_XEN - if (xen_initial_domain()) - dev->dma_ops = &xen_swiotlb_dma_ops; -#endif + xen_setup_dma_ops(dev); dev->archdata.dma_ops_setup = true; } diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 07eb69f9e7df..1f9c3ba32833 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -443,6 +443,8 @@ static int __init xen_guest_init(void) if (!xen_domain()) return 0; + xen_set_restricted_virtio_memory_access(); + if (!acpi_disabled) xen_acpi_guest_init(); else diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h new file mode 100644 index 000000000000..7ebb7eb0bd93 --- /dev/null +++ b/arch/arm64/include/asm/xen/xen-ops.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include <xen/arm/xen-ops.h> diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 6719f9efea09..6099c81b9322 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -9,9 +9,9 @@ #include <linux/dma-map-ops.h> #include <linux/dma-iommu.h> #include <xen/xen.h> -#include <xen/swiotlb-xen.h> #include <asm/cacheflush.h> +#include <asm/xen/xen-ops.h> void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, enum dma_data_direction dir) @@ -52,8 +52,5 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, if (iommu) iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1); -#ifdef CONFIG_XEN - if (xen_swiotlb_detect()) - dev->dma_ops = &xen_swiotlb_dma_ops; -#endif + xen_setup_dma_ops(dev); } diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 8ab4035dea27..42f2e9a8616c 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1478,6 +1478,7 @@ skip_init_ctx: bpf_jit_binary_free(header); prog->bpf_func = NULL; prog->jited = 0; + prog->jited_len = 0; goto out_off; } bpf_jit_binary_lock_ro(header); diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index be68c1f02b79..c2ce2e60c8f0 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -223,7 +223,6 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select HAVE_IOREMAP_PROT - select HAVE_IRQ_EXIT_ON_IRQ_STACK select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA if DEFAULT_UIMAGE @@ -786,7 +785,6 @@ config THREAD_SHIFT range 13 15 default "15" if PPC_256K_PAGES default "14" if PPC64 - default "14" if KASAN default "13" help Used to define the stack size. 
The default is almost always what you diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 125328d1b980..af58f1ed3952 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -14,10 +14,16 @@ #ifdef __KERNEL__ -#if defined(CONFIG_VMAP_STACK) && CONFIG_THREAD_SHIFT < PAGE_SHIFT +#ifdef CONFIG_KASAN +#define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1) +#else +#define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT +#endif + +#if defined(CONFIG_VMAP_STACK) && MIN_THREAD_SHIFT < PAGE_SHIFT #define THREAD_SHIFT PAGE_SHIFT #else -#define THREAD_SHIFT CONFIG_THREAD_SHIFT +#define THREAD_SHIFT MIN_THREAD_SHIFT #endif #define THREAD_SIZE (1 << THREAD_SHIFT) diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 2e2a2a9bcf43..f91f0f29a566 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -37,6 +37,8 @@ KASAN_SANITIZE_paca.o := n KASAN_SANITIZE_setup_64.o := n KASAN_SANITIZE_mce.o := n KASAN_SANITIZE_mce_power.o := n +KASAN_SANITIZE_udbg.o := n +KASAN_SANITIZE_udbg_16550.o := n # we have to be particularly careful in ppc64 to exclude code that # runs with translations off, as we cannot access the shadow with diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index b62046bf3bb8..ee0433809621 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -2158,12 +2158,12 @@ static unsigned long ___get_wchan(struct task_struct *p) return 0; do { - sp = *(unsigned long *)sp; + sp = READ_ONCE_NOCHECK(*(unsigned long *)sp); if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) || task_is_running(p)) return 0; if (count > 0) { - ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; + ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]); if (!in_sched_functions(ip)) return ip; } diff --git a/arch/powerpc/kernel/ptrace/ptrace-fpu.c b/arch/powerpc/kernel/ptrace/ptrace-fpu.c index 5dca19361316..09c49632bfe5 100644 --- a/arch/powerpc/kernel/ptrace/ptrace-fpu.c +++ b/arch/powerpc/kernel/ptrace/ptrace-fpu.c @@ -17,9 +17,13 @@ int ptrace_get_fpr(struct task_struct *child, int index, unsigned long *data) #ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + *data = ((u32 *)child->thread.fp_state.fpr)[fpidx]; + else + memcpy(data, &child->thread.TS_FPR(fpidx), sizeof(long)); + } else *data = child->thread.fp_state.fpscr; #else *data = 0; @@ -39,9 +43,13 @@ int ptrace_put_fpr(struct task_struct *child, int index, unsigned long data) #ifdef CONFIG_PPC_FPU_REGS flush_fp_to_thread(child); - if (fpidx < (PT_FPSCR - PT_FPR0)) - memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); - else + if (fpidx < (PT_FPSCR - PT_FPR0)) { + if (IS_ENABLED(CONFIG_PPC32)) + // On 32-bit the index we are passed refers to 32-bit words + ((u32 *)child->thread.fp_state.fpr)[fpidx] = data; + else + memcpy(&child->thread.TS_FPR(fpidx), &data, sizeof(long)); + } else child->thread.fp_state.fpscr = data; #endif diff --git a/arch/powerpc/kernel/ptrace/ptrace.c b/arch/powerpc/kernel/ptrace/ptrace.c index 4d2dc22d4a2d..5d7a72b41ae7 100644 --- a/arch/powerpc/kernel/ptrace/ptrace.c +++ b/arch/powerpc/kernel/ptrace/ptrace.c @@ -444,4 +444,7 @@ void __init pt_regs_check(void) * real registers. 
*/ BUILD_BUG_ON(PT_DSCR < sizeof(struct user_pt_regs) / sizeof(unsigned long)); + + // ptrace_get/put_fpr() rely on PPC32 and VSX being incompatible + BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_VSX)); } diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 9bb43aa53d43..a6fce3106e02 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -993,8 +993,8 @@ int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...) * * Return: A pointer to the specified errorlog or NULL if not found. */ -struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, - uint16_t section_id) +noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log, + uint16_t section_id) { struct rtas_ext_event_log_v6 *ext_log = (struct rtas_ext_event_log_v6 *)log->buffer; diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c index d85fa9fc6f3c..80f54723cf6d 100644 --- a/arch/powerpc/kexec/crash.c +++ b/arch/powerpc/kexec/crash.c @@ -224,7 +224,7 @@ void crash_kexec_secondary(struct pt_regs *regs) /* wait for all the CPUs to hit real mode but timeout if they don't come in */ #if defined(CONFIG_SMP) && defined(CONFIG_PPC64) -static void __maybe_unused crash_kexec_wait_realmode(int cpu) +noinstr static void __maybe_unused crash_kexec_wait_realmode(int cpu) { unsigned int msecs; int i; diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c index 1f3f9fedf1bc..0d04f9d5da8d 100644 --- a/arch/powerpc/mm/nohash/kaslr_booke.c +++ b/arch/powerpc/mm/nohash/kaslr_booke.c @@ -19,7 +19,6 @@ #include <asm/cacheflush.h> #include <asm/kdump.h> #include <mm/mmu_decl.h> -#include <generated/compile.h> #include <generated/utsrelease.h> struct regions { @@ -37,10 +36,6 @@ struct regions { int reserved_mem_size_cells; }; -/* Simplified build-specific string for starting entropy. */ -static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" - LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; - struct regions __initdata regions; static __init void kaslr_get_cmdline(void *fdt) @@ -71,7 +66,8 @@ static unsigned long __init get_boot_seed(void *fdt) { unsigned long hash = 0; - hash = rotate_xor(hash, build_str, sizeof(build_str)); + /* build-specific string for starting entropy. 
*/ + hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); hash = rotate_xor(hash, fdt, fdt_totalsize(fdt)); return hash; diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 6488b3842199..19f0fc5c6f1b 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -4,6 +4,7 @@ # in particular, idle code runs a bunch of things in real mode KASAN_SANITIZE_idle.o := n KASAN_SANITIZE_pci-ioda.o := n +KASAN_SANITIZE_pci-ioda-tce.o := n # pnv_machine_check_early KASAN_SANITIZE_setup.o := n diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 181b855b3050..82cae08976bc 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -465,6 +465,9 @@ static int papr_scm_pmu_check_events(struct papr_scm_priv *p, struct nvdimm_pmu u32 available_events; int index, rc = 0; + if (!p->stat_buffer_len) + return -ENOENT; + available_events = (p->stat_buffer_len - sizeof(struct papr_scm_perf_stats)) / sizeof(struct papr_scm_perf_stat); if (available_events == 0) diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index b1a88f6cc349..91c0b80a8bf0 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -125,6 +125,7 @@ config S390 select CLONE_BACKWARDS2 select DMA_OPS if PCI select DYNAMIC_FTRACE if FUNCTION_TRACER + select GCC12_NO_ARRAY_BOUNDS select GENERIC_ALLOCATOR select GENERIC_CPU_AUTOPROBE select GENERIC_CPU_VULNERABILITIES @@ -768,7 +769,6 @@ menu "Virtualization" config PROTECTED_VIRTUALIZATION_GUEST def_bool n prompt "Protected virtualization guest support" - select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS help Select this option, if you want to be able to run this kernel as a protected virtualization KVM guest. 
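The s390 Kconfig hunk above drops the per-arch ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS select because this series replaces the arch_has_restricted_virtio_memory_access() hook with the generic platform-feature bitmap (see the PLATFORM FEATURE INFRASTRUCTURE entry added to MAINTAINERS above and the s390 pv_init() hunk below). A minimal sketch of that interface, under the assumption that it is the simple set/clear/test bitmap the callers in this diff imply; the real code lives in include/linux/platform-feature.h and kernel/platform-feature.c:

    #include <linux/bitops.h>
    #include <linux/types.h>

    /* Feature bits; PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS is the one
     * set by pv_init(), sev_setup_arch() and the Xen init paths in
     * this diff. The real enum carries additional arch features. */
    enum {
            PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS,
            PLATFORM_FEAT_N,
    };

    static unsigned long platform_features[BITS_TO_LONGS(PLATFORM_FEAT_N)];

    void platform_set(unsigned int feature)
    {
            set_bit(feature, platform_features);
    }

    void platform_clear(unsigned int feature)
    {
            clear_bit(feature, platform_features);
    }

    bool platform_has(unsigned int feature)
    {
            return test_bit(feature, platform_features);
    }

Guests that must restrict virtio to shared or granted memory call platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS) once during early init, and generic code queries the bit with platform_has() instead of each architecture exporting its own hook.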
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index d73611b35164..495c68a4df1e 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile @@ -32,15 +32,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += -fno-stack-protector KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, address-of-packed-member) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) - -ifdef CONFIG_CC_IS_GCC - ifeq ($(call cc-ifversion, -ge, 1200, y), y) - ifeq ($(call cc-ifversion, -lt, 1300, y), y) - KBUILD_CFLAGS += $(call cc-disable-warning, array-bounds) - KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-disable-warning, array-bounds) - endif - endif -endif +KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_CC_NO_ARRAY_BOUNDS),-Wno-array-bounds) UTS_MACHINE := s390x STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384) diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 6fb6bf64326f..6a0ac00d5a42 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -31,6 +31,7 @@ #include <linux/cma.h> #include <linux/gfp.h> #include <linux/dma-direct.h> +#include <linux/platform-feature.h> #include <asm/processor.h> #include <linux/uaccess.h> #include <asm/pgalloc.h> @@ -168,22 +169,14 @@ bool force_dma_unencrypted(struct device *dev) return is_prot_virt_guest(); } -#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS - -int arch_has_restricted_virtio_memory_access(void) -{ - return is_prot_virt_guest(); -} -EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access); - -#endif - /* protected virtualization */ static void pv_init(void) { if (!is_prot_virt_guest()) return; + platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); + /* make sure bounce buffers are shared */ swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE); swiotlb_update_mem_attributes(); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9783ebc4e021..be0b95e51df6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1542,7 +1542,6 @@ config X86_CPA_STATISTICS config X86_MEM_ENCRYPT select ARCH_HAS_FORCE_DMA_UNENCRYPTED select DYNAMIC_PHYSICAL_MASK - select ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS def_bool n config AMD_MEM_ENCRYPT diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 959d66b9be94..3a240a64ac68 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -653,6 +653,7 @@ struct kvm_vcpu_arch { u64 ia32_misc_enable_msr; u64 smbase; u64 smi_count; + bool at_instruction_boundary; bool tpr_access_reporting; bool xsaves_enabled; bool xfd_no_write_intercept; @@ -1300,6 +1301,8 @@ struct kvm_vcpu_stat { u64 nested_run; u64 directed_yield_attempted; u64 directed_yield_successful; + u64 preemption_reported; + u64 preemption_other; u64 guest_mode; }; diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 35f222aa66bf..913e593a3b45 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -439,7 +439,7 @@ do { \ [ptr] "+m" (*_ptr), \ [old] "+a" (__old) \ : [new] ltype (__new) \ - : "memory", "cc"); \ + : "memory"); \ if (unlikely(__err)) \ goto label; \ if (unlikely(!success)) \ diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index f4653688fa6d..e826ee9138fa 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -5179,7 +5179,7 @@ static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu) roots_to_free |= KVM_MMU_ROOT_CURRENT; for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { - if (is_obsolete_root(kvm, 
mmu->root.hpa)) + if (is_obsolete_root(kvm, mmu->prev_roots[i].hpa)) roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i); } diff --git a/arch/x86/kvm/mmu/tdp_iter.c b/arch/x86/kvm/mmu/tdp_iter.c index 6d3b3e5a5533..ee4802d7b36c 100644 --- a/arch/x86/kvm/mmu/tdp_iter.c +++ b/arch/x86/kvm/mmu/tdp_iter.c @@ -146,6 +146,15 @@ static bool try_step_up(struct tdp_iter *iter) } /* + * Step the iterator back up a level in the paging structure. Should only be + * used when the iterator is below the root level. + */ +void tdp_iter_step_up(struct tdp_iter *iter) +{ + WARN_ON(!try_step_up(iter)); +} + +/* * Step to the next SPTE in a pre-order traversal of the paging structure. * To get to the next SPTE, the iterator either steps down towards the goal * GFN, if at a present, non-last-level SPTE, or over to a SPTE mapping a diff --git a/arch/x86/kvm/mmu/tdp_iter.h b/arch/x86/kvm/mmu/tdp_iter.h index f0af385c56e0..adfca0cf94d3 100644 --- a/arch/x86/kvm/mmu/tdp_iter.h +++ b/arch/x86/kvm/mmu/tdp_iter.h @@ -114,5 +114,6 @@ void tdp_iter_start(struct tdp_iter *iter, struct kvm_mmu_page *root, int min_level, gfn_t next_last_level_gfn); void tdp_iter_next(struct tdp_iter *iter); void tdp_iter_restart(struct tdp_iter *iter); +void tdp_iter_step_up(struct tdp_iter *iter); #endif /* __KVM_X86_MMU_TDP_ITER_H */ diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 841feaa48be5..7b9265d67131 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1742,12 +1742,12 @@ static void zap_collapsible_spte_range(struct kvm *kvm, gfn_t start = slot->base_gfn; gfn_t end = start + slot->npages; struct tdp_iter iter; + int max_mapping_level; kvm_pfn_t pfn; rcu_read_lock(); tdp_root_for_each_pte(iter, root, start, end) { -retry: if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true)) continue; @@ -1755,15 +1755,41 @@ retry: !is_last_spte(iter.old_spte, iter.level)) continue; + /* + * This is a leaf SPTE. Check if the PFN it maps can + * be mapped at a higher level. + */ pfn = spte_to_pfn(iter.old_spte); - if (kvm_is_reserved_pfn(pfn) || - iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn, - pfn, PG_LEVEL_NUM)) + + if (kvm_is_reserved_pfn(pfn)) continue; + max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, + iter.gfn, pfn, PG_LEVEL_NUM); + + WARN_ON(max_mapping_level < iter.level); + + /* + * If this page is already mapped at the highest + * viable level, there's nothing more to do. + */ + if (max_mapping_level == iter.level) + continue; + + /* + * The page can be remapped at a higher level, so step + * up to zap the parent SPTE. + */ + while (max_mapping_level > iter.level) + tdp_iter_step_up(&iter); + /* Note, a successful atomic zap also does a remote TLB flush. */ - if (tdp_mmu_zap_spte_atomic(kvm, &iter)) - goto retry; + tdp_mmu_zap_spte_atomic(kvm, &iter); + + /* + * If the atomic zap fails, the iter will recurse back into + * the same subtree to retry. 
+ */ } rcu_read_unlock(); diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index bed5e1692cef..3361258640a2 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -982,7 +982,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm) if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) { WARN_ON(!svm->tsc_scaling_enabled); vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio; - svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); } svm->nested.ctl.nested_cr3 = 0; @@ -1387,7 +1387,7 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu) vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio, svm->tsc_ratio_msr); - svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio); + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); } /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 200045f71df0..1dc02cdf6960 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -465,11 +465,24 @@ static int has_svm(void) return 1; } +void __svm_write_tsc_multiplier(u64 multiplier) +{ + preempt_disable(); + + if (multiplier == __this_cpu_read(current_tsc_ratio)) + goto out; + + wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); + __this_cpu_write(current_tsc_ratio, multiplier); +out: + preempt_enable(); +} + static void svm_hardware_disable(void) { /* Make sure we clean up behind us */ if (tsc_scaling) - wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT); + __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT); cpu_svm_disable(); @@ -515,8 +528,7 @@ static int svm_hardware_enable(void) * Set the default value, even if we don't use TSC scaling * to avoid having stale value in the msr */ - wrmsrl(MSR_AMD64_TSC_RATIO, SVM_TSC_RATIO_DEFAULT); - __this_cpu_write(current_tsc_ratio, SVM_TSC_RATIO_DEFAULT); + __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT); } @@ -999,11 +1011,12 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); } -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) +static void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier) { - wrmsrl(MSR_AMD64_TSC_RATIO, multiplier); + __svm_write_tsc_multiplier(multiplier); } + /* Evaluate instruction intercepts that depend on guest CPUID features. 
*/ static void svm_recalc_instruction_intercepts(struct kvm_vcpu *vcpu, struct vcpu_svm *svm) @@ -1363,13 +1376,8 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu) sev_es_prepare_switch_to_guest(hostsa); } - if (tsc_scaling) { - u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio; - if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { - __this_cpu_write(current_tsc_ratio, tsc_ratio); - wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio); - } - } + if (tsc_scaling) + __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio); if (likely(tsc_aux_uret_slot >= 0)) kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); @@ -4255,6 +4263,8 @@ out: static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu) { + if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_INTR) + vcpu->arch.at_instruction_boundary = true; } static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu) diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 21c5460e947a..500348c1cb35 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -590,7 +590,7 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, bool has_error_code, u32 error_code); int nested_svm_exit_special(struct vcpu_svm *svm); void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu); -void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier); +void __svm_write_tsc_multiplier(u64 multiplier); void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, struct vmcb_control_area *control); void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index a07e8cd753ec..9bd86ecccdab 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6547,6 +6547,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu) return; handle_interrupt_nmi_irqoff(vcpu, gate_offset(desc)); + vcpu->arch.at_instruction_boundary = true; } static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e9473c7c7390..03fbfbbec460 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -296,6 +296,8 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { STATS_DESC_COUNTER(VCPU, nested_run), STATS_DESC_COUNTER(VCPU, directed_yield_attempted), STATS_DESC_COUNTER(VCPU, directed_yield_successful), + STATS_DESC_COUNTER(VCPU, preemption_reported), + STATS_DESC_COUNTER(VCPU, preemption_other), STATS_DESC_ICOUNTER(VCPU, guest_mode) }; @@ -4625,6 +4627,19 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) struct kvm_memslots *slots; static const u8 preempted = KVM_VCPU_PREEMPTED; + /* + * The vCPU can be marked preempted if and only if the VM-Exit was on + * an instruction boundary and will not trigger guest emulation of any + * kind (see vcpu_run). Vendor specific code controls (conservatively) + * when this is true, for example allowing the vCPU to be marked + * preempted if and only if the VM-Exit was due to a host interrupt. 
+ */ + if (!vcpu->arch.at_instruction_boundary) { + vcpu->stat.preemption_other++; + return; + } + + vcpu->stat.preemption_reported++; if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) return; @@ -4654,19 +4669,21 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { int idx; - if (vcpu->preempted && !vcpu->arch.guest_state_protected) - vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); + if (vcpu->preempted) { + if (!vcpu->arch.guest_state_protected) + vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); - /* - * Take the srcu lock as memslots will be accessed to check the gfn - * cache generation against the memslots generation. - */ - idx = srcu_read_lock(&vcpu->kvm->srcu); - if (kvm_xen_msr_enabled(vcpu->kvm)) - kvm_xen_runstate_set_preempted(vcpu); - else - kvm_steal_time_set_preempted(vcpu); - srcu_read_unlock(&vcpu->kvm->srcu, idx); + /* + * Take the srcu lock as memslots will be accessed to check the gfn + * cache generation against the memslots generation. + */ + idx = srcu_read_lock(&vcpu->kvm->srcu); + if (kvm_xen_msr_enabled(vcpu->kvm)) + kvm_xen_runstate_set_preempted(vcpu); + else + kvm_steal_time_set_preempted(vcpu); + srcu_read_unlock(&vcpu->kvm->srcu, idx); + } static_call(kvm_x86_vcpu_put)(vcpu); vcpu->arch.last_host_tsc = rdtsc(); @@ -10422,6 +10439,13 @@ static int vcpu_run(struct kvm_vcpu *vcpu) vcpu->arch.l1tf_flush_l1d = true; for (;;) { + /* + * If another guest vCPU requests a PV TLB flush in the middle + * of instruction emulation, the rest of the emulation could + * use a stale page translation. Assume that any code after + * this point can start executing an instruction. + */ + vcpu->arch.at_instruction_boundary = false; if (kvm_vcpu_running(vcpu)) { r = vcpu_enter_guest(vcpu); } else { diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h index ee5c4ae0755c..532a535a9e99 100644 --- a/arch/x86/kvm/xen.h +++ b/arch/x86/kvm/xen.h @@ -159,8 +159,10 @@ static inline void kvm_xen_runstate_set_preempted(struct kvm_vcpu *vcpu) * behalf of the vCPU. Only if the VMM does actually block * does it need to enter RUNSTATE_blocked. 
*/ - if (vcpu->preempted) - kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); + if (WARN_ON_ONCE(!vcpu->preempted)) + return; + + kvm_xen_update_runstate_guest(vcpu, RUNSTATE_runnable); } /* 32-bit compatibility definitions, also used natively in 32-bit build */ diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 11350e2fd736..9f27e14e185f 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -12,7 +12,6 @@ #include <linux/swiotlb.h> #include <linux/cc_platform.h> #include <linux/mem_encrypt.h> -#include <linux/virtio_config.h> /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) @@ -87,9 +86,3 @@ void __init mem_encrypt_init(void) print_mem_encrypt_feature_info(); } - -int arch_has_restricted_virtio_memory_access(void) -{ - return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT); -} -EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access); diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index e8f7953fda83..f6d038e2cd8e 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/virtio_config.h> #include <linux/cc_platform.h> +#include <linux/platform-feature.h> #include <asm/tlbflush.h> #include <asm/fixmap.h> @@ -242,6 +243,9 @@ void __init sev_setup_arch(void) size = total_mem * 6 / 100; size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G); swiotlb_adjust_size(size); + + /* Set restricted memory access for virtio. */ + platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); } static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot) diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 517a9d8d8f94..8b71b1dd7639 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -195,6 +195,8 @@ static void __init xen_hvm_guest_init(void) if (xen_pv_domain()) return; + xen_set_restricted_virtio_memory_access(); + init_hvm_pv_info(); reserve_shared_info(); diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index f33a4421e7cd..e3297b15701c 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -109,6 +109,8 @@ static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc); static void __init xen_pv_init_platform(void) { + xen_set_restricted_virtio_memory_access(); + populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP)); set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info); diff --git a/certs/extract-cert.c b/certs/extract-cert.c index f7ef7862f207..8c1fb9a70d66 100644 --- a/certs/extract-cert.c +++ b/certs/extract-cert.c @@ -23,6 +23,13 @@ #include <openssl/err.h> #include <openssl/engine.h> +/* + * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. 
+ * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + #define PKEY_ID_PKCS7 2 static __attribute__((noreturn)) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 40e816419f48..9601fa92950a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2010,16 +2010,16 @@ retry: return err_mask; } -static bool ata_log_supported(struct ata_device *dev, u8 log) +static int ata_log_supported(struct ata_device *dev, u8 log) { struct ata_port *ap = dev->link->ap; if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR) - return false; + return 0; if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) - return false; - return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; + return 0; + return get_unaligned_le16(&ap->sector_buf[log * 2]); } static bool ata_identify_page_supported(struct ata_device *dev, u8 page) @@ -2455,15 +2455,20 @@ static void ata_dev_config_cpr(struct ata_device *dev) struct ata_cpr_log *cpr_log = NULL; u8 *desc, *buf = NULL; - if (ata_id_major_version(dev->id) < 11 || - !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES)) + if (ata_id_major_version(dev->id) < 11) + goto out; + + buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES); + if (buf_len == 0) goto out; /* * Read the concurrent positioning ranges log (0x47). We can have at - * most 255 32B range descriptors plus a 64B header. + * most 255 32B range descriptors plus a 64B header. This log varies in + * size, so use the size reported in the GPL directory. Reading beyond + * the supported length will result in an error. */ - buf_len = (64 + 255 * 32 + 511) & ~511; + buf_len <<= 9; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) goto out; @@ -5462,7 +5467,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports) { - const struct ata_port_info *pi; + const struct ata_port_info *pi = &ata_dummy_port_info; struct ata_host *host; int i, j; @@ -5470,7 +5475,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, if (!host) return NULL; - for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { + for (i = 0, j = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ppi[j]) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 42cecf95a4e5..86dbb1cdfabd 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2125,7 +2125,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf) /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ rbuf[1] = 0xb9; - put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]); + put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { desc[0] = cpr_log->cpr[i].num; diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index ca129854a88c..c38027887499 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -196,7 +196,7 @@ static struct { { XFER_PIO_0, "XFER_PIO_0" }, { XFER_PIO_SLOW, "XFER_PIO_SLOW" } }; -ata_bitfield_name_match(xfer,ata_xfer_names) +ata_bitfield_name_search(xfer, ata_xfer_names) /* * ATA Port attributes diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 6b5ed3046b44..35608a0cf552 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -856,12 +856,14 @@ static int octeon_cf_probe(struct platform_device *pdev) int 
i; res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); if (!res_dma) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } cf_port->dma_base = (u64)devm_ioremap(&pdev->dev, res_dma->start, resource_size(res_dma)); if (!cf_port->dma_base) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } @@ -871,6 +873,7 @@ static int octeon_cf_probe(struct platform_device *pdev) irq = i; irq_handler = octeon_cf_interrupt; } + put_device(&dma_dev->dev); } of_node_put(dma_node); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 67abf8dcd30a..6b6d46e29e6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1918,9 +1918,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, return -EINVAL; } - /* delete kgd_mem from kfd_bo_list to avoid re-validating - * this BO in BO's restoring after eviction. - */ mutex_lock(&mem->process_info->lock); ret = amdgpu_bo_reserve(bo, true); @@ -1943,7 +1940,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, amdgpu_amdkfd_remove_eviction_fence( bo, mem->process_info->eviction_fence); - list_del_init(&mem->validate_list.head); if (size) *size = amdgpu_bo_size(bo); @@ -2512,12 +2508,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) process_info->eviction_fence = new_fence; *ef = dma_fence_get(&new_fence->base); - /* Attach new eviction fence to all BOs */ + /* Attach new eviction fence to all BOs except pinned ones */ list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) + validate_list.head) { + if (mem->bo->tbo.pin_count) + continue; + amdgpu_bo_fence(mem->bo, &process_info->eviction_fence->base, true); - + } /* Attach eviction fence to PD / PT BOs */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index ede2fa56f6c9..16699158e00d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -594,17 +594,20 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; - r = amdgpu_ras_block_late_init(adev, ras_block); - if (r) - return r; if (amdgpu_ras_is_supported(adev, ras_block->block)) { if (!amdgpu_persistent_edc_harvesting_supported(adev)) amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) goto late_fini; + } else { + amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 798c56214a23..aebc384531ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -518,6 +518,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(9, 1, 0): /* RENOIR looks like RAVEN */ case IP_VERSION(9, 3, 0): + /* GC 10.3.7 */ + case IP_VERSION(10, 3, 7): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -540,8 +542,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - /* GC 10.3.7 */ - case IP_VERSION(10, 3, 7): /* 
Don't enable it by default yet. */ if (amdgpu_tmz < 1) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2de9309a4193..dac202ae864d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -197,6 +197,13 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; + /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ + if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) + dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); + } + s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", "ue", info.ue_count, "ce", info.ce_count); @@ -550,9 +557,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - if (obj->adev->asic_type == CHIP_ALDEBARAN) { + if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) - DRM_WARN("Failed to reset error counter and error status"); + dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); } return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, @@ -1027,9 +1035,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } } - if (!amdgpu_persistent_edc_harvesting_supported(adev)) - amdgpu_ras_reset_error_status(adev, info->head.block); - return 0; } @@ -1149,6 +1154,12 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev, if (res) return res; + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(adev, info.head.block)) + dev_warn(adev->dev, "Failed to reset error counter and error status"); + } + ce += info.ce_count; ue += info.ue_count; } @@ -1792,6 +1803,12 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) continue; amdgpu_ras_query_error_status(adev, &info); + + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(adev, info.head.block)) + dev_warn(adev->dev, "Failed to reset error counter and error status"); + } } } @@ -2278,8 +2295,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) !amdgpu_ras_asic_supported(adev)) return; - if (!(amdgpu_sriov_vf(adev) && - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))) + /* If driver run on sriov guest side, only enable ras for aldebaran */ + if (amdgpu_sriov_vf(adev) && + adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) return; if (!adev->gmc.xgmi.connected_to_cpu) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2ceeaa4c793a..dc76d2b3ce52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -679,6 +679,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, { struct amdgpu_vm_update_params params; struct amdgpu_vm_bo_base *entry; + bool flush_tlb_needed = false; int r, idx; if (list_empty(&vm->relocated)) @@ -697,6 +698,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, goto error; 
list_for_each_entry(entry, &vm->relocated, vm_status) { + /* vm_flush_needed after updating moved PDEs */ + flush_tlb_needed |= entry->moved; + r = amdgpu_vm_pde_update(¶ms, entry); if (r) goto error; @@ -706,8 +710,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, if (r) goto error; - /* vm_flush_needed after updating PDEs */ - atomic64_inc(&vm->tlb_seq); + if (flush_tlb_needed) + atomic64_inc(&vm->tlb_seq); while (!list_empty(&vm->relocated)) { entry = list_first_entry(&vm->relocated, @@ -789,6 +793,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, flush_tlb |= adev->gmc.xgmi.num_physical_nodes && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0); + /* + * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB + */ + flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0); + memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 8c0a3fc7aaa6..a4a6751b1e44 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1096,6 +1096,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -1316,7 +1317,7 @@ static void gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *ade memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME)) - *(uint64_t *)fw_autoload_mask |= 1 << id; + *(uint64_t *)fw_autoload_mask |= 1ULL << id; } static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev, @@ -1983,7 +1984,7 @@ static int gfx_v11_0_init_csb(struct amdgpu_device *adev) return 0; } -void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); @@ -6028,6 +6029,7 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, break; default: BUG(); + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index a0c0b7d9f444..7f4b480ae66e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -638,6 +638,12 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); +#ifdef CONFIG_X86_64 + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { + adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev); + adev->gmc.aper_size = adev->gmc.real_vram_size; + } +#endif /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index 5d2dfeff8fe5..d63d3f2b8a16 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -299,7 +299,7 @@ static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] = IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, 
regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0) }; -void program_imu_rlc_ram(struct amdgpu_device *adev, +static void program_imu_rlc_ram(struct amdgpu_device *adev, const struct imu_rlc_ram_golden *regs, const u32 array_size) { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index d2722adabd1b..f3c1af5130ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -535,6 +535,10 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, { unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT)); + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, (vmid | (vmid << 4))); @@ -768,7 +772,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { 8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */ 18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */ 8 + 16, - .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */ + .emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */ .emit_ib = jpeg_v2_0_dec_ring_emit_ib, .emit_fence = jpeg_v2_0_dec_ring_emit_fence, .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h index 1a03baa59755..654e43e83e2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -41,6 +41,7 @@ #define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 #define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f +#define mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET 0x4149 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index fcf51947bb18..7eee004cf3ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -541,7 +541,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable) /* This function is for backdoor MES firmware */ static int mes_v11_0_load_microcode(struct amdgpu_device *adev, - enum admgpu_mes_pipe pipe) + enum admgpu_mes_pipe pipe, bool prime_icache) { int r; uint32_t data; @@ -593,16 +593,18 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev, /* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */ WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF); - /* invalidate ICACHE */ - data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); - WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); - - /* prime the ICACHE. */ - data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); - WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + if (prime_icache) { + /* invalidate ICACHE */ + data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + + /* prime the ICACHE. 
*/ + data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); @@ -1044,17 +1046,19 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) int r = 0; if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE); + + r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false); if (r) { - DRM_ERROR("failed to load MES kiq fw, r=%d\n", r); + DRM_ERROR("failed to load MES fw, r=%d\n", r); return r; } - r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE); + r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true); if (r) { - DRM_ERROR("failed to load MES fw, r=%d\n", r); + DRM_ERROR("failed to load MES kiq fw, r=%d\n", r); return r; } + } mes_v11_0_enable(adev, true); @@ -1086,7 +1090,7 @@ static int mes_v11_0_hw_init(void *handle) if (!adev->enable_mes_kiq) { if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { r = mes_v11_0_load_microcode(adev, - AMDGPU_MES_SCHED_PIPE); + AMDGPU_MES_SCHED_PIPE, true); if (r) { DRM_ERROR("failed to MES fw, r=%d\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index d016e3c3e221..b3fba8dea63c 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs yc_video_codecs_decode = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 06b2635b142a..83c6ccaaa9e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se } } + /** * sdma_v5_2_gfx_stop - stop the gfx async dma engines * @@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev) } /** - * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines - * context switch for an instance + * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch * * @adev: amdgpu_device pointer - * @instance_idx: the index of the SDMA instance + * @enable: enable/disable the DMA MEs context switch. * - * Unhalt the async dma engines context switch. + * Halt or unhalt the async dma engines context switch. 
*/ -static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx) +static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl, phase_quantum = 0; - - if (WARN_ON(instance_idx >= adev->sdma.num_instances)) { - return; - } + int i; if (amdgpu_sdma_phase_quantum) { unsigned value = amdgpu_sdma_phase_quantum; @@ -552,68 +549,50 @@ static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, phase_quantum = value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; - - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM), - phase_quantum); - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM), - phase_quantum); - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM), - phase_quantum); } - if (!amdgpu_sriov_vf(adev)) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl); + for (i = 0; i < adev->sdma.num_instances; i++) { + if (enable && amdgpu_sdma_phase_quantum) { + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), + phase_quantum); + } + + if (!amdgpu_sriov_vf(adev)) { + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + } } + } /** - * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch + * sdma_v5_2_enable - stop the async dma engines * * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. * - * Halt the async dma engines context switch. + * Halt or unhalt the async dma engines. */ -static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev) +static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl; int i; - if (amdgpu_sriov_vf(adev)) - return; - - for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + if (!enable) { + sdma_v5_2_gfx_stop(adev); + sdma_v5_2_rlc_stop(adev); } -} - -/** - * sdma_v5_2_halt - stop the async dma engines - * - * @adev: amdgpu_device pointer - * - * Halt the async dma engines. - */ -static void sdma_v5_2_halt(struct amdgpu_device *adev) -{ - int i; - u32 f32_cntl; - - sdma_v5_2_gfx_stop(adev); - sdma_v5_2_rlc_stop(adev); if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); } } @@ -625,9 +604,6 @@ static void sdma_v5_2_halt(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * * Set up the gfx DMA ring buffers and enable them. 
- * It assumes that the dma engine is stopped for each instance. - * The function enables the engine and preemptions sequentially for each instance. - * * Returns 0 for success, error for failure. */ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) @@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) ring->sched.ready = true; - sdma_v5_2_ctx_switch_enable_for_instance(adev, i); + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_2_ctx_switch_enable(adev, true); + sdma_v5_2_enable(adev, true); + } r = amdgpu_ring_test_ring(ring); if (r) { @@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) int i, j; /* halt the MEs */ - sdma_v5_2_halt(adev); + sdma_v5_2_enable(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { if (!adev->sdma.instance[i].fw) @@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) int r = 0; if (amdgpu_sriov_vf(adev)) { - sdma_v5_2_ctx_switch_disable_all(adev); - sdma_v5_2_halt(adev); + sdma_v5_2_ctx_switch_enable(adev, false); + sdma_v5_2_enable(adev, false); /* set RB registers */ r = sdma_v5_2_gfx_resume(adev); @@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); sdma_v5_2_soft_reset(adev); + /* unhalt the MEs */ + sdma_v5_2_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v5_2_ctx_switch_enable(adev, true); - /* Soft reset supposes to disable the dma engine and preemption. - * Now start the gfx rings and rlc compute queues. - */ + /* start the gfx rings and rlc compute queues */ r = sdma_v5_2_gfx_resume(adev); if (adev->in_s0ix) amdgpu_gfx_off_ctrl(adev, true); @@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) return 0; - sdma_v5_2_ctx_switch_disable_all(adev); - sdma_v5_2_halt(adev); + sdma_v5_2_ctx_switch_enable(adev, false); + sdma_v5_2_enable(adev, false); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3cabceee5f57..39405f0db824 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1761,23 +1761,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, - struct amdgpu_job *job) +static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p) { struct drm_gpu_scheduler **scheds; /* The create msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) + if (atomic_read(&p->entity->fence_seq)) return -EINVAL; scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] [AMDGPU_RING_PRIO_DEFAULT].sched; - drm_sched_entity_modify_sched(job->base.entity, scheds, 1); + drm_sched_entity_modify_sched(p->entity, scheds, 1); return 0; } -static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, - uint64_t addr) +static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr) { struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_bo_va_mapping *map; @@ -1848,7 +1846,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) continue; - r = vcn_v3_0_limit_sched(p, job); + r = vcn_v3_0_limit_sched(p); if (r) goto out; } @@ -1862,7 +1860,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct 
amdgpu_job *job, struct amdgpu_ib *ib) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); uint32_t msg_lo = 0, msg_hi = 0; unsigned i; int r; @@ -1881,8 +1879,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, msg_hi = val; } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) && val == 0) { - r = vcn_v3_0_dec_msg(p, job, - ((u64)msg_hi) << 32 | msg_lo); + r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5e9adbc71bbd..cbfb32b3d235 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1516,6 +1516,8 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); break; case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */ + case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */ pcache_info = yellow_carp_cache_info; num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8667e3df2d0b..bf4200457772 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -73,6 +73,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) case IP_VERSION(4, 1, 2):/* RENOIR */ case IP_VERSION(5, 2, 1):/* VANGOGH */ case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ + case IP_VERSION(5, 2, 6):/* GC 10.3.6 */ + case IP_VERSION(5, 2, 7):/* GC 10.3.7 */ case IP_VERSION(6, 0, 1): kfd->device_info.num_sdma_queues_per_engine = 2; break; @@ -127,6 +129,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 2): /* ALDEBARAN */ case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ + case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ + case IP_VERSION(10, 3, 7): /* GC 10.3.7 */ case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */ case IP_VERSION(10, 1, 4): case IP_VERSION(10, 1, 10): /* NAVI10 */ @@ -178,7 +182,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd, if (gc_version < IP_VERSION(11, 0, 0)) { /* Navi2x+, Navi1x+ */ - if (gc_version >= IP_VERSION(10, 3, 0)) + if (gc_version == IP_VERSION(10, 3, 6)) + kfd->device_info.no_atomic_fw_version = 14; + else if (gc_version >= IP_VERSION(10, 3, 0)) kfd->device_info.no_atomic_fw_version = 92; else if (gc_version >= IP_VERSION(10, 1, 1)) kfd->device_info.no_atomic_fw_version = 145; @@ -368,6 +374,16 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) if (!vf) f2g = &gfx_v10_3_kfd2kgd; break; + case IP_VERSION(10, 3, 6): + gfx_target_version = 100306; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 7): + gfx_target_version = 100307; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; + break; case IP_VERSION(11, 0, 0): gfx_target_version = 110000; f2g = &gfx_v11_kfd2kgd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 997650d597ec..e44376c2ecdc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch) { - uint64_t npages = migrate->cpages; + uint64_t 
npages = migrate->npages; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; dma_addr_t *src; @@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, mfence); if (r) goto out_free_vram_pages; - amdgpu_res_next(&cursor, j << PAGE_SHIFT); + amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT); j = 0; } else { amdgpu_res_next(&cursor, PAGE_SIZE); @@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, continue; } src[i] = svm_migrate_addr(adev, spage); - if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) { + if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) { r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j, FROM_VRAM_TO_RAM, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2ebf0132c25b..7b332246eda3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, pte_flags, - last_start - prange->start, + (last_start - prange->start) << PAGE_SHIFT, bo_adev ? bo_adev->vm_manager.vram_base_offset : 0, NULL, dma_addr, &vm->last_update); @@ -2307,6 +2307,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; + if (!mmget_not_zero(mni->mm)) + return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2333,6 +2335,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); + mmput(mni->mm); return true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index ceb34376decb..bca5f01da763 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -287,8 +287,11 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base) void dcn31_init_clocks(struct clk_mgr *clk_mgr) { + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; @@ -638,8 +641,14 @@ static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base) } } +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base) +{ + return clk_mgr_base->clks.ref_dtbclk_khz; +} + static struct clk_mgr_funcs dcn31_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn31_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn31_enable_pme_wa, @@ -719,7 +728,7 @@ void dcn31_clk_mgr_construct( } clk_mgr->base.base.dprefclk_khz = 600000; - clk_mgr->base.dccg->ref_dtbclk_khz = 600000; + clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ //clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h 
b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index 961b10a49486..be06fdbd0c22 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -51,6 +51,8 @@ void dcn31_clk_mgr_construct(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg); +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base); + void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); #endif //__DCN31_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index a2ade6e93f5e..fb4ae800e919 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -41,9 +41,7 @@ #include "dc_dmub_srv.h" -#if defined (CONFIG_DRM_AMD_DC_DP2_0) #include "dc_link_dp.h" -#endif #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) @@ -580,6 +578,7 @@ static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base) static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, @@ -656,9 +655,9 @@ void dcn315_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); + clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn315_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index fc3af81ed6c6..e4bb9c6193b5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -571,6 +571,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params( static struct clk_mgr_funcs dcn316_funcs = { .enable_pme_wa = dcn316_enable_pme_wa, .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn316_update_clocks, .init_clocks = dcn31_init_clocks, .are_clock_states_equal = dcn31_are_clock_states_equal, @@ -685,7 +686,7 @@ void dcn316_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn316_smu_get_dpref_clk(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); /*clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index dc30ac366a50..cbc47aecd00f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -114,8 +114,8 @@ static const struct 
dc_link_settings fail_safe_link_settings = { static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result); static void maximize_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); @@ -2784,6 +2784,7 @@ bool perform_link_training_with_retries( enum dp_panel_mode panel_mode = dp_get_panel_mode(link); enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; struct dc_link_settings cur_link_settings = *link_setting; + struct dc_link_settings max_link_settings = *link_setting; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); int fail_count = 0; bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ @@ -2793,7 +2794,6 @@ bool perform_link_training_with_retries( dp_trace_commit_lt_init(link); - if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) /* We need to do this before the link training to ensure the idle * pattern in SST mode will be sent right after the link training @@ -2909,19 +2909,15 @@ bool perform_link_training_with_retries( uint32_t req_bw; uint32_t link_bw; - decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status); - /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to - * minimum link bandwidth. + decide_fallback_link_setting(link, &max_link_settings, + &cur_link_settings, status); + /* Fail link training if reduced link bandwidth no longer meets + * stream requirements. */ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); - is_link_bw_low = (req_bw > link_bw); - is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE)); - - if (is_link_bw_low) - DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", - __func__, req_bw, link_bw); + if (req_bw > link_bw) + break; } msleep(delay_between_attempts); @@ -3309,7 +3305,7 @@ static bool dp_verify_link_cap( int *fail_count) { struct dc_link_settings cur_link_settings = {0}; - struct dc_link_settings initial_link_settings = *known_limit_link_setting; + struct dc_link_settings max_link_settings = *known_limit_link_setting; bool success = false; bool skip_video_pattern; enum clock_source_id dp_cs_id = get_clock_source_id(link); @@ -3318,7 +3314,7 @@ static bool dp_verify_link_cap( struct link_resource link_res; memset(&irq_data, 0, sizeof(irq_data)); - cur_link_settings = initial_link_settings; + cur_link_settings = max_link_settings; /* Grant extended timeout request */ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) { @@ -3361,7 +3357,7 @@ static bool dp_verify_link_cap( dp_trace_lt_result_update(link, status, true); dp_disable_link_phy(link, &link_res, link->connector_signal); } while (!success && decide_fallback_link_setting(link, - initial_link_settings, &cur_link_settings, status)); + &max_link_settings, &cur_link_settings, status)); link->verified_link_cap = success ? 
cur_link_settings : fail_safe_link_settings; @@ -3596,16 +3592,19 @@ static bool decide_fallback_link_setting_max_bw_policy( */ static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result) { - if (!current_link_setting) + if (!cur) return false; - if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || + if (!max) + return false; + + if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING || link->dc->debug.force_dp2_lt_fallback_method) - return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings, - current_link_setting, training_result); + return decide_fallback_link_setting_max_bw_policy(link, max, cur, + training_result); switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -3613,28 +3612,18 @@ static bool decide_fallback_link_setting( case LINK_TRAINING_CR_FAIL_LANE23: case LINK_TRAINING_LQA_FAIL: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - } else if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->link_rate = - initial_link_settings.link_rate; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + } else if (!reached_minimum_lane_count(cur->lane_count)) { + cur->link_rate = max->link_rate; if (training_result == LINK_TRAINING_CR_FAIL_LANE0) return false; else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) - current_link_setting->lane_count = - LANE_COUNT_ONE; - else if (training_result == - LINK_TRAINING_CR_FAIL_LANE23) - current_link_setting->lane_count = - LANE_COUNT_TWO; + cur->lane_count = LANE_COUNT_ONE; + else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) + cur->lane_count = LANE_COUNT_TWO; else - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); + cur->lane_count = reduce_lane_count(cur->lane_count); } else { return false; } @@ -3642,17 +3631,17 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_EQ: { - if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); - } else if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_lane_count(cur->lane_count)) { + cur->lane_count = reduce_lane_count(cur->lane_count); + } else if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. 
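
The clamp added in these hunks is the interesting part: once EQ fails at some rate, max->link_rate is pulled down to the failing rate, so a later CR-fail path can never climb back above it. A compilable toy model of that fallback loop, with an invented rate/lane ladder standing in for DC's real link-rate tables:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative halving ladders, not DC's actual rate/lane steps. */
    static int reduce_rate(int r)  { return r > 1 ? r / 2 : r; }
    static int reduce_lanes(int l) { return l > 1 ? l / 2 : l; }

    struct setting { int rate, lanes; };

    /* EQ-failure fallback: drop lanes first; once at one lane, drop the
     * rate, clamp "max" to the new rate, and retry with full lanes. */
    static bool fallback_eq_fail(struct setting *max, struct setting *cur)
    {
        if (cur->lanes > 1) {
            cur->lanes = reduce_lanes(cur->lanes);
        } else if (cur->rate > 1) {
            cur->rate = reduce_rate(cur->rate);
            max->rate = cur->rate;   /* clamp: prevents the infinite loop */
            cur->lanes = max->lanes; /* retry with the full lane count */
        } else {
            return false;            /* nothing left to try */
        }
        return true;
    }

    int main(void)
    {
        struct setting max = { 8, 4 }, cur = max;

        while (fallback_eq_fail(&max, &cur))
            printf("retry at rate=%d lanes=%d (max rate=%d)\n",
                   cur.rate, cur.lanes, max.rate);
        return 0;
    }

Without the clamp, each lane-count reset could restore a rate that had already failed EQ; clamping makes every EQ failure monotonic, so the loop must terminate.
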
+ */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } @@ -3660,12 +3649,15 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_CR: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3960c74482be..817028d3c4a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.186" +#define DC_VER "3.2.187" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -416,6 +416,7 @@ struct dc_clocks { bool p_state_change_support; enum dcn_zstate_support_state zstate_support; bool dtbclk_en; + int ref_dtbclk_khz; enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of @@ -719,6 +720,8 @@ struct dc_debug_options { bool apply_vendor_specific_lttpr_wa; bool extended_blank_optimization; union aux_wake_wa_options aux_wake_wa; + /* uses value at boot and disables switch */ + bool disable_dtb_ref_clk_switch; uint8_t psr_power_use_phy_fsm; enum dml_hostvm_override_opts dml_hostvm_override; }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 287a1066b547..bbc58d167c63 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -513,12 +513,10 @@ void dccg31_set_physymclk( /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ static void dccg31_set_dtbclk_dto( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing) + struct dtbclk_dto_params *params) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + int req_dtbclk_khz = params->pixclk_khz; uint32_t dtbdto_div; /* Mode DTBDTO Rate DTBCLK_DTO<x>_DIV Register @@ -529,57 +527,53 @@ static void dccg31_set_dtbclk_dto( * DSC native 4:2:2 pixel rate/2 4 * Other modes pixel rate 8 */ - if (num_odm_segments == 4) { + if (params->num_odm_segments == 4) { dtbdto_div = 2; - req_dtbclk_khz = req_dtbclk_khz / 4; - } else if ((num_odm_segments == 2) || - (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || - (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 - && !timing->dsc_cfg.ycbcr422_simple)) { + req_dtbclk_khz = params->pixclk_khz / 4; + } else if ((params->num_odm_segments == 2) || + (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || + (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 + && !params->timing->dsc_cfg.ycbcr422_simple)) { dtbdto_div = 4; - req_dtbclk_khz = req_dtbclk_khz / 2; + req_dtbclk_khz = params->pixclk_khz / 2; } else dtbdto_div = 8; - if (dccg->ref_dtbclk_khz && req_dtbclk_khz) { + if (params->ref_dtbclk_khz && req_dtbclk_khz) { uint32_t modulo, 
phase; // phase / modulo = dtbclk / dtbclk ref - modulo = dccg->ref_dtbclk_khz * 1000; - phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + dccg->ref_dtbclk_khz - 1), - dccg->ref_dtbclk_khz); + modulo = params->ref_dtbclk_khz * 1000; + phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1), + params->ref_dtbclk_khz); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], modulo); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], phase); + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 1); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 1); - REG_WAIT(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLKDTO_ENABLE_STATUS[dtbclk_inst], 1, + REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1, 1, 100); /* The recommended programming sequence to enable DTBCLK DTO to generate * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should * be set only after DTO is enabled */ - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - PIPE_DTO_SRC_SEL[dtbclk_inst], 1); - - dccg->dtbclk_khz[dtbclk_inst] = req_dtbclk_khz; + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + PIPE_DTO_SRC_SEL[params->otg_inst], 1); } else { - REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 0, - PIPE_DTO_SRC_SEL[dtbclk_inst], 0, - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 0, + PIPE_DTO_SRC_SEL[params->otg_inst], 0, + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], 0); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], 0); - - dccg->dtbclk_khz[dtbclk_inst] = 0; + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); } } @@ -606,16 +600,12 @@ void dccg31_set_audio_dtbclk_dto( REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 4); // 04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK - - dccg->audio_dtbclk_khz = req_audio_dtbclk_khz; } else { REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0); REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0); REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 3); // 03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO - - dccg->audio_dtbclk_khz = 0; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index d94fd1010deb..8b12b4111c88 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -230,9 +230,7 @@ static void enc31_hw_init(struct link_encoder *enc) AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 AUX_RX_DETECTION_THRESHOLD [30:28] = 1 */ - AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); - - AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); + // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c index 
789f7562cdc7..d2273674e872 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -1284,10 +1284,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; -#if defined (CONFIG_DRM_AMD_DC_DP2_0) if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; -#endif } return false; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 46ce5a0ee4ec..b5570aa8e39d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -237,6 +237,7 @@ struct clk_mgr_funcs { bool safe_to_lower); int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); + int (*get_dtb_ref_clk_frequency)(struct clk_mgr *clk_mgr); void (*set_low_power_state)(struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index b2fa4de47734..c7021915bac8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -60,8 +60,17 @@ struct dccg { const struct dccg_funcs *funcs; int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; - int dtbclk_khz[MAX_PIPES]; - int audio_dtbclk_khz; + //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */ + //int audio_dtbclk_khz;/* TODO needs to be removed */ + int ref_dtbclk_khz;/* TODO needs to be removed */ +}; + +struct dtbclk_dto_params { + const struct dc_crtc_timing *timing; + int otg_inst; + int pixclk_khz; + int req_audio_dtbclk_khz; + int num_odm_segments; int ref_dtbclk_khz; }; @@ -111,10 +120,7 @@ struct dccg_funcs { void (*set_dtbclk_dto)( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing); + struct dtbclk_dto_params *dto_params); void (*set_audio_dtbclk_dto)( struct dccg *dccg, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c index 87972dc8443d..ea6cf8bfce30 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c @@ -27,6 +27,7 @@ #include "core_types.h" #include "dccg.h" #include "dc_link_dp.h" +#include "clk_mgr.h" static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) { @@ -106,14 +107,18 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; - int odm_segment_count = get_odm_segment_count(pipe_ctx); + struct dtbclk_dto_params dto_params = {0}; enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst); dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk, - odm_segment_count, - &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); 
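
dccg31_set_dtbclk_dto() programs the DTO as a phase/modulo ratio, phase/modulo = req_dtbclk/ref_dtbclk, rounding phase up exactly as the div_u64() expression in the hunk does. A stand-alone arithmetic check of that computation, with made-up clock numbers:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ref_khz = 600000;   /* illustrative ref_dtbclk_khz */
        uint32_t req_khz = 148500;   /* illustrative req_dtbclk_khz */

        /* phase / modulo == req_dtbclk / ref_dtbclk, phase rounded up */
        uint32_t modulo = ref_khz * 1000;
        uint64_t num    = (uint64_t)modulo * req_khz + ref_khz - 1;
        uint32_t phase  = (uint32_t)(num / ref_khz);

        printf("modulo=%u phase=%u (ratio %.6f)\n",
               (unsigned int)modulo, (unsigned int)phase,
               (double)phase / modulo);
        return 0;
    }
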
stream_enc->funcs->enable_stream(stream_enc); stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); } @@ -124,9 +129,13 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + + dto_params.otg_inst = tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; stream_enc->funcs->disable(stream_enc); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 7c9330a61ac1..c7bd7e216710 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -84,7 +84,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; const uint32_t timeout = 100; - uint32_t in_reset, scratch, i; + uint32_t in_reset, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); @@ -115,6 +115,13 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) udelay(1); } + for (i = 0; i < timeout; ++i) { + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); + if (pwait_mode & (1 << 0)) + break; + + udelay(1); + } /* Force reset in case we timed out, DMCUB is likely hung. */ } @@ -125,6 +132,8 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_RPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); /* Clear the GPINT command manually so we don't send anything during boot. 
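
The wait added to dmub_dcn31_reset() is a classic bounded poll: re-read a status bit up to `timeout` times with a udelay() between reads, then fall through to the forced reset if it never asserts. A self-contained model of the loop, where read_status() and the simulated latency stand in for the REG_GET() of DMCUB_PWAIT_MODE_STATUS:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int fake_reg;
    static unsigned int read_status(void) { return fake_reg; }

    /* Poll for `bit` at most `timeout` times; the caller decides what to
     * do on timeout (here, the driver forces a reset). */
    static bool wait_for_bit(unsigned int bit, unsigned int timeout)
    {
        for (unsigned int i = 0; i < timeout; i++) {
            if (read_status() & bit)
                return true;
            /* udelay(1) in the real driver */
            if (i == 3)
                fake_reg |= bit; /* simulate hardware asserting the bit */
        }
        return false;
    }

    int main(void)
    {
        printf(wait_for_bit(1u << 0, 100) ? "entered wait mode\n"
                                          : "timed out, force reset\n");
        return 0;
    }
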
*/ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 59ddc81b5a0e..f6db6f89d45d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -151,7 +151,8 @@ struct dmub_srv; DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \ DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ - DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) struct dmub_srv_dcn31_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 73b9e0a87e54..20a3d4e23f66 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -127,6 +127,8 @@ struct av_sync_data { static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; +static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; + /*MST Dock*/ static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h index 247c6e9632ba..1cb399dbc7cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_7_PPTABLE_H #define SMU_11_0_7_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_7_TABLE_FORMAT_REVISION 15 @@ -139,7 +140,7 @@ struct smu_11_0_7_overdrive_table uint32_t max[SMU_11_0_7_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_7_MAX_ODSETTING]; //default minimum settings int16_t pm_setting[SMU_11_0_7_MAX_PMSETTING]; //Optimized power mode feature settings -} __attribute__((packed)); +}; enum SMU_11_0_7_PPCLOCK_ID { SMU_11_0_7_PPCLOCK_GFXCLK = 0, @@ -166,7 +167,7 @@ struct smu_11_0_7_power_saving_clock_table uint32_t count; //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT uint32_t max[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_7_powerplay_table { @@ -191,6 +192,8 @@ struct smu_11_0_7_powerplay_table struct smu_11_0_7_overdrive_table overdrive_table; PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h index 7a63cf8e85ed..0116e3d04fad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_PPTABLE_H #define SMU_11_0_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_TABLE_FORMAT_REVISION 12 @@ -109,7 +110,7 @@ struct smu_11_0_overdrive_table uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_11_0_PPCLOCK_ID { SMU_11_0_PPCLOCK_GFXCLK = 0, @@ -133,7 +134,7 @@ struct smu_11_0_power_saving_clock_table uint32_t count; //power_saving_clock_count = 
SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_powerplay_table { @@ -162,6 +163,8 @@ struct smu_11_0_powerplay_table #ifndef SMU_11_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h index 3f29f4327378..478862ded0bd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_7_PPTABLE_H #define SMU_13_0_7_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_7_TABLE_FORMAT_REVISION 15 //// POWERPLAYTABLE::ulPlatformCaps @@ -194,7 +196,8 @@ struct smu_13_0_7_powerplay_table struct smu_13_0_7_overdrive_table overdrive_table; uint8_t padding1; PPTable_t smc_pptable; //PPTable_t in driver_if.h -} __attribute__((packed)); +}; +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h index 1f311396b706..043307485528 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_PPTABLE_H #define SMU_13_0_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_TABLE_FORMAT_REVISION 1 //// POWERPLAYTABLE::ulPlatformCaps @@ -109,7 +111,7 @@ struct smu_13_0_overdrive_table { uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_13_0_PPCLOCK_ID { SMU_13_0_PPCLOCK_GFXCLK = 0, @@ -132,7 +134,7 @@ struct smu_13_0_power_saving_clock_table { uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_13_0_powerplay_table { struct atom_common_table_header header; @@ -160,6 +162,8 @@ struct smu_13_0_powerplay_table { #ifndef SMU_13_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 4551bc8a3ecf..f573d582407e 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -160,13 +160,12 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower) } if (bDPExecute) - ast->tx_chip_type = AST_TX_ASTDP; + ast->tx_chip_types |= BIT(AST_TX_ASTDP); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, ASTDP_HOST_EDID_READ_DONE); - } else - ast->tx_chip_type = AST_TX_NONE; + } } diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 204c926a18ea..4f75a9efb610 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -450,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev) ast_init_dvo(dev); break; default: - if (ast->tx_chip_type == AST_TX_SIL164) + if (ast->tx_chip_types & BIT(AST_TX_SIL164)) ast_init_dvo(dev); else ast_init_analog(dev); diff --git 
a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index afebe35f205e..a34db4380f68 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -73,6 +73,11 @@ enum ast_tx_chip { AST_TX_ASTDP, }; +#define AST_TX_NONE_BIT BIT(AST_TX_NONE) +#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) +#define AST_TX_DP501_BIT BIT(AST_TX_DP501) +#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) + #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 @@ -173,7 +178,7 @@ struct ast_private { struct drm_plane primary_plane; struct ast_cursor_plane cursor_plane; struct drm_crtc crtc; - union { + struct { struct { struct drm_encoder encoder; struct ast_vga_connector vga_connector; @@ -199,7 +204,7 @@ struct ast_private { ast_use_defaults } config_mode; - enum ast_tx_chip tx_chip_type; + unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d770d5a23c1a..067453266897 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -216,7 +216,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip_type = AST_TX_NONE; + ast->tx_chip_types |= AST_TX_NONE_BIT; /* * VGACRA3 Enhanced Color Mode Register, check if DVO is already @@ -229,7 +229,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) if (!*need_post) { jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff); if (jreg & 0x80) - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; } if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) { @@ -241,7 +241,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); switch (jreg) { case 0x04: - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; break; case 0x08: ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); @@ -254,22 +254,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } fallthrough; case 0x0c: - ast->tx_chip_type = AST_TX_DP501; + ast->tx_chip_types = AST_TX_DP501_BIT; } } else if (ast->chip == AST2600) ast_dp_launch(&ast->base, 0); /* Print stuff for diagnostic purposes */ - switch(ast->tx_chip_type) { - case AST_TX_SIL164: + if (ast->tx_chip_types & AST_TX_NONE_BIT) + drm_info(dev, "Using analog VGA\n"); + if (ast->tx_chip_types & AST_TX_SIL164_BIT) drm_info(dev, "Using Sil164 TMDS transmitter\n"); - break; - case AST_TX_DP501: + if (ast->tx_chip_types & AST_TX_DP501_BIT) drm_info(dev, "Using DP501 DisplayPort transmitter\n"); - break; - default: - drm_info(dev, "Analog VGA only\n"); - } + return 0; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 323af2746aa9..db2010a55674 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -997,10 +997,10 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0); - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 1); - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON); 
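
The ast changes above convert tx_chip_type from a single enum value into the tx_chip_types bitfield, so analog VGA and a SIL164/DP501/ASTDP transmitter can be detected, reported and initialized side by side. A compact illustration of the enum-to-bitmask pattern (the detection results are hard-coded here for the example):

    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    enum tx_chip { TX_NONE, TX_SIL164, TX_DP501, TX_ASTDP };

    int main(void)
    {
        unsigned long tx_chip_types = 0;

        /* Each detected transmitter sets its own bit instead of
         * overwriting a single enum field. */
        tx_chip_types |= BIT(TX_NONE);  /* analog VGA */
        tx_chip_types |= BIT(TX_ASTDP); /* plus a DisplayPort transmitter */

        if (tx_chip_types & BIT(TX_NONE))
            printf("Using analog VGA\n");
        if (tx_chip_types & BIT(TX_SIL164))
            printf("Using Sil164 TMDS transmitter\n");
        if (tx_chip_types & BIT(TX_ASTDP))
            printf("Using ASPEED DisplayPort transmitter\n");
        return 0;
    }
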
ast_wait_for_vretrace(ast); ast_dp_set_on_off(crtc->dev, 1); @@ -1012,17 +1012,17 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: ch = mode; - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 0); - break; - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_set_on_off(crtc->dev, 0); ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF); } ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch); + break; } } @@ -1155,7 +1155,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, ast_crtc_load_lut(ast, crtc); //Set Aspeed Display-Port - if (ast->tx_chip_type == AST_TX_ASTDP) + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) ast_dp_set_mode(crtc, vbios_mode_info); mutex_unlock(&ast->ioregs_lock); @@ -1739,22 +1739,26 @@ int ast_mode_config_init(struct ast_private *ast) ast_crtc_init(dev); - switch (ast->tx_chip_type) { - case AST_TX_NONE: + if (ast->tx_chip_types & AST_TX_NONE_BIT) { ret = ast_vga_output_init(ast); - break; - case AST_TX_SIL164: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_SIL164_BIT) { ret = ast_sil164_output_init(ast); - break; - case AST_TX_DP501: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_DP501_BIT) { ret = ast_dp501_output_init(ast); - break; - case AST_TX_ASTDP: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ret = ast_astdp_output_init(ast); - break; + if (ret) + return ret; } - if (ret) - return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 0aa9cf0fb5c3..82fd3c8adee1 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -391,7 +391,7 @@ void ast_post_gpu(struct drm_device *dev) ast_init_3rdtx(dev); } else { - if (ast->tx_chip_type != AST_TX_NONE) + if (ast->tx_chip_types & AST_TX_SIL164_BIT) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index b97f6e8f0f6b..01c8b80e34ec 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1267,6 +1267,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, } static +struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, + struct drm_atomic_state *state) +{ + struct drm_encoder *encoder = dp->encoder; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + connector = drm_atomic_get_old_connector_for_encoder(state, encoder); + if (!connector) + return NULL; + + conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!conn_state) + return NULL; + + return conn_state->crtc; +} + +static struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) { @@ -1446,14 +1465,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; - struct drm_crtc *crtc; + struct drm_crtc *old_crtc, *new_crtc; + struct drm_crtc_state *old_crtc_state = NULL; struct drm_crtc_state *new_crtc_state = NULL; + int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!crtc) + new_crtc = 
analogix_dp_get_new_crtc(dp, old_state); + if (!new_crtc) goto out; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); if (!new_crtc_state) goto out; @@ -1462,6 +1483,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, return; out: + old_crtc = analogix_dp_get_old_crtc(dp, old_state); + if (old_crtc) { + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, + old_crtc); + + /* When moving from PSR to fully disabled, exit PSR first. */ + if (old_crtc_state && old_crtc_state->self_refresh_active) { + ret = analogix_dp_disable_psr(dp); + if (ret) + DRM_ERROR("Failed to disable psr (%d)\n", ret); + } + } + analogix_dp_bridge_disable(bridge); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 2831f0813c3a..ac66f408b40c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -577,7 +577,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) ctx->host_node = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); - if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) { + if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) { ret = -EINVAL; goto err_put_node; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9603193d2fa1..987e4b212e9f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1011,9 +1011,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state, return drm_atomic_crtc_effectively_active(old_state); /* - * We need to run through the crtc_funcs->disable() function if the CRTC - * is currently on, if it's transitioning to self refresh mode, or if - * it's in self refresh mode and needs to be fully disabled. + * We need to disable bridge(s) and CRTC if we're transitioning out of + * self-refresh and changing CRTCs at the same time, because the + * bridge tracks self-refresh status via CRTC state. + */ + if (old_state->self_refresh_active && + old_state->crtc != new_state->crtc) + return true; + + /* + * We also need to run through the crtc_funcs->disable() function if + * the CRTC is currently on, if it's transitioning to self refresh + * mode, or if it's in self refresh mode and needs to be fully + * disabled. 
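
The analogix fix and this helper comment describe the same rule: a CRTC must go through a full disable when it leaves self-refresh while the display moves to a different CRTC, because the bridge tracks PSR state per CRTC. A condensed, compilable model of the decision (the field names only loosely mirror struct drm_crtc_state):

    #include <stdbool.h>
    #include <stdio.h>

    struct crtc_state { bool active, self_refresh_active; int crtc_id; };

    static bool needs_disable(const struct crtc_state *old,
                              const struct crtc_state *new)
    {
        /* New case: leaving self-refresh while switching CRTCs. */
        if (old->self_refresh_active && old->crtc_id != new->crtc_id)
            return true;

        /* Existing cases: CRTC on, entering self-refresh, or going from
         * self-refresh to fully off. */
        return old->active ||
               (old->self_refresh_active && !new->active) ||
               new->self_refresh_active;
    }

    int main(void)
    {
        struct crtc_state old = { .self_refresh_active = true, .crtc_id = 0 };
        struct crtc_state new = { .active = true, .crtc_id = 1 };

        printf("disable? %s\n", needs_disable(&old, &new) ? "yes" : "no");
        return 0;
    }
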
*/ return old_state->active || (old_state->self_refresh_active && !new_state->active) || diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9c8829f945b2..f7863d6dea80 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { if (plane == &ipu_crtc->plane[0]->base) disable_full = true; - if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) + if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) disable_partial = true; } diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 7fcbc2a5b6cd..087e69b98d06 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, struct drm_file *file) { struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_file_priv *file_priv = file->driver_priv; struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; struct panfrost_job *job; @@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job->jc = args->jc; job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); - job->file_priv = file->driver_priv; + job->mmu = file_priv->mmu; slot = panfrost_job_get_slot(job); ret = drm_sched_job_init(&job->base, - &job->file_priv->sched_entity[slot], + &file_priv->sched_entity[slot], NULL); if (ret) goto out_put_job; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index fda5871aebe3..7c4208476fbd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) return; } - cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); + cfg = panfrost_mmu_as_get(pfdev, job->mmu); job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); @@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, job->jc = 0; } - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); if (signal_fence) @@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev, * happen when we receive the DONE interrupt while doing a GPU reset). */ job->jc = 0; - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); dma_fence_signal_locked(job->done_fence); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 77e6d0e6f612..8becc1ba0eb9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -17,7 +17,7 @@ struct panfrost_job { struct kref refcount; struct panfrost_device *pfdev; - struct panfrost_file_priv *file_priv; + struct panfrost_mmu *mmu; /* Fence to be signaled by IRQ handler when the job is complete. 
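
The panfrost hunks swap the job's pointer to the whole per-file struct for a pointer to just the MMU context it actually uses, so a job that outlives its file descriptor no longer reaches through freed file state; the real driver also refcounts the mmu object, which this toy sketch omits (all names here are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct mmu { int as; };

    struct file_priv { struct mmu *mmu; };
    struct job { struct mmu *mmu; }; /* was: struct file_priv *file_priv */

    int main(void)
    {
        struct mmu *m = malloc(sizeof(*m));
        struct file_priv fp = { .mmu = m };

        /* The job copies only the pointer it needs at submit time. */
        struct job j = { .mmu = fp.mmu };

        /* The file goes away... */
        fp.mmu = NULL;

        /* ...but the job still addresses its MMU context directly. */
        printf("job mmu %p\n", (void *)j.mmu);
        free(m);
        return 0;
    }
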
*/ struct dma_fence *done_fence; diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 505a032e2786..9dcf3f51f2dd 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -402,6 +402,7 @@ config JOYSTICK_N64 config JOYSTICK_SENSEHAT tristate "Raspberry Pi Sense HAT joystick" depends on INPUT && I2C + depends on HAS_IOMEM select MFD_SIMPLE_MFD_I2C help Say Y here if you want to enable the driver for the diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index cbb1599a520e..480476121c01 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { }, { /* - * Lenovo Yoga Tab2 1051L, something messes with the home-button + * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button * IRQ settings, leading to a non working home-button. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "60073"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1051"), }, }, {} /* Terminating entry */ diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 59a14505b9cd..ca150618d32f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface, if (!dev->tp_data) goto err_free_bt_buffer; - if (dev->bt_urb) + if (dev->bt_urb) { usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); + dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + } + usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); + dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1259ca22d625..f4a1281658db 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq) err = mmc_cqe_recovery(host); if (err) mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); - else - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); } diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 1499a64ec3aa..f13c08db3da5 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip) struct sdhci_host *host = slot->host; u16 clock; + if (host->mmc->ios.power_mode != MMC_POWER_ON) + return 0; + clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL); clock |= SDHCI_CLOCK_PLL_EN; diff --git a/drivers/net/amt.c b/drivers/net/amt.c index ebee5f07a208..be2719a3ba70 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -51,6 +51,7 @@ static char *status_str[] = { }; static char *type_str[] = { + "", /* Type 0 is not defined */ "AMT_MSG_DISCOVERY", "AMT_MSG_ADVERTISEMENT", "AMT_MSG_REQUEST", @@ -2220,8 +2221,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) struct amt_header_advertisement *amta; int hdr_size; - hdr_size = sizeof(*amta) - sizeof(struct 
amt_header); - + hdr_size = sizeof(*amta) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2251,19 +2251,27 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) struct ethhdr *eth; struct iphdr *iph; + hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) + return true; + amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1); if (amtmd->reserved || amtmd->version) return true; - hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false)) return true; + skb_reset_network_header(skb); skb_push(skb, sizeof(*eth)); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); eth = eth_hdr(skb); + + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; iph = ip_hdr(skb); + if (iph->version == 4) { if (!ipv4_is_multicast(iph->daddr)) return true; @@ -2274,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) } else if (iph->version == 6) { struct ipv6hdr *ip6h; + if (!pskb_may_pull(skb, sizeof(*ip6h))) + return true; + ip6h = ipv6_hdr(skb); if (!ipv6_addr_is_multicast(&ip6h->daddr)) return true; @@ -2306,8 +2317,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt, struct iphdr *iph; int hdr_size, len; - hdr_size = sizeof(*amtmq) - sizeof(struct amt_header); - + hdr_size = sizeof(*amtmq) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2315,22 +2325,27 @@ static bool amt_membership_query_handler(struct amt_dev *amt, if (amtmq->reserved || amtmq->version) return true; - hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth); + hdr_size -= sizeof(*eth); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) return true; + oeth = eth_hdr(skb); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); skb_reset_network_header(skb); eth = eth_hdr(skb); + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { - if (!ipv4_is_multicast(iph->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3))) return true; + if (!ipv4_is_multicast(iph->daddr)) + return true; + ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); @@ -2345,15 +2360,17 @@ static bool amt_membership_query_handler(struct amt_dev *amt, ip_eth_mc_map(iph->daddr, eth->h_dest); #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { - struct ipv6hdr *ip6h = ipv6_hdr(skb); struct mld2_query *mld2q; + struct ipv6hdr *ip6h; - if (!ipv6_addr_is_multicast(&ip6h->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + sizeof(*mld2q))) return true; + ip6h = ipv6_hdr(skb); + if (!ipv6_addr_is_multicast(&ip6h->daddr)) + return true; + mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); @@ -2389,23 +2406,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_membership_update *amtmu; struct amt_tunnel_list *tunnel; - struct udphdr *udph; struct ethhdr *eth; struct iphdr *iph; - int len; + int len, hdr_size; iph = ip_hdr(skb); - udph = udp_hdr(skb); - if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol, - false, false)) + hdr_size = sizeof(*amtmu) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) return true; - amtmu = (struct amt_header_membership_update *)skb->data; + 
amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1); if (amtmu->reserved || amtmu->version) return true; - skb_pull(skb, sizeof(*amtmu)); + if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false)) + return true; + skb_reset_network_header(skb); list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { @@ -2426,6 +2443,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) return true; report: + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { if (ip_mc_check_igmp(skb)) { @@ -2679,7 +2699,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) amt = rcu_dereference_sk_user_data(sk); if (!amt) { err = true; - goto drop; + kfree_skb(skb); + goto out; } skb->dev = amt->dev; diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 8af4def38a98..e531b93f3cb2 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) { err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i], gphy_fw_np, i); - if (err) + if (err) { + of_node_put(gphy_fw_np); goto remove_gphy; + } i++; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 7b37d45bc9fb..d94150d8f3f4 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 ctrl, u16 status, u16 lpa, + u16 bmsr, u16 lpa, u16 status, struct phylink_link_state *state) { + state->link = false; + + /* If the BMSR reports that the link had failed, report this to + * phylink. + */ + if (!(bmsr & BMSR_LSTATUS)) + return 0; + state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { /* The Spped and Duplex Resolved register is 1 if AN is enabled * and complete, or if AN is disabled. So with disabled AN we - * still get here on link up. But we want to set an_complete - * only if AN was enabled, thus we look at BMCR_ANENABLE. - * (According to 802.3-2008 section 22.2.4.2.10, we should be - * able to get this same value from BMSR_ANEGCAPABLE, but tests - * show that these Marvell PHYs don't conform to this part of - * the specificaion - BMSR_ANEGCAPABLE is simply always 1.) + * still get here on link up. */ - state->an_complete = !!(ctrl & BMCR_ANENABLE); state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ? 
DUPLEX_FULL : DUPLEX_HALF; @@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; - err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl); + err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, @@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &ctrl); + MV88E6390_SGMII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 3bb42a9f236d..769f672e9128 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port, return 0; } -static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port, - phy_interface_t interface) -{ - int ext_int; - - ext_int = rtl8365mb_extint_port_map[port]; - - if (ext_int < 0 && - (interface == PHY_INTERFACE_MODE_NA || - interface == PHY_INTERFACE_MODE_INTERNAL || - interface == PHY_INTERFACE_MODE_GMII)) - /* Internal PHY */ - return true; - else if ((ext_int >= 1) && - phy_interface_mode_is_rgmii(interface)) - /* Extension MAC */ - return true; - - return false; -} - static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { - if (dsa_is_user_port(ds, port)) + if (dsa_is_user_port(ds, port)) { __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); - else if (dsa_is_cpu_port(ds, port)) + + /* GMII is the default interface mode for phylib, so + * we have to support it for ports with integrated PHY. 
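The mv88e6xxx serdes rework above switches the link decision from BMCR to BMSR because the BMSR link-status bit latches low on any link loss; checking it first guarantees a link failure is reported to phylink even if the PHY status register already shows the link re-established. Condensed, the resulting flow looks like this (register constants as in the driver, BMSR bits from linux/mii.h):

    static void pcs_state_from_regs(u16 bmsr, u16 status,
                                    struct phylink_link_state *state)
    {
        state->link = false;

        /* Latched-low bit: zero here means the link dropped at some
         * point since the last read, so report link down.
         */
        if (!(bmsr & BMSR_LSTATUS))
            return;

        state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK);
        state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
        state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ?
                        DUPLEX_FULL : DUPLEX_HALF;
    }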
+ */ + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + } else if (dsa_is_cpu_port(ds, port)) { phy_interface_set_rgmii(config->supported_interfaces); + } config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; @@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, struct realtek_priv *priv = ds->priv; int ret; - if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) { - dev_err(priv->dev, "phy mode %s is unsupported on port %d\n", - phy_modes(state->interface), port); - return; - } - if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) { dev_err(priv->dev, "port %d supports only conventional PHY or fixed-link\n", diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index a3816264c35c..8c5828582c21 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio = mdiobus_alloc(); if (mdio == NULL) { netdev_err(dev, "Error allocating MDIO bus\n"); - return -ENOMEM; + ret = -ENOMEM; + goto put_node; } mdio->name = ALTERA_TSE_RESOURCE_NAME; @@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->id); goto out_free_mdio; } + of_node_put(mdio_node); if (netif_msg_drv(priv)) netdev_info(dev, "MDIO bus %s: created\n", mdio->id); @@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) out_free_mdio: mdiobus_free(mdio); mdio = NULL; +put_node: + of_node_put(mdio_node); return ret; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index c6f003975621..d5f2c6989221 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev) pr_cont("\n"); } } - prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE; aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); wmb(); /* drain writebuffer */ @@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) ps->tx_packets++; ps->tx_bytes += ptxd->len; - ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE; wmb(); /* drain writebuffer */ dev_kfree_skb(skb); aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); @@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev) /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - &aup->dma_addr, 0); + aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); if (!aup->vaddr) { dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; @@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev) for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { pDB->pnext = pDBfree; pDBfree = pDB; - pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); - pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i; + pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i; pDB++; } aup->pDBfree = pDBfree; @@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - 
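The au1000 conversion above replaces virt_to_bus() with the address pair returned by dma_alloc_coherent(): since every buffer is carved out of one coherent allocation, each buffer's device address is simply the base DMA handle plus the same offset used on the CPU side. A standalone sketch of that carving, with hypothetical names:

    #include <linux/dma-mapping.h>

    struct buf_db {
        void *vaddr;
        dma_addr_t dma_addr;
    };

    /* One coherent allocation split into n equal buffers; the
     * vaddr/dma_addr pairs stay in sync by construction, so no
     * virt_to_bus() translation is needed (or allowed).
     */
    static int carve_coherent(struct device *dev, struct buf_db *db,
                              unsigned int n, size_t bufsz)
    {
        dma_addr_t base_dma;
        void *base = dma_alloc_coherent(dev, n * bufsz, &base_dma,
                                        GFP_KERNEL);
        unsigned int i;

        if (!base)
            return -ENOMEM;

        for (i = 0; i < n; i++) {
            db[i].vaddr = base + i * bufsz;
            db[i].dma_addr = base_dma + i * bufsz;
        }
        return 0;
    }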
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->rx_db_inuse[i] = pDB; } @@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->tx_dma_ring[i]->len = 0; aup->tx_db_inuse[i] = pDB; } @@ -1310,7 +1310,7 @@ err_remap2: iounmap(aup->mac); err_remap1: dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); err_vaddr: free_netdev(dev); err_alloc: @@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); iounmap(aup->macdma); iounmap(aup->mac); diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h index e3a3ed29db61..2489c2f4fd8a 100644 --- a/drivers/net/ethernet/amd/au1000_eth.h +++ b/drivers/net/ethernet/amd/au1000_eth.h @@ -106,8 +106,8 @@ struct au1000_private { struct mac_reg *mac; /* mac registers */ u32 *enable; /* address of MAC Enable Register */ void __iomem *macdma; /* base of MAC DMA port */ - u32 vaddr; /* virtual address of rx/tx buffers */ - dma_addr_t dma_addr; /* dma address of rx/tx buffers */ + void *vaddr; /* virtual address of rx/tx buffers */ + dma_addr_t dma_addr; /* dma address of rx/tx buffers */ spinlock_t lock; /* Serialise access to device */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index a3593290886f..4d46780fad13 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); - netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); + netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto)); for (i = 0; i < skb->len; i += 32) { unsigned int len = min(skb->len - i, 32U); diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 086739e4f40a..9b83d5361699 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) np = of_get_child_by_name(core->dev.of_node, "mdio"); err = of_mdiobus_register(mii_bus, np); + of_node_put(np); if (err) { dev_err(&core->dev, "Registration of mii bus failed\n"); goto err_free_bus; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7f11c0a8e7a9..d4e63f0644c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1184,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + disable = IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = 0; + enable = IXGBE_VMOLR_BAM; break; case IXGBEVF_XCAST_MODE_MULTI: disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; @@ -1208,9 
+1208,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, return -EPERM; } - disable = 0; + disable = IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b3b3c079a0fa..59c9a10f83ba 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, return true; } +static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask) +{ + unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH); + unsigned long data; + + data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN, + get_order(size)); + + return (void *)data; +} + /* the qdma core needs scratch memory to be setup */ static int mtk_init_fq_dma(struct mtk_eth *eth) { @@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; /* alloc new buffer */ - new_data = napi_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + new_data = napi_alloc_frag(ring->frag_size); + else + new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); if (unlikely(!new_data)) { netdev->stats.rx_dropped++; goto release_desc; @@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + ring->data[i] = netdev_alloc_frag(ring->frag_size); + else + ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL); if (!ring->data[i]) return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ed5038d98ef6..6400a827173c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev, en_err(priv, "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", i, offset, ee->len - i, ret); - return 0; + return ret; } i += ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 0eb9d74547f8..50422b56a64d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other) return pci_get_drvdata(to_pci_dev(other)); } -static int next_phys_dev(struct device *dev, const void *data) -{ - struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; - - mdev = pci_get_other_drvdata(this->device, dev); - if (!mdev) - return 0; - - return _next_phys_dev(mdev, data); -} - static int next_phys_dev_lag(struct device *dev, const void *data) { struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; @@ -624,13 +613,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev, } /* Must be called with intf_mutex held */ -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) -{ - lockdep_assert_held(&mlx5_intf_mutex); - return mlx5_get_next_dev(dev, &next_phys_dev); -} - -/* Must be called with intf_mutex held */ struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev) { 
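The mtk_eth_soc hunks above stop calling napi_alloc_frag()/netdev_alloc_frag() once the LRO receive buffer exceeds PAGE_SIZE, because the page-frag allocators only hand out sub-page fragments. Larger buffers come straight from the page allocator instead; the dispatch, condensed into one hypothetical helper:

    #include <linux/gfp.h>
    #include <linux/skbuff.h>

    static void *rx_buf_alloc(unsigned int frag_size, gfp_t gfp)
    {
        if (frag_size <= PAGE_SIZE)
            return napi_alloc_frag(frag_size);

        /* Multi-page buffer: fall back to the page allocator.
         * __GFP_COMP makes it a compound page so it is managed as
         * one unit; __GFP_NOWARN avoids splat on large-order failure.
         */
        return (void *)__get_free_pages(gfp | __GFP_COMP | __GFP_NOWARN,
                                        get_order(frag_size));
    }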
lockdep_assert_held(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index eae9aa9c0811..978a2bb8e122 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) if (!tracer->owner) return; + if (unlikely(!tracer->str_db.loaded)) + goto arm; + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; @@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) &tmp_trace_block[TRACES_PER_BLOCK - 1]); } +arm: mlx5_fw_tracer_arm(dev); } @@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void queue_work(tracer->work_queue, &tracer->ownership_change_work); break; case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: - if (likely(tracer->str_db.loaded)) - queue_work(tracer->work_queue, &tracer->handle_traces_work); + queue_work(tracer->work_queue, &tracer->handle_traces_work); break; default: mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 68364484a435..3c1edfa33aa7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; - bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write); + bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && + MLX5_CAP_GEN(mdev, relaxed_ordering_write); return ro && lro_en ? 
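The rq_end_pad_mode() change above and the mkey change just below apply the same rule: a firmware capability bit alone is not sufficient, relaxed ordering must also actually be enabled at the PCIe level. pcie_relaxed_ordering_enabled() reflects the Device Control register, including quirks that force relaxed ordering off. The pattern, distilled (mlx5 driver headers assumed):

    /* Enable relaxed ordering on an mkey only when the PCIe link
     * permits it, not merely when FW advertises the capability.
     */
    static void set_mkc_relaxed_ordering(struct mlx5_core_dev *mdev,
                                         void *mkc)
    {
        bool ro_pci = pcie_relaxed_ordering_enabled(mdev->pdev);

        MLX5_SET(mkc, mkc, relaxed_ordering_read,
                 ro_pci && MLX5_CAP_GEN(mdev, relaxed_ordering_read));
        MLX5_SET(mkc, mkc, relaxed_ordering_write,
                 ro_pci && MLX5_CAP_GEN(mdev, relaxed_ordering_write));
    }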
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 43a536cb81db..c0f409c195bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -38,11 +38,12 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) { + bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read); - MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read); - MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write); + MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read); + MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index eb90e79388f1..f797fd97d305 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -950,6 +950,13 @@ err_event_reg: return err; } +static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) +{ + mlx5e_rep_tc_netdevice_event_unregister(rpriv); + mlx5e_rep_bond_cleanup(rpriv); + mlx5e_rep_tc_cleanup(rpriv); +} + static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } - err = mlx5e_tc_ht_init(&rpriv->tc_ht); - if (err) - goto err_ht_init; - if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_init_uplink_rep_tx(rpriv); if (err) goto err_init_tx; } + err = mlx5e_tc_ht_init(&rpriv->tc_ht); + if (err) + goto err_ht_init; + return 0; -err_init_tx: - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); err_ht_init: + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) + mlx5e_cleanup_uplink_rep_tx(rpriv); +err_init_tx: mlx5e_destroy_tises(priv); return err; } -static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) -{ - mlx5e_rep_tc_netdevice_event_unregister(rpriv); - mlx5e_rep_bond_cleanup(rpriv); - mlx5e_rep_tc_cleanup(rpriv); -} - static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; - mlx5e_destroy_tises(priv); + mlx5e_tc_ht_cleanup(&rpriv->tc_ht); if (rpriv->rep->vport == MLX5_VPORT_UPLINK) mlx5e_cleanup_uplink_rep_tx(rpriv); - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); + mlx5e_destroy_tises(priv); } static void mlx5e_rep_enable(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 217cac29057f..2ce3728576d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event, switch (event) { case ESW_OFFLOADS_DEVCOM_PAIR: - if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev) - break; - if (mlx5_eswitch_vport_match_metadata_enabled(esw) != mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; @@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 
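The mlx5e_init_rep_tx()/mlx5e_cleanup_rep_tx() shuffle above restores the usual kernel invariant that error unwinding and teardown run in exactly the reverse order of setup. Schematically, with placeholder step names standing in for the mlx5 helpers:

    /* Placeholder steps, standing in for mlx5e_destroy_tises() etc. */
    int create_tises(void);
    int init_uplink_tx(void);
    int init_tc_ht(void);
    void cleanup_uplink_tx(void);
    void destroy_tises(void);

    static int setup_everything(void)
    {
        int err;

        err = create_tises();       /* step 1 */
        if (err)
            return err;
        err = init_uplink_tx();     /* step 2 */
        if (err)
            goto err_uplink;
        err = init_tc_ht();         /* step 3 */
        if (err)
            goto err_ht;
        return 0;

    err_ht:
        cleanup_uplink_tx();        /* undo step 2 */
    err_uplink:
        destroy_tises();            /* undo step 1 */
        return err;
    }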
mlx5_esw_offloads_devcom_event, @@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, ESW_OFFLOADS_DEVCOM_UNPAIR, esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fdcf7f529330..21e5c709b2d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, return NULL; } -static bool check_conflicting_actions(u32 action1, u32 action2) +static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0, + const struct mlx5_fs_vlan *vlan1) { - u32 xored_actions = action1 ^ action2; + return vlan0->ethtype != vlan1->ethtype || + vlan0->vid != vlan1->vid || + vlan0->prio != vlan1->prio; +} + +static bool check_conflicting_actions(const struct mlx5_flow_act *act1, + const struct mlx5_flow_act *act2) +{ + u32 action1 = act1->action; + u32 action2 = act2->action; + u32 xored_actions; + + xored_actions = action1 ^ action2; /* if one rule only wants to count, it's ok */ if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || @@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2) MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) return true; + if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT && + act1->pkt_reformat != act2->pkt_reformat) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + act1->modify_hdr != act2->modify_hdr) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH && + check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0])) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 && + check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1])) + return true; + return false; } @@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_context *flow_context, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act->action, fte->action.action)) { + if (check_conflicting_actions(flow_act, &fte->action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting actions\n"); return -EEXIST; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 552b6e26e701..2a8fc547eb37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -783,7 +783,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; - struct lag_tracker tracker; + struct lag_tracker tracker = { }; bool do_bond, roce_lag; int err; int i; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 72f70fad4641..c81b173156d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -74,6 +74,16 @@ struct mlx5_lag { struct lag_mpesw lag_mpesw; }; +static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev) +{ + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + MLX5_CAP_GEN(dev, num_lag_ports) < 2 || + MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) + return 
false; + return true; +} + static inline struct mlx5_lag * mlx5_lag_dev(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 484cb1e4fc7f..9cc7afea2758 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev); int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 443a5d6eb57b..7c31a46195b2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv6); } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + map[FLOW_PAY_QINQ] = key_size; + key_size += sizeof(struct nfp_flower_vlan); + } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) { map[FLOW_PAY_GRE] = key_size; if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) @@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv4_gre_tun); } - if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { - map[FLOW_PAY_QINQ] = key_size; - key_size += sizeof(struct nfp_flower_vlan); - } - if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) || (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) { map[FLOW_PAY_UDP_TUN] = key_size; @@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } + if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { + offset = key_map[FLOW_PAY_QINQ]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, + (struct nfp_flower_vlan *)msk, + rules[i]); + } + } + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) { offset = key_map[FLOW_PAY_GRE]; key = kdata + offset; @@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } - if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { - offset = key_map[FLOW_PAY_QINQ]; - key = kdata + offset; - msk = mdata + offset; - for (i = 0; i < _CT_TYPE_MAX; i++) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, - (struct nfp_flower_vlan *)msk, - rules[i]); - } - } - if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN || key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { offset = key_map[FLOW_PAY_UDP_TUN]; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 193a167a6762..e01430139b6d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_ipv6); } + if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, + (struct nfp_flower_vlan *)msk, + rule); + ext += 
sizeof(struct nfp_flower_vlan); + msk += sizeof(struct nfp_flower_vlan); + } + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { struct nfp_flower_ipv6_gre_tun *gre_match; @@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, } } - if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, - (struct nfp_flower_vlan *)msk, - rule); - ext += sizeof(struct nfp_flower_vlan); - msk += sizeof(struct nfp_flower_vlan); - } - if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN || key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 54af30961351..6eeeb0fda91f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -15,7 +15,7 @@ #include "nfp_net_sriov.h" static int -nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) +nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { u16 cap_vf; @@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP); if ((cap_vf & cap) != cap) { - nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); + if (warn) + nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); return -EOPNOTSUPP; } if (vf < 0 || vf >= app->pf->num_vfs) { - nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); + if (warn) + nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); return -EINVAL; } @@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) unsigned int vf_offset; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) return err; @@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, u32 vlan_tag; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true); if (err) return err; @@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, } /* Check if fw supports or not */ - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true); if (err) is_proto_sup = false; @@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, u32 vf_offset, ratevalue; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true); if (err) return err; @@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF, - "spoofchk"); + "spoofchk", true); if (err) return err; @@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST, - "trust"); + "trust", true); if (err) return err; @@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int err; err = nfp_net_sriov_check(app, vf, 
NFP_NET_VF_CFG_MB_CAP_LINK_STATE, - "link_state"); + "link_state", true); if (err) return err; @@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, u8 flags; int err; - err = nfp_net_sriov_check(app, vf, 0, ""); + err = nfp_net_sriov_check(app, vf, 0, "", true); if (err) return err; @@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag); ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag); - if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto")) + if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false)) ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag)); ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false); if (!err) { rate = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_RATE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index f9f80933e0c9..38fe77d1035e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -1072,13 +1072,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) { - goto err_dvr_probe; + goto err_alloc_irq; } return 0; -err_dvr_probe: - pci_free_irq_vectors(pdev); err_alloc_irq: clk_disable_unprepare(plat->stmmac_clk); clk_unregister_fixed_rate(plat->stmmac_clk); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 8561f2d4443b..13dafe7a29bd 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -137,6 +137,7 @@ #define DP83867_DOWNSHIFT_2_COUNT 2 #define DP83867_DOWNSHIFT_4_COUNT 4 #define DP83867_DOWNSHIFT_8_COUNT 8 +#define DP83867_SGMII_AUTONEG_EN BIT(7) /* CFG3 bits */ #define DP83867_CFG3_INT_OE BIT(7) @@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_device *phydev) DP83867_PHYCR_FORCE_LINK_GOOD, 0); } +static void dp83867_link_change_notify(struct phy_device *phydev) +{ + /* There is a limitation in DP83867 PHY device where SGMII AN is + * only triggered once after the device is booted up. Even after the + * PHY TPI is down and up again, SGMII AN is not triggered and + * hence no new in-band message from PHY to MAC side SGMII. + * This could cause an issue during power up, when PHY is up prior + * to MAC. At this condition, once MAC side SGMII is up, MAC side + * SGMII wouldn't receive new in-band message from TI PHY with + * correct link status, speed and duplex info. + * Thus, a SW workaround is implemented here to retrigger SGMII Auto-Neg + * whenever there is a link change. + */
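For context on the hook this hunk introduces: phylib invokes a driver's .link_change_notify callback from its state machine whenever it records a PHY state transition (roughly, each time phy_state_machine() changes phydev->state), so the handler runs on link up and link down alike and must be cheap and idempotent. A minimal consumer, with hypothetical names:

    #include <linux/phy.h>

    static void my_link_change_notify(struct phy_device *phydev)
    {
        /* Called for every state transition; filter for the events
         * of interest, e.g. only act once the link is up.
         */
        if (phydev->state != PHY_RUNNING)
            return;

        phydev_info(phydev, "link up, %d Mbps\n", phydev->speed);
    }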
+ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + int val = 0; + + val = phy_clear_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + if (val < 0) + return; + + phy_set_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + } +} + static struct phy_driver dp83867_driver[] = { { .phy_id = DP83867_PHY_ID, @@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, + + .link_change_notify = dp83867_link_change_notify, }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 58d602985877..8a2dbe849866 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void) return ret; } -EXPORT_SYMBOL_GPL(mdio_bus_init); #if IS_ENABLED(CONFIG_PHYLIB) void mdio_bus_exit(void) diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c index a99aedff795d..ea7309453096 100644 --- a/drivers/nfc/nfcmrvl/usb.c +++ b/drivers/nfc/nfcmrvl/usb.c @@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data) int err; while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + usb_anchor_urb(urb, &drv_data->tx_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) + if (err) { + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); break; + } drv_data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Clean up the remaining deferred URBs. */ + while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&drv_data->deferred); } static int nfcmrvl_resume(struct usb_interface *intf) diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 7e213f8ddc98..df8d27cf2956 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -300,6 +300,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, int r = 0; struct device *dev = &hdev->ndev->dev; struct nfc_evt_transaction *transaction; + u32 aid_len; + u8 params_len; pr_debug("connectivity gate event: %x\n", event); @@ -308,43 +310,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, r = nfc_se_connectivity(hdev->ndev, host); break; case ST21NFCA_EVT_TRANSACTION: - /* - * According to specification etsi 102 622 + /* According to specification etsi 102 622 * 11.2.2.4 EVT_TRANSACTION Table 52 * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 + * + * The key differences are that the AID storage length is variably sized + * in the packet but fixed in nfc_evt_transaction, that aid_len + * is u8 in the packet but u32 in the structure, and that the tags in + * the packet are not included in nfc_evt_transaction.
+ * + * size in bytes: 1 1 5-16 1 1 0-255 + * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 + * member name: aid_tag(M) aid_len aid params_tag(M) params_len params + * example: 0x81 5-16 X 0x82 0-255 X */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && - skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; - transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); - if (!transaction) - return -ENOMEM; - - transaction->aid_len = skb->data[1]; + aid_len = skb->data[1]; - /* Checking if the length of the AID is valid */ - if (transaction->aid_len > sizeof(transaction->aid)) - return -EINVAL; + if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid)) + return -EPROTO; - memcpy(transaction->aid, &skb->data[2], - transaction->aid_len); + params_len = skb->data[aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ - if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + /* Verify PARAMETERS tag is (82), and final check that there is enough + * space in the packet to read everything. + */ + if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) || + (skb->len < aid_len + 4 + params_len)) return -EPROTO; - transaction->params_len = skb->data[transaction->aid_len + 3]; + transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL); + if (!transaction) + return -ENOMEM; - /* Total size is allocated (skb->len - 2) minus fixed array members */ - if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) - return -EINVAL; + transaction->aid_len = aid_len; + transaction->params_len = params_len; - memcpy(transaction->params, skb->data + - transaction->aid_len + 4, transaction->params_len); + memcpy(transaction->aid, &skb->data[2], aid_len); + memcpy(transaction->params, &skb->data[aid_len + 4], params_len); r = nfc_se_transaction(hdev->ndev, host, transaction); break; diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig index d421e1482395..6b51ad01f791 100644 --- a/drivers/platform/mips/Kconfig +++ b/drivers/platform/mips/Kconfig @@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES if MIPS_PLATFORM_DEVICES config CPU_HWMON - tristate "Loongson-3 CPU HWMon Driver" + bool "Loongson-3 CPU HWMon Driver" depends on MACH_LOONGSON64 select HWMON default y diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index b5adf6abd241..a6dc8b5846fe 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -6,12 +6,6 @@ config VIRTIO bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG or CONFIG_S390_GUEST. -config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS - bool - help - This option is selected if the architecture may need to enforce - VIRTIO_F_ACCESS_PLATFORM - config VIRTIO_PCI_LIB tristate help diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index ef04a96942bf..6bace84ae37e 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -5,6 +5,7 @@ #include <linux/module.h> #include <linux/idr.h> #include <linux/of.h> +#include <linux/platform-feature.h> #include <uapi/linux/virtio_ids.h> /* Unique numbering for virtio devices. 
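The rewritten EVT_TRANSACTION handler above validates every length against skb->len before anything is used for indexing or allocation. Reduced to a standalone bounds-checking sketch over the raw payload, with the tag macros as used in the driver:

    #include <linux/errno.h>
    #include <linux/types.h>

    static int evt_transaction_check(const u8 *data, size_t len)
    {
        u32 aid_len;
        u8 params_len;

        /* AID TLV: tag, length, then aid_len bytes of AID */
        if (len < 2 || data[0] != NFC_EVT_TRANSACTION_AID_TAG)
            return -EPROTO;
        aid_len = data[1];

        /* Room for the AID plus the PARAMETERS tag and length bytes?
         * (The driver additionally caps aid_len at the size of the
         * destination array in struct nfc_evt_transaction.)
         */
        if (len < (size_t)aid_len + 4)
            return -EPROTO;
        if (data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG)
            return -EPROTO;

        params_len = data[aid_len + 3];
        if (len < (size_t)aid_len + 4 + params_len)
            return -EPROTO;

        return 0;
    }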
*/ @@ -170,12 +171,10 @@ EXPORT_SYMBOL_GPL(virtio_add_status); static int virtio_features_ok(struct virtio_device *dev) { unsigned int status; - int ret; might_sleep(); - ret = arch_has_restricted_virtio_memory_access(); - if (ret) { + if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) { if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) { dev_warn(&dev->dev, "device must provide VIRTIO_F_VERSION_1\n"); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 120d32f164ac..bfd5f4f706bc 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC having to balloon out RAM regions in order to obtain physical memory space to create such mappings. +config XEN_GRANT_DMA_IOMMU + bool + select IOMMU_API + +config XEN_GRANT_DMA_OPS + bool + select DMA_OPS + +config XEN_VIRTIO + bool "Xen virtio support" + depends on VIRTIO + select XEN_GRANT_DMA_OPS + select XEN_GRANT_DMA_IOMMU if OF + help + Enable virtio support for running as Xen guest. Depending on the + guest type this will require special support on the backend side + (qemu or kernel, depending on the virtio device types used). + + If in doubt, say n. + endmenu diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 5aae66e638a7..c0503f1c7d5b 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -39,3 +39,5 @@ xen-gntalloc-y := gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o +obj-$(CONFIG_XEN_GRANT_DMA_OPS) += grant-dma-ops.o +obj-$(CONFIG_XEN_GRANT_DMA_IOMMU) += grant-dma-iommu.o diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c new file mode 100644 index 000000000000..16b8bc0c0b33 --- /dev/null +++ b/drivers/xen/grant-dma-iommu.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stub IOMMU driver which does nothing. + * The main purpose of it being present is to reuse generic IOMMU device tree + * bindings by Xen grant DMA-mapping layer. + * + * Copyright (C) 2022 EPAM Systems Inc. 
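The virtio.c hunk above replaces the per-arch arch_has_restricted_virtio_memory_access() hook with a generic platform feature bit: a platform that must restrict device access to guest memory (for example the Xen grant-DMA code added below) sets the bit early, and virtio then refuses legacy devices lacking VIRTIO_F_VERSION_1. A sketch of the producer side, assuming platform_set() from <linux/platform-feature.h>:

    #include <linux/init.h>
    #include <linux/platform-feature.h>

    /* Hypothetical early-init hook for a guest whose memory must not
     * be freely accessible to backends.
     */
    static void __init my_guest_init(void)
    {
        platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
    }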
+ */ + +#include <linux/iommu.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +struct grant_dma_iommu_device { + struct device *dev; + struct iommu_device iommu; +}; + +/* Nothing is really needed here */ +static const struct iommu_ops grant_dma_iommu_ops; + +static const struct of_device_id grant_dma_iommu_of_match[] = { + { .compatible = "xen,grant-dma" }, + { }, +}; + +static int grant_dma_iommu_probe(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu; + int ret; + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return -ENOMEM; + + mmu->dev = &pdev->dev; + + ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int grant_dma_iommu_remove(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + iommu_device_unregister(&mmu->iommu); + + return 0; +} + +static struct platform_driver grant_dma_iommu_driver = { + .driver = { + .name = "grant-dma-iommu", + .of_match_table = grant_dma_iommu_of_match, + }, + .probe = grant_dma_iommu_probe, + .remove = grant_dma_iommu_remove, +}; + +static int __init grant_dma_iommu_init(void) +{ + struct device_node *iommu_np; + + iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match); + if (!iommu_np) + return 0; + + of_node_put(iommu_np); + + return platform_driver_register(&grant_dma_iommu_driver); +} +subsys_initcall(grant_dma_iommu_init); diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c new file mode 100644 index 000000000000..fc0142484001 --- /dev/null +++ b/drivers/xen/grant-dma-ops.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Xen grant DMA-mapping layer - contains special DMA-mapping routines + * for providing grant references as DMA addresses to be used by frontends + * (e.g. virtio) in Xen guests + * + * Copyright (c) 2021, Juergen Gross <jgross@suse.com> + */ + +#include <linux/module.h> +#include <linux/dma-map-ops.h> +#include <linux/of.h> +#include <linux/pfn.h> +#include <linux/xarray.h> +#include <xen/xen.h> +#include <xen/xen-ops.h> +#include <xen/grant_table.h> + +struct xen_grant_dma_data { + /* The ID of backend domain */ + domid_t backend_domid; + /* Is device behaving sane? */ + bool broken; +}; + +static DEFINE_XARRAY(xen_grant_dma_devices); + +#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63) + +static inline dma_addr_t grant_to_dma(grant_ref_t grant) +{ + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); +} + +static inline grant_ref_t dma_to_grant(dma_addr_t dma) +{ + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); +} + +static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) +{ + struct xen_grant_dma_data *data; + + xa_lock(&xen_grant_dma_devices); + data = xa_load(&xen_grant_dma_devices, (unsigned long)dev); + xa_unlock(&xen_grant_dma_devices); + + return data; +} + +/* + * DMA ops for Xen frontends (e.g. virtio). + * + * Used to act as a kind of software IOMMU for Xen guests by using grants as + * DMA addresses. + * Such a DMA address is formed by using the grant reference as a frame + * number and setting the highest address bit (this bit is for the backend + * to be able to distinguish it from e.g. a mmio address). 
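To make the grant_to_dma()/dma_to_grant() encoding above concrete, here is a worked round-trip under the assumption of 4 KiB pages (PAGE_SHIFT == 12) and a 64-bit dma_addr_t; the top bit marks the address as a grant so the backend can tell it apart from an MMIO address:

    static void grant_dma_addr_example(void)
    {
        grant_ref_t grant = 0x1234;

        /* 0x1234 << 12 = 0x1234000, plus bit 63:
         * dma == 0x8000000001234000
         */
        dma_addr_t dma = grant_to_dma(grant);

        WARN_ON(dma != 0x8000000001234000ULL);
        WARN_ON(dma_to_grant(dma) != grant);
    }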
+ */ +static void *xen_grant_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + unsigned long pfn; + grant_ref_t grant; + void *ret; + + data = find_xen_grant_dma_data(dev); + if (!data) + return NULL; + + if (unlikely(data->broken)) + return NULL; + + ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + if (!ret) + return NULL; + + pfn = virt_to_pfn(ret); + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { + free_pages_exact(ret, n_pages * PAGE_SIZE); + return NULL; + } + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + pfn_to_gfn(pfn + i), 0); + } + + *dma_handle = grant_to_dma(grant); + + return ret; +} + +static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + gnttab_free_grant_reference_seq(grant, n_pages); + + free_pages_exact(vaddr, n_pages * PAGE_SIZE); +} + +static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, + enum dma_data_direction dir, + gfp_t gfp) +{ + void *vaddr; + + vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0); + if (!vaddr) + return NULL; + + return virt_to_page(vaddr); +} + +static void xen_grant_dma_free_pages(struct device *dev, size_t size, + struct page *vaddr, dma_addr_t dma_handle, + enum dma_data_direction dir) +{ + xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0); +} + +static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + dma_addr_t dma_handle; + + if (WARN_ON(dir == DMA_NONE)) + return DMA_MAPPING_ERROR; + + data = find_xen_grant_dma_data(dev); + if (!data) + return DMA_MAPPING_ERROR; + + if (unlikely(data->broken)) + return DMA_MAPPING_ERROR; + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) + return DMA_MAPPING_ERROR; + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + } + + dma_handle = grant_to_dma(grant) + offset; + + return dma_handle; +} + +static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + if (WARN_ON(dir == DMA_NONE)) + return; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + 
gnttab_free_grant_reference_seq(grant, n_pages); +} + +static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return; + + for_each_sg(sg, s, nents, i) + xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir, + attrs); +} + +static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return -EINVAL; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + if (s->dma_address == DMA_MAPPING_ERROR) + goto out; + + sg_dma_len(s) = s->length; + } + + return nents; + +out: + xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + sg_dma_len(sg) = 0; + + return -EIO; +} + +static int xen_grant_dma_supported(struct device *dev, u64 mask) +{ + return mask == DMA_BIT_MASK(64); +} + +static const struct dma_map_ops xen_grant_dma_ops = { + .alloc = xen_grant_dma_alloc, + .free = xen_grant_dma_free, + .alloc_pages = xen_grant_dma_alloc_pages, + .free_pages = xen_grant_dma_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, + .map_page = xen_grant_dma_map_page, + .unmap_page = xen_grant_dma_unmap_page, + .map_sg = xen_grant_dma_map_sg, + .unmap_sg = xen_grant_dma_unmap_sg, + .dma_supported = xen_grant_dma_supported, +}; + +bool xen_is_grant_dma_device(struct device *dev) +{ + struct device_node *iommu_np; + bool has_iommu; + + /* XXX Handle only DT devices for now */ + if (!dev->of_node) + return false; + + iommu_np = of_parse_phandle(dev->of_node, "iommus", 0); + has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma"); + of_node_put(iommu_np); + + return has_iommu; +} + +void xen_grant_setup_dma_ops(struct device *dev) +{ + struct xen_grant_dma_data *data; + struct of_phandle_args iommu_spec; + + data = find_xen_grant_dma_data(dev); + if (data) { + dev_err(dev, "Xen grant DMA data is already created\n"); + return; + } + + /* XXX ACPI device unsupported for now */ + if (!dev->of_node) + goto err; + + if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", + 0, &iommu_spec)) { + dev_err(dev, "Cannot parse iommus property\n"); + goto err; + } + + if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") || + iommu_spec.args_count != 1) { + dev_err(dev, "Incompatible IOMMU node\n"); + of_node_put(iommu_spec.np); + goto err; + } + + of_node_put(iommu_spec.np); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + goto err; + + /* + * The endpoint ID here means the ID of the domain where the corresponding + * backend is running + */ + data->backend_domid = iommu_spec.args[0]; + + if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data, + GFP_KERNEL))) { + dev_err(dev, "Cannot store Xen grant DMA data\n"); + goto err; + } + + dev->dma_ops = &xen_grant_dma_ops; + + return; + +err: + dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n"); +} + +MODULE_DESCRIPTION("Xen grant DMA-mapping layer"); +MODULE_AUTHOR("Juergen Gross <jgross@suse.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7a18292540bc..738029de3c67 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -33,6 +33,7 @@ #define 
pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt +#include <linux/bitmap.h> #include <linux/memblock.h> #include <linux/sched.h> #include <linux/mm.h> @@ -70,9 +71,32 @@ static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; + +/* + * Handling of free grants: + * + * Free grants are in a simple list anchored in gnttab_free_head. They are + * linked by grant ref, the last element contains GNTTAB_LIST_END. The number + * of free entries is stored in gnttab_free_count. + * Additionally there is a bitmap of free entries anchored in + * gnttab_free_bitmap. This is being used for simplifying allocation of + * multiple consecutive grants, which is needed e.g. for support of virtio. + * gnttab_last_free is used to add free entries of new frames at the end of + * the free list. + * gnttab_free_tail_ptr specifies the variable which references the start + * of consecutive free grants ending with gnttab_last_free. This pointer is + * updated in a rather defensive way, in order to avoid performance hits in + * hot paths. + * All those variables are protected by gnttab_list_lock. + */ static int gnttab_free_count; -static grant_ref_t gnttab_free_head; +static unsigned int gnttab_size; +static grant_ref_t gnttab_free_head = GNTTAB_LIST_END; +static grant_ref_t gnttab_last_free = GNTTAB_LIST_END; +static grant_ref_t *gnttab_free_tail_ptr; +static unsigned long *gnttab_free_bitmap; static DEFINE_SPINLOCK(gnttab_list_lock); + struct grant_frames xen_auto_xlat_grant_frames; static unsigned int xen_gnttab_version; module_param_named(version, xen_gnttab_version, uint, 0); @@ -168,16 +192,116 @@ static int get_free_entries(unsigned count) ref = head = gnttab_free_head; gnttab_free_count -= count; - while (count-- > 1) - head = gnttab_entry(head); + while (count--) { + bitmap_clear(gnttab_free_bitmap, head, 1); + if (gnttab_free_tail_ptr == __gnttab_entry(head)) + gnttab_free_tail_ptr = &gnttab_free_head; + if (count) + head = gnttab_entry(head); + } gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_last_free = GNTTAB_LIST_END; + gnttab_free_tail_ptr = NULL; + } + spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } +static int get_seq_entry_count(void) +{ + if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr || + *gnttab_free_tail_ptr == GNTTAB_LIST_END) + return 0; + + return gnttab_last_free - *gnttab_free_tail_ptr + 1; +} + +/* Rebuilds the free grant list and tries to find count consecutive entries. */ +static int get_free_seq(unsigned int count) +{ + int ret = -ENOSPC; + unsigned int from, to; + grant_ref_t *last; + + gnttab_free_tail_ptr = &gnttab_free_head; + last = &gnttab_free_head; + + for (from = find_first_bit(gnttab_free_bitmap, gnttab_size); + from < gnttab_size; + from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) { + to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size, + from + 1); + if (ret < 0 && to - from >= count) { + ret = from; + bitmap_clear(gnttab_free_bitmap, ret, count); + from += count; + gnttab_free_count -= count; + if (from == to) + continue; + } + + /* + * Recreate the free list in order to have it properly sorted. + * This is needed to make sure that the free tail has the maximum + * possible size. 
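get_free_seq() above leans on the generic bitmap helpers to locate a run of free grant entries. Stripped of the free-list rebuilding, the run search reduces to this self-contained model (hypothetical helper; <linux/bitmap.h> provides the find_* primitives):

    #include <linux/bitmap.h>
    #include <linux/errno.h>

    /* Walk runs of set (free) bits and claim the first run of at
     * least count bits, returning its start index.
     */
    static int find_free_run(unsigned long *bitmap, unsigned int size,
                             unsigned int count)
    {
        unsigned int from, to;

        for (from = find_first_bit(bitmap, size); from < size;
             from = find_next_bit(bitmap, size, to + 1)) {
            /* End of the current run of set bits (exclusive) */
            to = find_next_zero_bit(bitmap, size, from + 1);
            if (to - from >= count) {
                bitmap_clear(bitmap, from, count);
                return from;
            }
        }
        return -ENOSPC;
    }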
+ */ + while (from < to) { + *last = from; + last = __gnttab_entry(from); + gnttab_last_free = from; + from++; + } + if (to < gnttab_size) + gnttab_free_tail_ptr = __gnttab_entry(to - 1); + } + + *last = GNTTAB_LIST_END; + if (gnttab_last_free != gnttab_size - 1) + gnttab_free_tail_ptr = NULL; + + return ret; +} + +static int get_free_entries_seq(unsigned int count) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&gnttab_list_lock, flags); + + if (gnttab_free_count < count) { + ret = gnttab_expand(count - gnttab_free_count); + if (ret < 0) + goto out; + } + + if (get_seq_entry_count() < count) { + ret = get_free_seq(count); + if (ret >= 0) + goto out; + ret = gnttab_expand(count - get_seq_entry_count()); + if (ret < 0) + goto out; + } + + ret = *gnttab_free_tail_ptr; + *gnttab_free_tail_ptr = gnttab_entry(ret + count - 1); + gnttab_free_count -= count; + if (!gnttab_free_count) + gnttab_free_tail_ptr = NULL; + bitmap_clear(gnttab_free_bitmap, ret, count); + + out: + spin_unlock_irqrestore(&gnttab_list_lock, flags); + + return ret; +} + static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; @@ -204,21 +328,51 @@ static inline void check_free_callbacks(void) do_free_callbacks(); } -static void put_free_entry(grant_ref_t ref) +static void put_free_entry_locked(grant_ref_t ref) { - unsigned long flags; - if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES)) return; - spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; + if (!gnttab_free_count) + gnttab_last_free = ref; + if (gnttab_free_tail_ptr == &gnttab_free_head) + gnttab_free_tail_ptr = __gnttab_entry(ref); gnttab_free_count++; + bitmap_set(gnttab_free_bitmap, ref, 1); +} + +static void put_free_entry(grant_ref_t ref) +{ + unsigned long flags; + + spin_lock_irqsave(&gnttab_list_lock, flags); + put_free_entry_locked(ref); check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } +static void gnttab_set_free(unsigned int start, unsigned int n) +{ + unsigned int i; + + for (i = start; i < start + n - 1; i++) + gnttab_entry(i) = i + 1; + + gnttab_entry(i) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_free_head = start; + gnttab_free_tail_ptr = &gnttab_free_head; + } else { + gnttab_entry(gnttab_last_free) = start; + } + gnttab_free_count += n; + gnttab_last_free = i; + + bitmap_set(gnttab_free_bitmap, start, n); +} + /* * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. 
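* The ordering matters: the flags word that marks an entry valid is
* written last, behind a write barrier, so the hypervisor never sees a
* half-initialised entry.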
* Introducing a valid entry into the grant table: @@ -450,23 +604,31 @@ void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; - int count = 1; - if (head == GNTTAB_LIST_END) - return; + spin_lock_irqsave(&gnttab_list_lock, flags); - ref = head; - while (gnttab_entry(ref) != GNTTAB_LIST_END) { - ref = gnttab_entry(ref); - count++; + while (head != GNTTAB_LIST_END) { + ref = gnttab_entry(head); + put_free_entry_locked(head); + head = ref; } - gnttab_entry(ref) = gnttab_free_head; - gnttab_free_head = head; - gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); +void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&gnttab_list_lock, flags); + for (i = count; i > 0; i--) + put_free_entry_locked(head + i - 1); + check_free_callbacks(); + spin_unlock_irqrestore(&gnttab_list_lock, flags); +} +EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq); + int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); @@ -480,6 +642,24 @@ int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); +int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first) +{ + int h; + + if (count == 1) + h = get_free_entries(1); + else + h = get_free_entries_seq(count); + + if (h < 0) + return -ENOSPC; + + *first = h; + + return 0; +} +EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq); + int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); @@ -572,16 +752,13 @@ static int grow_gnttab_list(unsigned int more_frames) goto grow_nomem; } + gnttab_set_free(gnttab_size, extra_entries); - for (i = grefs_per_frame * nr_grant_frames; - i < grefs_per_frame * new_nr_grant_frames - 1; i++) - gnttab_entry(i) = i + 1; - - gnttab_entry(i) = gnttab_free_head; - gnttab_free_head = grefs_per_frame * nr_grant_frames; - gnttab_free_count += extra_entries; + if (!gnttab_free_tail_ptr) + gnttab_free_tail_ptr = __gnttab_entry(gnttab_size); nr_grant_frames = new_nr_grant_frames; + gnttab_size += extra_entries; check_free_callbacks(); @@ -1424,20 +1601,20 @@ static int gnttab_expand(unsigned int req_entries) int gnttab_init(void) { int i; - unsigned long max_nr_grant_frames; + unsigned long max_nr_grant_frames, max_nr_grefs; unsigned int max_nr_glist_frames, nr_glist_frames; - unsigned int nr_init_grefs; int ret; gnttab_request_version(); max_nr_grant_frames = gnttab_max_grant_frames(); + max_nr_grefs = max_nr_grant_frames * + gnttab_interface->grefs_per_grant_frame; nr_grant_frames = 1; /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
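* Each page of gnttab_list holds RPP (PAGE_SIZE / sizeof(grant_ref_t))
* list entries, so max_nr_grefs / RPP pages cover the whole table.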
*/ - max_nr_glist_frames = (max_nr_grant_frames * - gnttab_interface->grefs_per_grant_frame / RPP); + max_nr_glist_frames = max_nr_grefs / RPP; gnttab_list = kmalloc_array(max_nr_glist_frames, sizeof(grant_ref_t *), @@ -1454,6 +1631,12 @@ int gnttab_init(void) } } + gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL); + if (!gnttab_free_bitmap) { + ret = -ENOMEM; + goto ini_nomem; + } + ret = arch_gnttab_init(max_nr_grant_frames, nr_status_frames(max_nr_grant_frames)); if (ret < 0) @@ -1464,15 +1647,10 @@ int gnttab_init(void) goto ini_nomem; } - nr_init_grefs = nr_grant_frames * - gnttab_interface->grefs_per_grant_frame; - - for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) - gnttab_entry(i) = i + 1; + gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame; - gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; - gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES; - gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES; + gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES, + gnttab_size - GNTTAB_NR_RESERVED_ENTRIES); printk("Grant table initialized\n"); return 0; @@ -1481,6 +1659,7 @@ int gnttab_init(void) for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); + bitmap_free(gnttab_free_bitmap); return ret; } EXPORT_SYMBOL_GPL(gnttab_init); diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 34742c6e189e..f17c4c03db30 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, return 0; } -EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); struct remap_pfn { struct mm_struct *mm; diff --git a/fs/9p/cache.c b/fs/9p/cache.c index 1c8dc696d516..cebba4eaa0b5 100644 --- a/fs/9p/cache.c +++ b/fs/9p/cache.c @@ -62,12 +62,12 @@ void v9fs_cache_inode_get_cookie(struct inode *inode) version = cpu_to_le32(v9inode->qid.version); path = cpu_to_le64(v9inode->qid.path); v9ses = v9fs_inode2v9ses(inode); - v9inode->netfs_ctx.cache = + v9inode->netfs.cache = fscache_acquire_cookie(v9fs_session_cache(v9ses), 0, &path, sizeof(path), &version, sizeof(version), - i_size_read(&v9inode->vfs_inode)); + i_size_read(&v9inode->netfs.inode)); p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n", inode, v9fs_inode_cookie(v9inode)); diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index e28ddf763b3b..0129de2ea31a 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -625,7 +625,7 @@ static void v9fs_inode_init_once(void *foo) struct v9fs_inode *v9inode = (struct v9fs_inode *)foo; memset(&v9inode->qid, 0, sizeof(v9inode->qid)); - inode_init_once(&v9inode->vfs_inode); + inode_init_once(&v9inode->netfs.inode); } /** diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h index ec0e8df3b2eb..1b219c21d15e 100644 --- a/fs/9p/v9fs.h +++ b/fs/9p/v9fs.h @@ -109,11 +109,7 @@ struct v9fs_session_info { #define V9FS_INO_INVALID_ATTR 0x01 struct v9fs_inode { - struct { - /* These must be contiguous */ - struct inode vfs_inode; /* the VFS's inode record */ - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; + struct netfs_inode netfs; /* Netfslib context and vfs inode */ struct p9_qid qid; unsigned int cache_validity; struct p9_fid *writeback_fid; @@ -122,13 +118,13 @@ struct v9fs_inode { static inline struct v9fs_inode *V9FS_I(const struct inode *inode) { - return container_of(inode, struct v9fs_inode, vfs_inode); + return container_of(inode, struct v9fs_inode, netfs.inode); } static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inode) { #ifdef 
CONFIG_9P_FSCACHE - return netfs_i_cookie(&v9inode->vfs_inode); + return netfs_i_cookie(&v9inode->netfs.inode); #else return NULL; #endif diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 8ce82ff1e40a..90c6c1ba03ab 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -140,7 +140,7 @@ static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error, transferred_or_error != -ENOBUFS) { version = cpu_to_le32(v9inode->qid.version); fscache_invalidate(v9fs_inode_cookie(v9inode), &version, - i_size_read(&v9inode->vfs_inode), 0); + i_size_read(&v9inode->netfs.inode), 0); } } diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 55367ecb9442..e660c6348b9d 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -234,7 +234,7 @@ struct inode *v9fs_alloc_inode(struct super_block *sb) v9inode->writeback_fid = NULL; v9inode->cache_validity = 0; mutex_init(&v9inode->v_mutex); - return &v9inode->vfs_inode; + return &v9inode->netfs.inode; } /** @@ -252,7 +252,7 @@ void v9fs_free_inode(struct inode *inode) */ static void v9fs_set_netfs_context(struct inode *inode) { - netfs_i_context_init(inode, &v9fs_req_ops); + netfs_inode_init(inode, &v9fs_req_ops); } int v9fs_init_inode(struct v9fs_session_info *v9ses, diff --git a/fs/afs/callback.c b/fs/afs/callback.c index 1b4d5809808d..a484fa642808 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c @@ -30,7 +30,7 @@ void afs_invalidate_mmap_work(struct work_struct *work) { struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work); - unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false); + unmap_mapping_pages(vnode->netfs.inode.i_mapping, 0, 0, false); } void afs_server_init_callback_work(struct work_struct *work) diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 79f6b74336d2..56ae5cd5184f 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -109,7 +109,7 @@ struct afs_lookup_cookie { */ static void afs_dir_read_cleanup(struct afs_read *req) { - struct address_space *mapping = req->vnode->vfs_inode.i_mapping; + struct address_space *mapping = req->vnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; @@ -153,7 +153,7 @@ static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio, block = kmap_local_folio(folio, offset); if (block->hdr.magic != AFS_DIR_MAGIC) { printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n", - __func__, dvnode->vfs_inode.i_ino, + __func__, dvnode->netfs.inode.i_ino, pos, offset, size, ntohs(block->hdr.magic)); trace_afs_dir_check_failed(dvnode, pos + offset, i_size); kunmap_local(block); @@ -183,7 +183,7 @@ error: static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req) { union afs_xdr_dir_block *block; - struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; size_t offset, size; @@ -217,7 +217,7 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req) */ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req) { - struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct address_space *mapping = dvnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t last = req->nr_pages - 1; int ret = 0; @@ -269,7 +269,7 @@ static int afs_dir_open(struct inode *inode, struct file *file) static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) __acquires(&dvnode->validate_lock) { - struct address_space *mapping = dvnode->vfs_inode.i_mapping; + struct 
address_space *mapping = dvnode->netfs.inode.i_mapping; struct afs_read *req; loff_t i_size; int nr_pages, i; @@ -287,7 +287,7 @@ static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key) req->cleanup = afs_dir_read_cleanup; expand: - i_size = i_size_read(&dvnode->vfs_inode); + i_size = i_size_read(&dvnode->netfs.inode); if (i_size < 2048) { ret = afs_bad(dvnode, afs_file_error_dir_small); goto error; @@ -305,7 +305,7 @@ expand: req->actual_len = i_size; /* May change */ req->len = nr_pages * PAGE_SIZE; /* We can ask for more than there is */ req->data_version = dvnode->status.data_version; /* May change */ - iov_iter_xarray(&req->def_iter, READ, &dvnode->vfs_inode.i_mapping->i_pages, + iov_iter_xarray(&req->def_iter, READ, &dvnode->netfs.inode.i_mapping->i_pages, 0, i_size); req->iter = &req->def_iter; @@ -897,7 +897,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, out_op: if (op->error == 0) { - inode = &op->file[1].vnode->vfs_inode; + inode = &op->file[1].vnode->netfs.inode; op->file[1].vnode = NULL; } @@ -1139,7 +1139,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) afs_stat_v(dir, n_reval); /* search the directory for this vnode */ - ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version); + ret = afs_do_lookup_one(&dir->netfs.inode, dentry, &fid, key, &dir_version); switch (ret) { case 0: /* the filename maps to something */ @@ -1170,7 +1170,7 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) _debug("%pd: file deleted (uq %u -> %u I:%u)", dentry, fid.unique, vnode->fid.unique, - vnode->vfs_inode.i_generation); + vnode->netfs.inode.i_generation); goto not_found; } goto out_valid; @@ -1368,7 +1368,7 @@ static void afs_dir_remove_subdir(struct dentry *dentry) if (d_really_is_positive(dentry)) { struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - clear_nlink(&vnode->vfs_inode); + clear_nlink(&vnode->netfs.inode); set_bit(AFS_VNODE_DELETED, &vnode->flags); clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags); clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); @@ -1487,8 +1487,8 @@ static void afs_dir_remove_link(struct afs_operation *op) /* Already done */ } else if (test_bit(AFS_VNODE_DIR_VALID, &dvnode->flags)) { write_seqlock(&vnode->cb_lock); - drop_nlink(&vnode->vfs_inode); - if (vnode->vfs_inode.i_nlink == 0) { + drop_nlink(&vnode->netfs.inode); + if (vnode->netfs.inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); __afs_break_callback(vnode, afs_cb_break_for_unlink); } @@ -1504,7 +1504,7 @@ static void afs_dir_remove_link(struct afs_operation *op) op->error = ret; } - _debug("nlink %d [val %d]", vnode->vfs_inode.i_nlink, op->error); + _debug("nlink %d [val %d]", vnode->netfs.inode.i_nlink, op->error); } static void afs_unlink_success(struct afs_operation *op) @@ -1680,8 +1680,8 @@ static void afs_link_success(struct afs_operation *op) afs_update_dentry_version(op, dvp, op->dentry); if (op->dentry_2->d_parent == op->dentry->d_parent) afs_update_dentry_version(op, dvp, op->dentry_2); - ihold(&vp->vnode->vfs_inode); - d_instantiate(op->dentry, &vp->vnode->vfs_inode); + ihold(&vp->vnode->netfs.inode); + d_instantiate(op->dentry, &vp->vnode->netfs.inode); } static void afs_link_put(struct afs_operation *op) diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c index d98e109ecee9..0ab7752d1b75 100644 --- a/fs/afs/dir_edit.c +++ b/fs/afs/dir_edit.c @@ -109,7 +109,7 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block, */ static struct folio 
*afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index) { - struct address_space *mapping = vnode->vfs_inode.i_mapping; + struct address_space *mapping = vnode->netfs.inode.i_mapping; struct folio *folio; folio = __filemap_get_folio(mapping, index, @@ -216,7 +216,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode, _enter(",,{%d,%s},", name->len, name->name); - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags); @@ -336,7 +336,7 @@ found_space: if (b < AFS_DIR_BLOCKS_WITH_CTR) meta->meta.alloc_ctrs[b] -= need_slots; - inode_inc_iversion_raw(&vnode->vfs_inode); + inode_inc_iversion_raw(&vnode->netfs.inode); afs_stat_v(vnode, n_dir_cr); _debug("Insert %s in %u[%u]", name->name, b, slot); @@ -383,7 +383,7 @@ void afs_edit_dir_remove(struct afs_vnode *vnode, _enter(",,{%d,%s},", name->len, name->name); - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (i_size < AFS_DIR_BLOCK_SIZE || i_size > AFS_DIR_BLOCK_SIZE * AFS_DIR_MAX_BLOCKS || (i_size & (AFS_DIR_BLOCK_SIZE - 1))) { @@ -463,7 +463,7 @@ found_dirent: if (b < AFS_DIR_BLOCKS_WITH_CTR) meta->meta.alloc_ctrs[b] += need_slots; - inode_set_iversion_raw(&vnode->vfs_inode, vnode->status.data_version); + inode_set_iversion_raw(&vnode->netfs.inode, vnode->status.data_version); afs_stat_v(vnode, n_dir_rm); _debug("Remove %s from %u[%u]", name->name, b, slot); diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c index 45cfd50a9521..bb5807e87fa4 100644 --- a/fs/afs/dir_silly.c +++ b/fs/afs/dir_silly.c @@ -131,7 +131,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, goto out; } while (!d_is_negative(sdentry)); - ihold(&vnode->vfs_inode); + ihold(&vnode->netfs.inode); ret = afs_do_silly_rename(dvnode, vnode, dentry, sdentry, key); switch (ret) { @@ -148,7 +148,7 @@ int afs_sillyrename(struct afs_vnode *dvnode, struct afs_vnode *vnode, d_drop(sdentry); } - iput(&vnode->vfs_inode); + iput(&vnode->netfs.inode); dput(sdentry); out: _leave(" = %d", ret); diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index f120bcb8bf73..3a5bbffdf053 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -76,7 +76,7 @@ struct inode *afs_iget_pseudo_dir(struct super_block *sb, bool root) /* there shouldn't be an existing inode */ BUG_ON(!(inode->i_state & I_NEW)); - netfs_i_context_init(inode, NULL); + netfs_inode_init(inode, NULL); inode->i_size = 0; inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO; if (root) { diff --git a/fs/afs/file.c b/fs/afs/file.c index a8e8832179e4..4de7af7c2f09 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -194,7 +194,7 @@ int afs_release(struct inode *inode, struct file *file) afs_put_wb_key(af->wb); if ((file->f_mode & FMODE_WRITE)) { - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); afs_set_cache_aux(vnode, &aux); fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size); } else { @@ -325,7 +325,7 @@ static void afs_issue_read(struct netfs_io_subrequest *subreq) fsreq->iter = &fsreq->def_iter; iov_iter_xarray(&fsreq->def_iter, READ, - &fsreq->vnode->vfs_inode.i_mapping->i_pages, + &fsreq->vnode->netfs.inode.i_mapping->i_pages, fsreq->pos, fsreq->len); afs_fetch_data(fsreq->vnode, fsreq); diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index d222dfbe976b..7a3803ce3a22 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -232,14 +232,14 @@ int 
afs_put_operation(struct afs_operation *op) if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode) clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags); if (op->file[0].put_vnode) - iput(&op->file[0].vnode->vfs_inode); + iput(&op->file[0].vnode->netfs.inode); if (op->file[1].put_vnode) - iput(&op->file[1].vnode->vfs_inode); + iput(&op->file[1].vnode->netfs.inode); if (op->more_files) { for (i = 0; i < op->nr_files - 2; i++) if (op->more_files[i].put_vnode) - iput(&op->more_files[i].vnode->vfs_inode); + iput(&op->more_files[i].vnode->netfs.inode); kfree(op->more_files); } diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 30b066299d39..22811e9eacf5 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c @@ -58,7 +58,7 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren */ static void afs_set_netfs_context(struct afs_vnode *vnode) { - netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops); + netfs_inode_init(&vnode->netfs.inode, &afs_req_ops); } /* @@ -96,7 +96,7 @@ static int afs_inode_init_from_status(struct afs_operation *op, inode->i_flags |= S_NOATIME; inode->i_uid = make_kuid(&init_user_ns, status->owner); inode->i_gid = make_kgid(&init_user_ns, status->group); - set_nlink(&vnode->vfs_inode, status->nlink); + set_nlink(&vnode->netfs.inode, status->nlink); switch (status->type) { case AFS_FTYPE_FILE: @@ -139,7 +139,7 @@ static int afs_inode_init_from_status(struct afs_operation *op, afs_set_netfs_context(vnode); vnode->invalid_before = status->data_version; - inode_set_iversion_raw(&vnode->vfs_inode, status->data_version); + inode_set_iversion_raw(&vnode->netfs.inode, status->data_version); if (!vp->scb.have_cb) { /* it's a symlink we just created (the fileserver @@ -163,7 +163,7 @@ static void afs_apply_status(struct afs_operation *op, { struct afs_file_status *status = &vp->scb.status; struct afs_vnode *vnode = vp->vnode; - struct inode *inode = &vnode->vfs_inode; + struct inode *inode = &vnode->netfs.inode; struct timespec64 t; umode_t mode; bool data_changed = false; @@ -246,7 +246,7 @@ static void afs_apply_status(struct afs_operation *op, * idea of what the size should be that's not the same as * what's on the server. 
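* remote_i_size tracks the server's view of the file size; the local
* i_size may run ahead of it while buffered writes are still dirty.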
*/ - vnode->netfs_ctx.remote_i_size = status->size; + vnode->netfs.remote_i_size = status->size; if (change_size) { afs_set_i_size(vnode, status->size); inode->i_ctime = t; @@ -289,7 +289,7 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v */ if (vp->scb.status.abort_code == VNOVNODE) { set_bit(AFS_VNODE_DELETED, &vnode->flags); - clear_nlink(&vnode->vfs_inode); + clear_nlink(&vnode->netfs.inode); __afs_break_callback(vnode, afs_cb_break_for_deleted); op->flags &= ~AFS_OPERATION_DIR_CONFLICT; } @@ -306,8 +306,8 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v if (vp->scb.have_cb) afs_apply_callback(op, vp); } else if (vp->op_unlinked && !(op->flags & AFS_OPERATION_DIR_CONFLICT)) { - drop_nlink(&vnode->vfs_inode); - if (vnode->vfs_inode.i_nlink == 0) { + drop_nlink(&vnode->netfs.inode); + if (vnode->netfs.inode.i_nlink == 0) { set_bit(AFS_VNODE_DELETED, &vnode->flags); __afs_break_callback(vnode, afs_cb_break_for_deleted); } @@ -326,7 +326,7 @@ static void afs_fetch_status_success(struct afs_operation *op) struct afs_vnode *vnode = vp->vnode; int ret; - if (vnode->vfs_inode.i_state & I_NEW) { + if (vnode->netfs.inode.i_state & I_NEW) { ret = afs_inode_init_from_status(op, vp, vnode); op->error = ret; if (ret == 0) @@ -430,7 +430,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) struct afs_vnode_cache_aux aux; if (vnode->status.type != AFS_FTYPE_FILE) { - vnode->netfs_ctx.cache = NULL; + vnode->netfs.cache = NULL; return; } @@ -457,7 +457,7 @@ static void afs_get_inode_cache(struct afs_vnode *vnode) struct inode *afs_iget(struct afs_operation *op, struct afs_vnode_param *vp) { struct afs_vnode_param *dvp = &op->file[0]; - struct super_block *sb = dvp->vnode->vfs_inode.i_sb; + struct super_block *sb = dvp->vnode->netfs.inode.i_sb; struct afs_vnode *vnode; struct inode *inode; int ret; @@ -582,10 +582,10 @@ static void afs_zap_data(struct afs_vnode *vnode) /* nuke all the non-dirty pages that aren't locked, mapped or being * written back in a regular file and completely discard the pages in a * directory or symlink */ - if (S_ISREG(vnode->vfs_inode.i_mode)) - invalidate_remote_inode(&vnode->vfs_inode); + if (S_ISREG(vnode->netfs.inode.i_mode)) + invalidate_remote_inode(&vnode->netfs.inode); else - invalidate_inode_pages2(vnode->vfs_inode.i_mapping); + invalidate_inode_pages2(vnode->netfs.inode.i_mapping); } /* @@ -683,8 +683,8 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) key_serial(key)); if (unlikely(test_bit(AFS_VNODE_DELETED, &vnode->flags))) { - if (vnode->vfs_inode.i_nlink) - clear_nlink(&vnode->vfs_inode); + if (vnode->netfs.inode.i_nlink) + clear_nlink(&vnode->netfs.inode); goto valid; } @@ -826,7 +826,7 @@ void afs_evict_inode(struct inode *inode) static void afs_setattr_success(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; - struct inode *inode = &vp->vnode->vfs_inode; + struct inode *inode = &vp->vnode->netfs.inode; loff_t old_i_size = i_size_read(inode); op->setattr.old_i_size = old_i_size; @@ -843,7 +843,7 @@ static void afs_setattr_success(struct afs_operation *op) static void afs_setattr_edit_file(struct afs_operation *op) { struct afs_vnode_param *vp = &op->file[0]; - struct inode *inode = &vp->vnode->vfs_inode; + struct inode *inode = &vp->vnode->netfs.inode; if (op->setattr.attr->ia_valid & ATTR_SIZE) { loff_t size = op->setattr.attr->ia_size; @@ -875,7 +875,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry, ATTR_MTIME | 
ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH; struct afs_operation *op; struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry)); - struct inode *inode = &vnode->vfs_inode; + struct inode *inode = &vnode->netfs.inode; loff_t i_size; int ret; diff --git a/fs/afs/internal.h b/fs/afs/internal.h index a30995901266..984b113a9107 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -619,12 +619,7 @@ enum afs_lock_state { * leak from one inode to another. */ struct afs_vnode { - struct { - /* These must be contiguous */ - struct inode vfs_inode; /* the VFS's inode record */ - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; - + struct netfs_inode netfs; /* Netfslib context and vfs inode */ struct afs_volume *volume; /* volume on which vnode resides */ struct afs_fid fid; /* the file identifier for this inode */ struct afs_file_status status; /* AFS status info for this file */ @@ -675,7 +670,7 @@ struct afs_vnode { static inline struct fscache_cookie *afs_vnode_cache(struct afs_vnode *vnode) { #ifdef CONFIG_AFS_FSCACHE - return netfs_i_cookie(&vnode->vfs_inode); + return netfs_i_cookie(&vnode->netfs.inode); #else return NULL; #endif @@ -685,7 +680,7 @@ static inline void afs_vnode_set_cache(struct afs_vnode *vnode, struct fscache_cookie *cookie) { #ifdef CONFIG_AFS_FSCACHE - vnode->netfs_ctx.cache = cookie; + vnode->netfs.cache = cookie; #endif } @@ -892,7 +887,7 @@ static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int fl afs_set_cache_aux(vnode, &aux); fscache_invalidate(afs_vnode_cache(vnode), &aux, - i_size_read(&vnode->vfs_inode), flags); + i_size_read(&vnode->netfs.inode), flags); } /* @@ -1217,7 +1212,7 @@ static inline struct afs_net *afs_i2net(struct inode *inode) static inline struct afs_net *afs_v2net(struct afs_vnode *vnode) { - return afs_i2net(&vnode->vfs_inode); + return afs_i2net(&vnode->netfs.inode); } static inline struct afs_net *afs_sock2net(struct sock *sk) @@ -1593,12 +1588,12 @@ extern void yfs_fs_store_opaque_acl2(struct afs_operation *); */ static inline struct afs_vnode *AFS_FS_I(struct inode *inode) { - return container_of(inode, struct afs_vnode, vfs_inode); + return container_of(inode, struct afs_vnode, netfs.inode); } static inline struct inode *AFS_VNODE_TO_I(struct afs_vnode *vnode) { - return &vnode->vfs_inode; + return &vnode->netfs.inode; } /* @@ -1621,8 +1616,8 @@ static inline void afs_update_dentry_version(struct afs_operation *op, */ static inline void afs_set_i_size(struct afs_vnode *vnode, u64 size) { - i_size_write(&vnode->vfs_inode, size); - vnode->vfs_inode.i_blocks = ((size + 1023) >> 10) << 1; + i_size_write(&vnode->netfs.inode, size); + vnode->netfs.inode.i_blocks = ((size + 1023) >> 10) << 1; } /* diff --git a/fs/afs/super.c b/fs/afs/super.c index 1fea195b0b27..95d713074dc8 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -659,7 +659,7 @@ static void afs_i_init_once(void *_vnode) struct afs_vnode *vnode = _vnode; memset(vnode, 0, sizeof(*vnode)); - inode_init_once(&vnode->vfs_inode); + inode_init_once(&vnode->netfs.inode); mutex_init(&vnode->io_lock); init_rwsem(&vnode->validate_lock); spin_lock_init(&vnode->wb_lock); @@ -700,8 +700,8 @@ static struct inode *afs_alloc_inode(struct super_block *sb) init_rwsem(&vnode->rmdir_lock); INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work); - _leave(" = %p", &vnode->vfs_inode); - return &vnode->vfs_inode; + _leave(" = %p", &vnode->netfs.inode); + return &vnode->netfs.inode; } static void afs_free_inode(struct inode *inode) diff --git a/fs/afs/write.c b/fs/afs/write.c 
index 2236b2165e37..f80a6096d91c 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -146,10 +146,10 @@ int afs_write_end(struct file *file, struct address_space *mapping, write_end_pos = pos + copied; - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (write_end_pos > i_size) { write_seqlock(&vnode->cb_lock); - i_size = i_size_read(&vnode->vfs_inode); + i_size = i_size_read(&vnode->netfs.inode); if (write_end_pos > i_size) afs_set_i_size(vnode, write_end_pos); write_sequnlock(&vnode->cb_lock); @@ -257,7 +257,7 @@ static void afs_redirty_pages(struct writeback_control *wbc, */ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len) { - struct address_space *mapping = vnode->vfs_inode.i_mapping; + struct address_space *mapping = vnode->netfs.inode.i_mapping; struct folio *folio; pgoff_t end; @@ -354,7 +354,6 @@ static const struct afs_operation_ops afs_store_data_operation = { static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos, bool laundering) { - struct netfs_i_context *ictx = &vnode->netfs_ctx; struct afs_operation *op; struct afs_wb_key *wbk = NULL; loff_t size = iov_iter_count(iter); @@ -385,9 +384,9 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t op->store.write_iter = iter; op->store.pos = pos; op->store.size = size; - op->store.i_size = max(pos + size, ictx->remote_i_size); + op->store.i_size = max(pos + size, vnode->netfs.remote_i_size); op->store.laundering = laundering; - op->mtime = vnode->vfs_inode.i_mtime; + op->mtime = vnode->netfs.inode.i_mtime; op->flags |= AFS_OPERATION_UNINTR; op->ops = &afs_store_data_operation; @@ -554,7 +553,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping, struct iov_iter iter; unsigned long priv; unsigned int offset, to, len, max_len; - loff_t i_size = i_size_read(&vnode->vfs_inode); + loff_t i_size = i_size_read(&vnode->netfs.inode); bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags); bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode)); long count = wbc->nr_to_write; @@ -845,7 +844,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from) _enter("{%llx:%llu},{%zu},", vnode->fid.vid, vnode->fid.vnode, count); - if (IS_SWAPFILE(&vnode->vfs_inode)) { + if (IS_SWAPFILE(&vnode->netfs.inode)) { printk(KERN_INFO "AFS: Attempt to write to active swap file!\n"); return -EBUSY; @@ -958,8 +957,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode) /* Discard unused keys */ spin_lock(&vnode->wb_lock); - if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) && - !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) { + if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) && + !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) { list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) { if (refcount_read(&wbk->usage) == 1) list_move(&wbk->vnode_link, &graveyard); @@ -1034,6 +1033,6 @@ static void afs_write_to_cache(struct afs_vnode *vnode, bool caching) { fscache_write_to_cache(afs_vnode_cache(vnode), - vnode->vfs_inode.i_mapping, start, len, i_size, + vnode->netfs.inode.i_mapping, start, len, i_size, afs_write_to_cache_done, vnode, caching); } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e5221be6eb55..f5f116ed1b9e 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1798,7 +1798,7 @@ enum { static int __ceph_pool_perm_get(struct ceph_inode_info *ci, s64 pool, struct 
ceph_string *pool_ns) { - struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); + struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode); struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; struct rb_node **p, *parent; @@ -1913,7 +1913,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, 0, false, true); err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); - wr_req->r_mtime = ci->vfs_inode.i_mtime; + wr_req->r_mtime = ci->netfs.inode.i_mtime; err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); if (!err) diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c index ddea99922073..177d8e8d73fe 100644 --- a/fs/ceph/cache.c +++ b/fs/ceph/cache.c @@ -29,9 +29,9 @@ void ceph_fscache_register_inode_cookie(struct inode *inode) if (!(inode->i_state & I_NEW)) return; - WARN_ON_ONCE(ci->netfs_ctx.cache); + WARN_ON_ONCE(ci->netfs.cache); - ci->netfs_ctx.cache = + ci->netfs.cache = fscache_acquire_cookie(fsc->fscache, 0, &ci->i_vino, sizeof(ci->i_vino), &ci->i_version, sizeof(ci->i_version), diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h index 7255b790a4c1..26c6ae06e2f4 100644 --- a/fs/ceph/cache.h +++ b/fs/ceph/cache.h @@ -28,7 +28,7 @@ void ceph_fscache_invalidate(struct inode *inode, bool dio_write); static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci) { - return netfs_i_cookie(&ci->vfs_inode); + return netfs_i_cookie(&ci->netfs.inode); } static inline void ceph_fscache_resize(struct inode *inode, loff_t to) diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index bf2e94005598..38c930384d41 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -492,7 +492,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc, struct ceph_mount_options *opt = mdsc->fsc->mount_options; ci->i_hold_caps_max = round_jiffies(jiffies + opt->caps_wanted_delay_max * HZ); - dout("__cap_set_timeouts %p %lu\n", &ci->vfs_inode, + dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode, ci->i_hold_caps_max - jiffies); } @@ -507,7 +507,7 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc, static void __cap_delay_requeue(struct ceph_mds_client *mdsc, struct ceph_inode_info *ci) { - dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->vfs_inode, + dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode, ci->i_ceph_flags, ci->i_hold_caps_max); if (!mdsc->stopping) { spin_lock(&mdsc->cap_delay_lock); @@ -531,7 +531,7 @@ no_change: static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, struct ceph_inode_info *ci) { - dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode); + dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode); spin_lock(&mdsc->cap_delay_lock); ci->i_ceph_flags |= CEPH_I_FLUSH; if (!list_empty(&ci->i_cap_delay_list)) @@ -548,7 +548,7 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc, static void __cap_delay_cancel(struct ceph_mds_client *mdsc, struct ceph_inode_info *ci) { - dout("__cap_delay_cancel %p\n", &ci->vfs_inode); + dout("__cap_delay_cancel %p\n", &ci->netfs.inode); if (list_empty(&ci->i_cap_delay_list)) return; spin_lock(&mdsc->cap_delay_lock); @@ -568,7 +568,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, * Each time we receive FILE_CACHE anew, we increment * i_rdcache_gen. 
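* The bumped generation lets us tell pages cached under an earlier
* grant of FILE_CACHE apart from ones cached under the current one.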
*/ - if (S_ISREG(ci->vfs_inode.i_mode) && + if (S_ISREG(ci->netfs.inode.i_mode) && (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) { ci->i_rdcache_gen++; @@ -583,14 +583,14 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) { if (issued & CEPH_CAP_FILE_SHARED) atomic_inc(&ci->i_shared_gen); - if (S_ISDIR(ci->vfs_inode.i_mode)) { - dout(" marking %p NOT complete\n", &ci->vfs_inode); + if (S_ISDIR(ci->netfs.inode.i_mode)) { + dout(" marking %p NOT complete\n", &ci->netfs.inode); __ceph_dir_clear_complete(ci); } } /* Wipe saved layout if we're losing DIR_CREATE caps */ - if (S_ISDIR(ci->vfs_inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) && + if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) && !(issued & CEPH_CAP_DIR_CREATE)) { ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns)); memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout)); @@ -771,7 +771,7 @@ static int __cap_is_valid(struct ceph_cap *cap) if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) { dout("__cap_is_valid %p cap %p issued %s " - "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode, + "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode, cap, ceph_cap_string(cap->issued), cap->cap_gen, gen); return 0; } @@ -797,7 +797,7 @@ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented) if (!__cap_is_valid(cap)) continue; dout("__ceph_caps_issued %p cap %p issued %s\n", - &ci->vfs_inode, cap, ceph_cap_string(cap->issued)); + &ci->netfs.inode, cap, ceph_cap_string(cap->issued)); have |= cap->issued; if (implemented) *implemented |= cap->implemented; @@ -844,12 +844,12 @@ static void __touch_cap(struct ceph_cap *cap) spin_lock(&s->s_cap_lock); if (!s->s_cap_iterator) { - dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap, + dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap, s->s_mds); list_move_tail(&cap->session_caps, &s->s_caps); } else { dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n", - &cap->ci->vfs_inode, cap, s->s_mds); + &cap->ci->netfs.inode, cap, s->s_mds); } spin_unlock(&s->s_cap_lock); } @@ -867,7 +867,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) if ((have & mask) == mask) { dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s" - " (mask %s)\n", ceph_ino(&ci->vfs_inode), + " (mask %s)\n", ceph_ino(&ci->netfs.inode), ceph_cap_string(have), ceph_cap_string(mask)); return 1; @@ -879,7 +879,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) continue; if ((cap->issued & mask) == mask) { dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s" - " (mask %s)\n", ceph_ino(&ci->vfs_inode), cap, + " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap, ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) @@ -891,7 +891,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) have |= cap->issued; if ((have & mask) == mask) { dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s" - " (mask %s)\n", ceph_ino(&ci->vfs_inode), + " (mask %s)\n", ceph_ino(&ci->netfs.inode), ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) { @@ -919,7 +919,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask, int touch) { - struct ceph_fs_client *fsc = 
ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); int r; r = __ceph_caps_issued_mask(ci, mask, touch); @@ -950,7 +950,7 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci, int ceph_caps_revoking(struct ceph_inode_info *ci, int mask) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; int ret; spin_lock(&ci->i_ceph_lock); @@ -969,8 +969,8 @@ int __ceph_caps_used(struct ceph_inode_info *ci) if (ci->i_rd_ref) used |= CEPH_CAP_FILE_RD; if (ci->i_rdcache_ref || - (S_ISREG(ci->vfs_inode.i_mode) && - ci->vfs_inode.i_data.nrpages)) + (S_ISREG(ci->netfs.inode.i_mode) && + ci->netfs.inode.i_data.nrpages)) used |= CEPH_CAP_FILE_CACHE; if (ci->i_wr_ref) used |= CEPH_CAP_FILE_WR; @@ -993,11 +993,11 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci) const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR); const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY); struct ceph_mount_options *opt = - ceph_inode_to_client(&ci->vfs_inode)->mount_options; + ceph_inode_to_client(&ci->netfs.inode)->mount_options; unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ; unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ; - if (S_ISDIR(ci->vfs_inode.i_mode)) { + if (S_ISDIR(ci->netfs.inode.i_mode)) { int want = 0; /* use used_cutoff here, to keep dir's wanted caps longer */ @@ -1050,7 +1050,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci) int __ceph_caps_wanted(struct ceph_inode_info *ci) { int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci); - if (S_ISDIR(ci->vfs_inode.i_mode)) { + if (S_ISDIR(ci->netfs.inode.i_mode)) { /* we want EXCL if holding caps of dir ops */ if (w & CEPH_CAP_ANY_DIR_OPS) w |= CEPH_CAP_FILE_EXCL; @@ -1116,9 +1116,9 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) lockdep_assert_held(&ci->i_ceph_lock); - dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode); + dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode); - mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc; + mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc; /* remove from inode's cap rbtree, and clear auth cap */ rb_erase(&cap->ci_node, &ci->i_caps); @@ -1169,7 +1169,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) * keep i_snap_realm. 
*/ if (ci->i_wr_ref == 0 && ci->i_snap_realm) - ceph_change_snap_realm(&ci->vfs_inode, NULL); + ceph_change_snap_realm(&ci->netfs.inode, NULL); __cap_delay_cancel(mdsc, ci); } @@ -1188,11 +1188,11 @@ void ceph_remove_cap(struct ceph_cap *cap, bool queue_release) lockdep_assert_held(&ci->i_ceph_lock); - fsc = ceph_inode_to_client(&ci->vfs_inode); + fsc = ceph_inode_to_client(&ci->netfs.inode); WARN_ON_ONCE(ci->i_auth_cap == cap && !list_empty(&ci->i_dirty_item) && !fsc->blocklisted && - !ceph_inode_is_shutdown(&ci->vfs_inode)); + !ceph_inode_is_shutdown(&ci->netfs.inode)); __ceph_remove_cap(cap, queue_release); } @@ -1343,7 +1343,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap, int flushing, u64 flush_tid, u64 oldest_flush_tid) { struct ceph_inode_info *ci = cap->ci; - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; int held, revoking; lockdep_assert_held(&ci->i_ceph_lock); @@ -1440,7 +1440,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap, static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci) { struct ceph_msg *msg; - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false); if (!msg) { @@ -1528,7 +1528,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci, __releases(ci->i_ceph_lock) __acquires(ci->i_ceph_lock) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = session->s_mdsc; struct ceph_cap_snap *capsnap; u64 oldest_flush_tid = 0; @@ -1622,7 +1622,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci, void ceph_flush_snaps(struct ceph_inode_info *ci, struct ceph_mds_session **psession) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; struct ceph_mds_session *session = NULL; int mds; @@ -1682,8 +1682,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, struct ceph_cap_flush **pcf) { struct ceph_mds_client *mdsc = - ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; - struct inode *inode = &ci->vfs_inode; + ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc; + struct inode *inode = &ci->netfs.inode; int was = ci->i_dirty_caps; int dirty = 0; @@ -1696,7 +1696,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, return 0; } - dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode, + dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode, ceph_cap_string(mask), ceph_cap_string(was), ceph_cap_string(was | mask)); ci->i_dirty_caps |= mask; @@ -1712,7 +1712,7 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, ci->i_snap_realm->cached_context); } dout(" inode %p now dirty snapc %p auth cap %p\n", - &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); + &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap); BUG_ON(!list_empty(&ci->i_dirty_item)); spin_lock(&mdsc->cap_dirty_lock); list_add(&ci->i_dirty_item, &session->s_cap_dirty); @@ -1875,7 +1875,7 @@ static int try_nonblocking_invalidate(struct inode *inode) bool __ceph_should_report_size(struct ceph_inode_info *ci) { - loff_t size = i_size_read(&ci->vfs_inode); + loff_t size = i_size_read(&ci->netfs.inode); /* mds will adjust max size according to the reported size */ if (ci->i_flushing_caps & CEPH_CAP_FILE_WR) return false; @@ -1900,7 +1900,7 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci) void 
ceph_check_caps(struct ceph_inode_info *ci, int flags, struct ceph_mds_session *session) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); struct ceph_cap *cap; u64 flush_tid, oldest_flush_tid; @@ -2467,7 +2467,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc, __releases(ci->i_ceph_lock) __acquires(ci->i_ceph_lock) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_cap *cap; struct ceph_cap_flush *cf; int ret; @@ -2560,7 +2560,7 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, cap = ci->i_auth_cap; if (!(cap && cap->session == session)) { pr_err("%p auth cap %p not mds%d ???\n", - &ci->vfs_inode, cap, session->s_mds); + &ci->netfs.inode, cap, session->s_mds); spin_unlock(&ci->i_ceph_lock); continue; } @@ -2610,7 +2610,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, cap = ci->i_auth_cap; if (!(cap && cap->session == session)) { pr_err("%p auth cap %p not mds%d ???\n", - &ci->vfs_inode, cap, session->s_mds); + &ci->netfs.inode, cap, session->s_mds); spin_unlock(&ci->i_ceph_lock); continue; } @@ -2630,7 +2630,7 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session, lockdep_assert_held(&ci->i_ceph_lock); - dout("%s %p flushing %s\n", __func__, &ci->vfs_inode, + dout("%s %p flushing %s\n", __func__, &ci->netfs.inode, ceph_cap_string(ci->i_flushing_caps)); if (!list_empty(&ci->i_cap_flush_list)) { @@ -2673,10 +2673,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got, } if (got & CEPH_CAP_FILE_BUFFER) { if (ci->i_wb_ref == 0) - ihold(&ci->vfs_inode); + ihold(&ci->netfs.inode); ci->i_wb_ref++; dout("%s %p wb %d -> %d (?)\n", __func__, - &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); + &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref); } } @@ -3004,7 +3004,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got return ret; } - if (S_ISREG(ci->vfs_inode.i_mode) && + if (S_ISREG(ci->netfs.inode.i_mode) && ci->i_inline_version != CEPH_INLINE_NONE && (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && i_size_read(inode) > 0) { @@ -3094,7 +3094,7 @@ enum put_cap_refs_mode { static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had, enum put_cap_refs_mode mode) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; int last = 0, put = 0, flushsnaps = 0, wake = 0; bool check_flushsnaps = false; @@ -3202,7 +3202,7 @@ void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had) void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, struct ceph_snap_context *snapc) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_cap_snap *capsnap = NULL, *iter; int put = 0; bool last = false; @@ -3698,7 +3698,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, session->s_mds, &list_first_entry(&session->s_cap_flushing, struct ceph_inode_info, - i_flushing_item)->vfs_inode); + i_flushing_item)->netfs.inode); } } mdsc->num_cap_flushing--; @@ -4345,7 +4345,7 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) break; list_del_init(&ci->i_cap_delay_list); - inode = igrab(&ci->vfs_inode); + inode = igrab(&ci->netfs.inode); if (inode) { spin_unlock(&mdsc->cap_delay_lock); dout("check_delayed_caps on %p\n", inode); @@ -4373,7 +4373,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s) while 
(!list_empty(&s->s_cap_dirty)) { ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info, i_dirty_item); - inode = &ci->vfs_inode; + inode = &ci->netfs.inode; ihold(inode); dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode)); spin_unlock(&mdsc->cap_dirty_lock); @@ -4407,7 +4407,7 @@ void __ceph_touch_fmode(struct ceph_inode_info *ci, void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count) { - struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb); + struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); int bits = (fmode << 1) | 1; bool already_opened = false; int i; @@ -4441,7 +4441,7 @@ void ceph_get_fmode(struct ceph_inode_info *ci, int fmode, int count) */ void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count) { - struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->vfs_inode.i_sb); + struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); int bits = (fmode << 1) | 1; bool is_closed = true; int i; @@ -4656,7 +4656,7 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali lockdep_assert_held(&ci->i_ceph_lock); dout("removing cap %p, ci is %p, inode is %p\n", - cap, ci, &ci->vfs_inode); + cap, ci, &ci->netfs.inode); is_auth = (cap == ci->i_auth_cap); __ceph_remove_cap(cap, false); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 8c8226c0feac..da59e836a06e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -205,7 +205,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file, { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mount_options *opt = - ceph_inode_to_client(&ci->vfs_inode)->mount_options; + ceph_inode_to_client(&ci->netfs.inode)->mount_options; struct ceph_file_info *fi; int ret; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index b7e9cac3aeef..650746b3ba99 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -176,7 +176,7 @@ static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, rb_insert_color(&frag->node, &ci->i_fragtree); dout("get_or_create_frag added %llx.%llx frag %x\n", - ceph_vinop(&ci->vfs_inode), f); + ceph_vinop(&ci->netfs.inode), f); return frag; } @@ -457,10 +457,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb) if (!ci) return NULL; - dout("alloc_inode %p\n", &ci->vfs_inode); + dout("alloc_inode %p\n", &ci->netfs.inode); /* Set parameters for the netfs library */ - netfs_i_context_init(&ci->vfs_inode, &ceph_netfs_ops); + netfs_inode_init(&ci->netfs.inode, &ceph_netfs_ops); spin_lock_init(&ci->i_ceph_lock); @@ -547,7 +547,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb) INIT_WORK(&ci->i_work, ceph_inode_work); ci->i_work_mask = 0; memset(&ci->i_btime, '\0', sizeof(ci->i_btime)); - return &ci->vfs_inode; + return &ci->netfs.inode; } void ceph_free_inode(struct inode *inode) @@ -1978,7 +1978,7 @@ static void ceph_inode_work(struct work_struct *work) { struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, i_work); - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) { dout("writeback %p\n", inode); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index f5d110d90b77..33f517d549ce 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1564,7 +1564,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session, p = session->s_caps.next; while (p != &session->s_caps) { cap = list_entry(p, struct ceph_cap, session_caps); - inode = 
igrab(&cap->ci->vfs_inode); + inode = igrab(&cap->ci->netfs.inode); if (!inode) { p = p->next; continue; @@ -1622,7 +1622,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, int iputs; dout("removing cap %p, ci is %p, inode is %p\n", - cap, ci, &ci->vfs_inode); + cap, ci, &ci->netfs.inode); spin_lock(&ci->i_ceph_lock); iputs = ceph_purge_inode_cap(inode, cap, &invalidate); spin_unlock(&ci->i_ceph_lock); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 322ee5add942..864cdaa0d2bd 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -521,7 +521,7 @@ static bool has_new_snaps(struct ceph_snap_context *o, static void ceph_queue_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap **pcapsnap) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_snap_context *old_snapc, *new_snapc; struct ceph_cap_snap *capsnap = *pcapsnap; struct ceph_buffer *old_blob = NULL; @@ -652,7 +652,7 @@ update_snapc: int __ceph_finish_cap_snap(struct ceph_inode_info *ci, struct ceph_cap_snap *capsnap) { - struct inode *inode = &ci->vfs_inode; + struct inode *inode = &ci->netfs.inode; struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb); BUG_ON(capsnap->writing); @@ -712,7 +712,7 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) spin_lock(&realm->inodes_with_caps_lock); list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) { - struct inode *inode = igrab(&ci->vfs_inode); + struct inode *inode = igrab(&ci->netfs.inode); if (!inode) continue; spin_unlock(&realm->inodes_with_caps_lock); @@ -904,7 +904,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc) while (!list_empty(&mdsc->snap_flush_list)) { ci = list_first_entry(&mdsc->snap_flush_list, struct ceph_inode_info, i_snap_flush_item); - inode = &ci->vfs_inode; + inode = &ci->netfs.inode; ihold(inode); spin_unlock(&mdsc->snap_flush_lock); ceph_flush_snaps(ci, &session); diff --git a/fs/ceph/super.c b/fs/ceph/super.c index b73b4f75462c..40140805bdcf 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -876,7 +876,7 @@ mempool_t *ceph_wb_pagevec_pool; static void ceph_inode_init_once(void *foo) { struct ceph_inode_info *ci = foo; - inode_init_once(&ci->vfs_inode); + inode_init_once(&ci->netfs.inode); } static int __init init_caches(void) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index dd7dac0f984a..f59dac66955b 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -316,11 +316,7 @@ struct ceph_inode_xattrs_info { * Ceph inode. 
*/ struct ceph_inode_info { - struct { - /* These must be contiguous */ - struct inode vfs_inode; - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; + struct netfs_inode netfs; /* Netfslib context and vfs inode */ struct ceph_vino i_vino; /* ceph ino + snap */ spinlock_t i_ceph_lock; @@ -436,7 +432,7 @@ struct ceph_inode_info { static inline struct ceph_inode_info * ceph_inode(const struct inode *inode) { - return container_of(inode, struct ceph_inode_info, vfs_inode); + return container_of(inode, struct ceph_inode_info, netfs.inode); } static inline struct ceph_fs_client * @@ -1316,7 +1312,7 @@ static inline void __ceph_update_quota(struct ceph_inode_info *ci, has_quota = __ceph_has_quota(ci, QUOTA_GET_ANY); if (had_quota != has_quota) - ceph_adjust_quota_realms_count(&ci->vfs_inode, has_quota); + ceph_adjust_quota_realms_count(&ci->netfs.inode, has_quota); } extern void ceph_handle_quota(struct ceph_mds_client *mdsc, diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 8c2dc2c762a4..f141f5246163 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -57,7 +57,7 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci) static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, size_t size) { - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); struct ceph_osd_client *osdc = &fsc->client->osdc; struct ceph_string *pool_ns; s64 pool = ci->i_layout.pool_id; @@ -69,7 +69,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); - dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode); + dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode); down_read(&osdc->lock); pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); if (pool_name) { @@ -161,7 +161,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci, char *val, size_t size) { ssize_t ret; - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); struct ceph_osd_client *osdc = &fsc->client->osdc; s64 pool = ci->i_layout.pool_id; const char *pool_name; @@ -313,7 +313,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val, static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci, char *val, size_t size) { - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid); } @@ -321,7 +321,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci, static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci, char *val, size_t size) { - struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); + struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb); return ceph_fmt_xattr(val, size, "client%lld", ceph_client_gid(fsc->client)); @@ -629,7 +629,7 @@ static int __set_xattr(struct ceph_inode_info *ci, } dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n", - ceph_vinop(&ci->vfs_inode), xattr, name_len, name, val_len, val); + ceph_vinop(&ci->netfs.inode), xattr, name_len, name, val_len, val); return 0; } @@ -871,7 +871,7 @@ struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci) struct ceph_buffer *old_blob = NULL; void *dest; - dout("__build_xattrs_blob %p\n", &ci->vfs_inode); + dout("__build_xattrs_blob %p\n", 
&ci->netfs.inode); if (ci->i_xattrs.dirty) { int need = __get_required_blob_size(ci, 0, 0); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 12c872800326..c85d9a378325 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -377,7 +377,7 @@ cifs_alloc_inode(struct super_block *sb) cifs_inode->flags = 0; spin_lock_init(&cifs_inode->writers_lock); cifs_inode->writers = 0; - cifs_inode->vfs_inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ + cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */ cifs_inode->server_eof = 0; cifs_inode->uniqueid = 0; cifs_inode->createtime = 0; @@ -389,12 +389,12 @@ cifs_alloc_inode(struct super_block *sb) * Can not set i_flags here - they get immediately overwritten to zero * by the VFS. */ - /* cifs_inode->vfs_inode.i_flags = S_NOATIME | S_NOCMTIME; */ + /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */ INIT_LIST_HEAD(&cifs_inode->openFileList); INIT_LIST_HEAD(&cifs_inode->llist); INIT_LIST_HEAD(&cifs_inode->deferred_closes); spin_lock_init(&cifs_inode->deferred_lock); - return &cifs_inode->vfs_inode; + return &cifs_inode->netfs.inode; } static void @@ -1418,7 +1418,7 @@ cifs_init_once(void *inode) { struct cifsInodeInfo *cifsi = inode; - inode_init_once(&cifsi->vfs_inode); + inode_init_once(&cifsi->netfs.inode); init_rwsem(&cifsi->lock_sem); } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index f873379066c7..e7737166e5b8 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1479,20 +1479,16 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file); #define CIFS_CACHE_RW_FLG (CIFS_CACHE_READ_FLG | CIFS_CACHE_WRITE_FLG) #define CIFS_CACHE_RHW_FLG (CIFS_CACHE_RW_FLG | CIFS_CACHE_HANDLE_FLG) -#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)) +#define CIFS_CACHE_READ(cinode) ((cinode->oplock & CIFS_CACHE_READ_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)) #define CIFS_CACHE_HANDLE(cinode) (cinode->oplock & CIFS_CACHE_HANDLE_FLG) -#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->vfs_inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)) +#define CIFS_CACHE_WRITE(cinode) ((cinode->oplock & CIFS_CACHE_WRITE_FLG) || (CIFS_SB(cinode->netfs.inode.i_sb)->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)) /* * One of these for each file inode */ struct cifsInodeInfo { - struct { - /* These must be contiguous */ - struct inode vfs_inode; /* the VFS's inode record */ - struct netfs_i_context netfs_ctx; /* Netfslib context */ - }; + struct netfs_inode netfs; /* Netfslib context and vfs inode */ bool can_cache_brlcks; struct list_head llist; /* locks helb by this inode */ /* @@ -1531,7 +1527,7 @@ struct cifsInodeInfo { static inline struct cifsInodeInfo * CIFS_I(struct inode *inode) { - return container_of(inode, struct cifsInodeInfo, vfs_inode); + return container_of(inode, struct cifsInodeInfo, netfs.inode); } static inline struct cifs_sb_info * diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1618e0537d58..e64cda7a7610 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2004,7 +2004,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only) { struct cifsFileInfo *open_file = NULL; - struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); + struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) 
@@ -2060,7 +2060,7 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags, return rc; } - cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb); + cifs_sb = CIFS_SB(cifs_inode->netfs.inode.i_sb); /* only filter by fsuid on multiuser mounts */ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)) @@ -4669,14 +4669,14 @@ bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file) /* This inode is open for write at least once */ struct cifs_sb_info *cifs_sb; - cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb); + cifs_sb = CIFS_SB(cifsInode->netfs.inode.i_sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) { /* since no page cache to corrupt on directio we can change size safely */ return true; } - if (i_size_read(&cifsInode->vfs_inode) < end_of_file) + if (i_size_read(&cifsInode->netfs.inode) < end_of_file) return true; return false; diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index a638b29e9062..23ef56f55ce5 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -101,13 +101,13 @@ void cifs_fscache_get_inode_cookie(struct inode *inode) struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); - cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd); + cifs_fscache_fill_coherency(&cifsi->netfs.inode, &cd); - cifsi->netfs_ctx.cache = + cifsi->netfs.cache = fscache_acquire_cookie(tcon->fscache, 0, &cifsi->uniqueid, sizeof(cifsi->uniqueid), &cd, sizeof(cd), - i_size_read(&cifsi->vfs_inode)); + i_size_read(&cifsi->netfs.inode)); } void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) @@ -131,7 +131,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode) if (cookie) { cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cookie); fscache_relinquish_cookie(cookie, false); - cifsi->netfs_ctx.cache = NULL; + cifsi->netfs.cache = NULL; } } diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h index 52355c0912ae..ab9a51d0125c 100644 --- a/fs/cifs/fscache.h +++ b/fs/cifs/fscache.h @@ -52,10 +52,10 @@ void cifs_fscache_fill_coherency(struct inode *inode, struct cifsInodeInfo *cifsi = CIFS_I(inode); memset(cd, 0, sizeof(*cd)); - cd->last_write_time_sec = cpu_to_le64(cifsi->vfs_inode.i_mtime.tv_sec); - cd->last_write_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_mtime.tv_nsec); - cd->last_change_time_sec = cpu_to_le64(cifsi->vfs_inode.i_ctime.tv_sec); - cd->last_change_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_ctime.tv_nsec); + cd->last_write_time_sec = cpu_to_le64(cifsi->netfs.inode.i_mtime.tv_sec); + cd->last_write_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_mtime.tv_nsec); + cd->last_change_time_sec = cpu_to_le64(cifsi->netfs.inode.i_ctime.tv_sec); + cd->last_change_time_nsec = cpu_to_le32(cifsi->netfs.inode.i_ctime.tv_nsec); } diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 2f9e7d2f81b6..81da81e18553 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -115,7 +115,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr) __func__, cifs_i->uniqueid); set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags); /* Invalidate fscache cookie */ - cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd); + cifs_fscache_fill_coherency(&cifs_i->netfs.inode, &cd); fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0); } @@ -2499,7 +2499,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start, u64 len) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); - struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_i->vfs_inode.i_sb); + struct cifs_sb_info *cifs_sb = 
CIFS_SB(cifs_i->netfs.inode.i_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct TCP_Server_Info *server = tcon->ses->server; struct cifsFileInfo *cfile; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 35962a1a23b9..cbc3b433f502 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -537,11 +537,11 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock) if (oplock == OPLOCK_EXCLUSIVE) { cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else if (oplock == OPLOCK_READ) { cinode->oplock = CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else cinode->oplock = 0; } diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 98a76fa791c0..8543cafdfd34 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -4260,15 +4260,15 @@ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, if (oplock == SMB2_OPLOCK_LEVEL_BATCH) { cinode->oplock = CIFS_CACHE_RHW_FLG; cifs_dbg(FYI, "Batch Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else if (oplock == SMB2_OPLOCK_LEVEL_EXCLUSIVE) { cinode->oplock = CIFS_CACHE_RW_FLG; cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else if (oplock == SMB2_OPLOCK_LEVEL_II) { cinode->oplock = CIFS_CACHE_READ_FLG; cifs_dbg(FYI, "Level II Oplock granted on inode %p\n", - &cinode->vfs_inode); + &cinode->netfs.inode); } else cinode->oplock = 0; } @@ -4307,7 +4307,7 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock, cinode->oplock = new_oplock; cifs_dbg(FYI, "%s Lease granted on inode %p\n", message, - &cinode->vfs_inode); + &cinode->netfs.inode); } static void diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 360ce3604a2d..e6b932219803 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -1549,7 +1549,7 @@ static int __ext2_write_inode(struct inode *inode, int do_sync) if (IS_ERR(raw_inode)) return -EIO; - /* For fields not not tracking in the in-memory inode, + /* For fields not tracking in the in-memory inode, * initialise them to zero for new inodes. */ if (ei->i_state & EXT2_STATE_NEW) memset(raw_inode, 0, EXT2_SB(sb)->s_inode_size); diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index a21d8f1a56d1..05221366a16d 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -120,6 +120,7 @@ static bool inode_io_list_move_locked(struct inode *inode, struct list_head *head) { assert_spin_locked(&wb->list_lock); + assert_spin_locked(&inode->i_lock); list_move(&inode->i_io_list, head); @@ -1365,9 +1366,9 @@ static int move_expired_inodes(struct list_head *delaying_queue, inode = wb_inode(delaying_queue->prev); if (inode_dirtied_after(inode, dirtied_before)) break; + spin_lock(&inode->i_lock); list_move(&inode->i_io_list, &tmp); moved++; - spin_lock(&inode->i_lock); inode->i_state |= I_SYNC_QUEUED; spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) @@ -1383,7 +1384,12 @@ static int move_expired_inodes(struct list_head *delaying_queue, goto out; } - /* Move inodes from one superblock together */ + /* + * Although inode's i_io_list is moved from 'tmp' to 'dispatch_queue', + * we don't take inode->i_lock here because it is just a pointless overhead. + * Inode is already marked as I_SYNC_QUEUED so writeback list handling is + * fully under our control. 
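+ *
+ * (Editorial aside, not part of the commit: I_SYNC_QUEUED is set and
+ * cleared with inode->i_lock held, and the reworked __mark_inode_dirty()
+ * later in this file bails out on I_SYNC_QUEUED before touching
+ * i_io_list, which is what makes this lockless splice safe.)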
+ */ while (!list_empty(&tmp)) { sb = wb_inode(tmp.prev)->i_sb; list_for_each_prev_safe(pos, node, &tmp) { @@ -1826,8 +1832,8 @@ static long writeback_sb_inodes(struct super_block *sb, * We'll have another go at writing back this inode * when we completed a full scan of b_io. */ - spin_unlock(&inode->i_lock); requeue_io(inode, wb); + spin_unlock(&inode->i_lock); trace_writeback_sb_inodes_requeue(inode); continue; } @@ -2358,6 +2364,7 @@ void __mark_inode_dirty(struct inode *inode, int flags) { struct super_block *sb = inode->i_sb; int dirtytime = 0; + struct bdi_writeback *wb = NULL; trace_writeback_mark_inode_dirty(inode, flags); @@ -2410,13 +2417,24 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* + * Grab inode's wb early because it requires dropping i_lock and we + * need to make sure following checks happen atomically with dirty + * list handling so that we don't move inodes under flush worker's + * hands. + */ + if (!was_dirty) { + wb = locked_inode_to_wb_and_lock_list(inode); + spin_lock(&inode->i_lock); + } + + /* * If the inode is queued for writeback by flush worker, just * update its dirty state. Once the flush worker is done with * the inode it will place it on the appropriate superblock * list, based upon its state. */ if (inode->i_state & I_SYNC_QUEUED) - goto out_unlock_inode; + goto out_unlock; /* * Only add valid (hashed) inodes to the superblock's @@ -2424,22 +2442,19 @@ void __mark_inode_dirty(struct inode *inode, int flags) */ if (!S_ISBLK(inode->i_mode)) { if (inode_unhashed(inode)) - goto out_unlock_inode; + goto out_unlock; } if (inode->i_state & I_FREEING) - goto out_unlock_inode; + goto out_unlock; /* * If the inode was already on b_dirty/b_io/b_more_io, don't * reposition it (that would break b_dirty time-ordering). 
*/ if (!was_dirty) { - struct bdi_writeback *wb; struct list_head *dirty_list; bool wakeup_bdi = false; - wb = locked_inode_to_wb_and_lock_list(inode); - inode->dirtied_when = jiffies; if (dirtytime) inode->dirtied_time_when = jiffies; @@ -2453,6 +2468,7 @@ void __mark_inode_dirty(struct inode *inode, int flags) dirty_list); spin_unlock(&wb->list_lock); + spin_unlock(&inode->i_lock); trace_writeback_dirty_inode_enqueue(inode); /* @@ -2467,6 +2483,9 @@ void __mark_inode_dirty(struct inode *inode, int flags) return; } } +out_unlock: + if (wb) + spin_unlock(&wb->list_lock); out_unlock_inode: spin_unlock(&inode->i_lock); } diff --git a/fs/inode.c b/fs/inode.c index 9d9b422504d1..bd4da9c5207e 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -27,7 +27,7 @@ * Inode locking rules: * * inode->i_lock protects: - * inode->i_state, inode->i_hash, __iget() + * inode->i_state, inode->i_hash, __iget(), inode->i_io_list * Inode LRU list locks protect: * inode->i_sb->s_inode_lru, inode->i_lru * inode->i_sb->s_inode_list_lock protects: diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 8742d22dfd2b..d37e012386f3 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -155,7 +155,7 @@ static void netfs_rreq_expand(struct netfs_io_request *rreq, void netfs_readahead(struct readahead_control *ractl) { struct netfs_io_request *rreq; - struct netfs_i_context *ctx = netfs_i_context(ractl->mapping->host); + struct netfs_inode *ctx = netfs_inode(ractl->mapping->host); int ret; _enter("%lx,%x", readahead_index(ractl), readahead_count(ractl)); @@ -215,7 +215,7 @@ int netfs_read_folio(struct file *file, struct folio *folio) { struct address_space *mapping = folio_file_mapping(folio); struct netfs_io_request *rreq; - struct netfs_i_context *ctx = netfs_i_context(mapping->host); + struct netfs_inode *ctx = netfs_inode(mapping->host); int ret; _enter("%lx", folio_index(folio)); @@ -331,7 +331,7 @@ int netfs_write_begin(struct file *file, struct address_space *mapping, void **_fsdata) { struct netfs_io_request *rreq; - struct netfs_i_context *ctx = netfs_i_context(file_inode(file )); + struct netfs_inode *ctx = netfs_inode(file_inode(file )); struct folio *folio; unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE; pgoff_t index = pos >> PAGE_SHIFT; diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index b7b0e3d18d9e..43fac1b14e40 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -91,7 +91,7 @@ static inline void netfs_stat_d(atomic_t *stat) /* * Miscellaneous functions. */ -static inline bool netfs_is_cache_enabled(struct netfs_i_context *ctx) +static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx) { #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cookie = ctx->cache; diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index e86107b30ba4..c6afa605b63b 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -18,7 +18,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, { static atomic_t debug_ids; struct inode *inode = file ? 
file_inode(file) : mapping->host; - struct netfs_i_context *ctx = netfs_i_context(inode); + struct netfs_inode *ctx = netfs_inode(inode); struct netfs_io_request *rreq; int ret; diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index a74aef99bd3d..09d1307959d0 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -79,6 +79,7 @@ #include <linux/capability.h> #include <linux/quotaops.h> #include <linux/blkdev.h> +#include <linux/sched/mm.h> #include "../internal.h" /* ugh */ #include <linux/uaccess.h> @@ -425,9 +426,11 @@ EXPORT_SYMBOL(mark_info_dirty); int dquot_acquire(struct dquot *dquot) { int ret = 0, ret2 = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); if (!test_bit(DQ_READ_B, &dquot->dq_flags)) { ret = dqopt->ops[dquot->dq_id.type]->read_dqblk(dquot); if (ret < 0) @@ -458,6 +461,7 @@ int dquot_acquire(struct dquot *dquot) smp_mb__before_atomic(); set_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_iolock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } @@ -469,9 +473,11 @@ EXPORT_SYMBOL(dquot_acquire); int dquot_commit(struct dquot *dquot) { int ret = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); if (!clear_dquot_dirty(dquot)) goto out_lock; /* Inactive dquot can be only if there was error during read/init @@ -481,6 +487,7 @@ int dquot_commit(struct dquot *dquot) else ret = -EIO; out_lock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } @@ -492,9 +499,11 @@ EXPORT_SYMBOL(dquot_commit); int dquot_release(struct dquot *dquot) { int ret = 0, ret2 = 0; + unsigned int memalloc; struct quota_info *dqopt = sb_dqopt(dquot->dq_sb); mutex_lock(&dquot->dq_lock); + memalloc = memalloc_nofs_save(); /* Check whether we are not racing with some other dqget() */ if (dquot_is_busy(dquot)) goto out_dqlock; @@ -510,6 +519,7 @@ int dquot_release(struct dquot *dquot) } clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_dqlock: + memalloc_nofs_restore(memalloc); mutex_unlock(&dquot->dq_lock); return ret; } diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 302506bbc2a4..8e47d483b524 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -44,6 +44,7 @@ mandatory-y += msi.h mandatory-y += pci.h mandatory-y += percpu.h mandatory-y += pgalloc.h +mandatory-y += platform-feature.h mandatory-y += preempt.h mandatory-y += rwonce.h mandatory-y += sections.h diff --git a/include/asm-generic/platform-feature.h b/include/asm-generic/platform-feature.h new file mode 100644 index 000000000000..4b0af3d51588 --- /dev/null +++ b/include/asm-generic/platform-feature.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_PLATFORM_FEATURE_H +#define _ASM_GENERIC_PLATFORM_FEATURE_H + +/* Number of arch specific feature flags. 
*/ +#define PLATFORM_ARCH_FEAT_N 0 + +#endif /* _ASM_GENERIC_PLATFORM_FEATURE_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 732de9014626..0f2a59c9c735 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -822,7 +822,6 @@ struct ata_port { struct ata_queued_cmd qcmd[ATA_MAX_QUEUE + 1]; u64 qc_active; int nr_active_links; /* #links with active qcs */ - unsigned int sas_last_tag; /* track next tag hw expects */ struct ata_link link; /* host default link */ struct ata_link *slave_link; /* see ata_slave_link_init() */ diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 77fa6a61706a..6dbb4c9ce50d 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -119,9 +119,10 @@ typedef void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error, bool was_async); /* - * Per-inode description. This must be directly after the inode struct. + * Per-inode context. This wraps the VFS inode. */ -struct netfs_i_context { +struct netfs_inode { + struct inode inode; /* The VFS inode */ const struct netfs_request_ops *ops; #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cache; @@ -256,7 +257,7 @@ struct netfs_cache_ops { * boundary as appropriate. */ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, - loff_t i_size); + loff_t i_size); /* Prepare a write operation, working out what part of the write we can * actually do. @@ -288,45 +289,35 @@ extern void netfs_put_subrequest(struct netfs_io_subrequest *subreq, extern void netfs_stats_show(struct seq_file *); /** - * netfs_i_context - Get the netfs inode context from the inode + * netfs_inode - Get the netfs inode context from the inode * @inode: The inode to query * * Get the netfs lib inode context from the network filesystem's inode. The * context struct is expected to directly follow on from the VFS inode struct. */ -static inline struct netfs_i_context *netfs_i_context(struct inode *inode) +static inline struct netfs_inode *netfs_inode(struct inode *inode) { - return (void *)inode + sizeof(*inode); + return container_of(inode, struct netfs_inode, inode); } /** - * netfs_inode - Get the netfs inode from the inode context - * @ctx: The context to query - * - * Get the netfs inode from the netfs library's inode context. The VFS inode - * is expected to directly precede the context struct. - */ -static inline struct inode *netfs_inode(struct netfs_i_context *ctx) -{ - return (void *)ctx - sizeof(struct inode); -} - -/** - * netfs_i_context_init - Initialise a netfs lib context + * netfs_inode_init - Initialise a netfslib inode context * @inode: The inode with which the context is associated * @ops: The netfs's operations list * * Initialise the netfs library context struct. This is expected to follow on * directly from the VFS inode struct. 
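 *
 * (Editorial aside, not part of the commit: after this change the context
 * embeds the VFS inode and is recovered with container_of(), as the
 * netfs_inode() helper above shows, so "follow on directly" describes the
 * old layout.)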
*/ -static inline void netfs_i_context_init(struct inode *inode, - const struct netfs_request_ops *ops) +static inline void netfs_inode_init(struct inode *inode, + const struct netfs_request_ops *ops) { - struct netfs_i_context *ctx = netfs_i_context(inode); + struct netfs_inode *ctx = netfs_inode(inode); - memset(ctx, 0, sizeof(*ctx)); ctx->ops = ops; ctx->remote_i_size = i_size_read(inode); +#if IS_ENABLED(CONFIG_FSCACHE) + ctx->cache = NULL; +#endif } /** @@ -338,7 +329,7 @@ static inline void netfs_i_context_init(struct inode *inode, */ static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size) { - struct netfs_i_context *ctx = netfs_i_context(inode); + struct netfs_inode *ctx = netfs_inode(inode); ctx->remote_i_size = new_i_size; } @@ -352,7 +343,7 @@ static inline void netfs_resize_file(struct inode *inode, loff_t new_i_size) static inline struct fscache_cookie *netfs_i_cookie(struct inode *inode) { #if IS_ENABLED(CONFIG_FSCACHE) - struct netfs_i_context *ctx = netfs_i_context(inode); + struct netfs_inode *ctx = netfs_inode(inode); return ctx->cache; #else return NULL; diff --git a/include/linux/platform-feature.h b/include/linux/platform-feature.h new file mode 100644 index 000000000000..b2f48be999fa --- /dev/null +++ b/include/linux/platform-feature.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PLATFORM_FEATURE_H +#define _PLATFORM_FEATURE_H + +#include <linux/bitops.h> +#include <asm/platform-feature.h> + +/* The platform features are starting with the architecture specific ones. */ + +/* Used to enable platform specific DMA handling for virtio devices. */ +#define PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS (0 + PLATFORM_ARCH_FEAT_N) + +#define PLATFORM_FEAT_N (1 + PLATFORM_ARCH_FEAT_N) + +void platform_set(unsigned int feature); +void platform_clear(unsigned int feature); +bool platform_has(unsigned int feature); + +#endif /* _PLATFORM_FEATURE_H */ diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 9a36051ceb76..49c7c32815f1 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h @@ -604,13 +604,4 @@ static inline void virtio_cwrite64(struct virtio_device *vdev, _r; \ }) -#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS -int arch_has_restricted_virtio_memory_access(void); -#else -static inline int arch_has_restricted_virtio_memory_access(void) -{ - return 0; -} -#endif /* CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS */ - #endif /* _LINUX_VIRTIO_CONFIG_H */ diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index 021778a7e1af..6484095a8c01 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -612,5 +612,6 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, enum tc_setup_type type, void *data, struct flow_block_offload *bo, void (*cleanup)(struct flow_block_cb *block_cb)); +bool flow_indr_dev_exists(void); #endif /* _NET_FLOW_OFFLOAD_H */ diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 5b38bf1a586b..de9dcc5652c4 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -1063,7 +1063,7 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags); @@ -1079,7 +1079,7 @@ struct sk_buff *__ip6_make_skb(struct sock 
*sk, struct sk_buff_head *queue, struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork); diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 20af9d3557b9..279ae0fff7ad 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1090,7 +1090,6 @@ struct nft_stats { struct nft_hook { struct list_head list; - bool inactive; struct nf_hook_ops ops; struct rcu_head rcu; }; diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h index 797147843958..3568b6a2f5f0 100644 --- a/include/net/netfilter/nf_tables_offload.h +++ b/include/net/netfilter/nf_tables_offload.h @@ -92,7 +92,7 @@ int nft_flow_rule_offload_commit(struct net *net); NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \ memset(&(__reg)->mask, 0xff, (__reg)->len); -int nft_chain_offload_priority(struct nft_base_chain *basechain); +bool nft_chain_offload_support(const struct nft_base_chain *basechain); int nft_offload_init(void); void nft_offload_exit(void); diff --git a/include/uapi/linux/tls.h b/include/uapi/linux/tls.h index ac39328eabe7..bb8f80812b0b 100644 --- a/include/uapi/linux/tls.h +++ b/include/uapi/linux/tls.h @@ -39,7 +39,7 @@ /* TLS socket options */ #define TLS_TX 1 /* Set transmit parameters */ #define TLS_RX 2 /* Set receive parameters */ -#define TLS_TX_ZEROCOPY_SENDFILE 3 /* transmit zerocopy sendfile */ +#define TLS_TX_ZEROCOPY_RO 3 /* TX zerocopy (only sendfile now) */ /* Supported versions */ #define TLS_VERSION_MINOR(ver) ((ver) & 0xFF) @@ -161,7 +161,7 @@ enum { TLS_INFO_CIPHER, TLS_INFO_TXCONF, TLS_INFO_RXCONF, - TLS_INFO_ZC_SENDFILE, + TLS_INFO_ZC_RO_TX, __TLS_INFO_MAX, }; #define TLS_INFO_MAX (__TLS_INFO_MAX - 1) diff --git a/include/xen/arm/xen-ops.h b/include/xen/arm/xen-ops.h new file mode 100644 index 000000000000..b0766a660338 --- /dev/null +++ b/include/xen/arm/xen-ops.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_ARM_XEN_OPS_H +#define _ASM_ARM_XEN_OPS_H + +#include <xen/swiotlb-xen.h> +#include <xen/xen-ops.h> + +static inline void xen_setup_dma_ops(struct device *dev) +{ +#ifdef CONFIG_XEN + if (xen_is_grant_dma_device(dev)) + xen_grant_setup_dma_ops(dev); + else if (xen_swiotlb_detect()) + dev->dma_ops = &xen_swiotlb_dma_ops; +#endif +} + +#endif /* _ASM_ARM_XEN_OPS_H */ diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h index 527c9907f99c..e279be353e3f 100644 --- a/include/xen/grant_table.h +++ b/include/xen/grant_table.h @@ -127,10 +127,14 @@ int gnttab_try_end_foreign_access(grant_ref_t ref); */ int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head); +int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first); + void gnttab_free_grant_reference(grant_ref_t ref); void gnttab_free_grant_references(grant_ref_t head); +void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count); + int gnttab_empty_grant_references(const grant_ref_t *pprivate_head); int gnttab_claim_grant_reference(grant_ref_t *pprivate_head); diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index c7c1b46ff4cd..80546960f8b7 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -214,4 +214,17 @@ static inline void xen_preemptible_hcall_end(void) { } 
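(Editorial note on the hunk below, which adds the grant-DMA entry points: these pair with the new Arm xen_setup_dma_ops() helper in include/xen/arm/xen-ops.h earlier in this diff. A minimal caller sketch follows, assuming a hypothetical example_setup_device() on the device-setup path; only xen_setup_dma_ops() and the helpers it wraps come from this series.)

#include <linux/device.h>
#include <xen/arm/xen-ops.h>

/* Hypothetical caller; the helper picks the DMA ops for the device. */
static void example_setup_device(struct device *dev)
{
	/*
	 * Installs the grant-table DMA ops when xen_is_grant_dma_device()
	 * matches the device, otherwise falls back to xen_swiotlb_dma_ops
	 * when xen_swiotlb_detect() succeeds.
	 */
	xen_setup_dma_ops(dev);
}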
#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */ +#ifdef CONFIG_XEN_GRANT_DMA_OPS +void xen_grant_setup_dma_ops(struct device *dev); +bool xen_is_grant_dma_device(struct device *dev); +#else +static inline void xen_grant_setup_dma_ops(struct device *dev) +{ +} +static inline bool xen_is_grant_dma_device(struct device *dev) +{ + return false; +} +#endif /* CONFIG_XEN_GRANT_DMA_OPS */ + #endif /* INCLUDE_XEN_OPS_H */ diff --git a/include/xen/xen.h b/include/xen/xen.h index a99bab817523..0780a81e140d 100644 --- a/include/xen/xen.h +++ b/include/xen/xen.h @@ -52,6 +52,14 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, extern u64 xen_saved_max_mem_size; #endif +#include <linux/platform-feature.h> + +static inline void xen_set_restricted_virtio_memory_access(void) +{ + if (IS_ENABLED(CONFIG_XEN_VIRTIO) && xen_domain()) + platform_set(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS); +} + #ifdef CONFIG_XEN_UNPOPULATED_ALLOC int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages); void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages); diff --git a/init/Kconfig b/init/Kconfig index c984afc489de..c7900e8975f1 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -885,6 +885,15 @@ config CC_IMPLICIT_FALLTHROUGH default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5) default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough) +# Currently, disable gcc-12 array-bounds globally. +# We may want to target only particular configurations some day. +config GCC12_NO_ARRAY_BOUNDS + def_bool y + +config CC_NO_ARRAY_BOUNDS + bool + default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS + # # For architectures that know their GCC __int128 support is sound # diff --git a/kernel/Makefile b/kernel/Makefile index 318789c728d3..a7e1f49ab2b3 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -7,7 +7,7 @@ obj-y = fork.o exec_domain.o panic.o \ cpu.o exit.o softirq.o resource.o \ sysctl.o capability.o ptrace.o user.o \ signal.o sys.o umh.o workqueue.o pid.o task_work.o \ - extable.o params.o \ + extable.o params.o platform-feature.o \ kthread.o sys_ni.o nsproxy.o \ notifier.o ksysfs.o cred.o reboot.o \ async.o range.o smpboot.o ucount.o regset.o diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 7bccaa4646e5..63d0ac7dfe2f 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -6054,6 +6054,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, struct bpf_reg_state *regs, bool ptr_to_mem_ok) { + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); struct bpf_verifier_log *log = &env->log; u32 i, nargs, ref_id, ref_obj_id = 0; bool is_kfunc = btf_is_kernel(btf); @@ -6171,7 +6172,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, return -EINVAL; } /* rest of the arguments can be anything, like normal kfunc */ - } else if (btf_get_prog_ctx_type(log, btf, t, env->prog->type, i)) { + } else if (btf_get_prog_ctx_type(log, btf, t, prog_type, i)) { /* If function expects ctx type in BTF check that caller * is passing PTR_TO_CTX. 
*/ diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index ac740630c79c..2caafd13f8aa 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -564,7 +564,7 @@ static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs) rc = active_cacheline_insert(entry); if (rc == -ENOMEM) { - pr_err("cacheline tracking ENOMEM, dma-debug disabled\n"); + pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n"); global_disable = true; } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { err_printk(entry->dev, entry, diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c index dfa1de89dc94..cb50f8d38360 100644 --- a/kernel/dma/swiotlb.c +++ b/kernel/dma/swiotlb.c @@ -192,7 +192,7 @@ void __init swiotlb_update_mem_attributes(void) } static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, - unsigned long nslabs, bool late_alloc) + unsigned long nslabs, unsigned int flags, bool late_alloc) { void *vaddr = phys_to_virt(start); unsigned long bytes = nslabs << IO_TLB_SHIFT, i; @@ -203,8 +203,7 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start, mem->index = 0; mem->late_alloc = late_alloc; - if (swiotlb_force_bounce) - mem->force_bounce = true; + mem->force_bounce = swiotlb_force_bounce || (flags & SWIOTLB_FORCE); spin_lock_init(&mem->lock); for (i = 0; i < mem->nslabs; i++) { @@ -275,8 +274,7 @@ retry: panic("%s: Failed to allocate %zu bytes align=0x%lx\n", __func__, alloc_size, PAGE_SIZE); - swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, false); - mem->force_bounce = flags & SWIOTLB_FORCE; + swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false); if (flags & SWIOTLB_VERBOSE) swiotlb_print_info(); @@ -348,7 +346,7 @@ retry: set_memory_decrypted((unsigned long)vstart, (nslabs << IO_TLB_SHIFT) >> PAGE_SHIFT); - swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, true); + swiotlb_init_io_tlb_mem(mem, virt_to_phys(vstart), nslabs, 0, true); swiotlb_print_info(); return 0; @@ -835,8 +833,8 @@ static int rmem_swiotlb_device_init(struct reserved_mem *rmem, set_memory_decrypted((unsigned long)phys_to_virt(rmem->base), rmem->size >> PAGE_SHIFT); - swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, false); - mem->force_bounce = true; + swiotlb_init_io_tlb_mem(mem, rmem->base, nslabs, SWIOTLB_FORCE, + false); mem->for_alloc = true; rmem->priv = mem; diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c index 9d09f489b60e..2e0f75bcb7fd 100644 --- a/kernel/entry/kvm.c +++ b/kernel/entry/kvm.c @@ -9,12 +9,6 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work) int ret; if (ti_work & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) { - clear_notify_signal(); - if (task_work_pending(current)) - task_work_run(); - } - - if (ti_work & _TIF_SIGPENDING) { kvm_handle_signal_exit(vcpu); return -EINTR; } diff --git a/kernel/platform-feature.c b/kernel/platform-feature.c new file mode 100644 index 000000000000..cb6a6c3e4fed --- /dev/null +++ b/kernel/platform-feature.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bitops.h> +#include <linux/cache.h> +#include <linux/export.h> +#include <linux/platform-feature.h> + +#define PLATFORM_FEAT_ARRAY_SZ BITS_TO_LONGS(PLATFORM_FEAT_N) +static unsigned long __read_mostly platform_features[PLATFORM_FEAT_ARRAY_SZ]; + +void platform_set(unsigned int feature) +{ + set_bit(feature, platform_features); +} +EXPORT_SYMBOL_GPL(platform_set); + +void platform_clear(unsigned int feature) +{ + clear_bit(feature, platform_features); +} 
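(Editorial note: together with <linux/platform-feature.h> added earlier, these helpers form a single feature bitmap; platform_has() and the EXPORT_SYMBOL_GPL lines follow just below. The bitmap replaces the arch_has_restricted_virtio_memory_access() hook removed from virtio_config.h earlier in this diff. A hedged usage sketch, not code from the patch; example_virtio_restricted() is a hypothetical caller name.)

#include <linux/platform-feature.h>

/* Hypothetical caller; the feature bit is set on the Xen side by
 * xen_set_restricted_virtio_memory_access() (include/xen/xen.h above). */
static bool example_virtio_restricted(void)
{
	return platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS);
}

(The backing array is declared __read_mostly, so the common platform_has() query stays cheap on hot paths.)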
+EXPORT_SYMBOL_GPL(platform_clear); + +bool platform_has(unsigned int feature) +{ + return test_bit(feature, platform_features); +} +EXPORT_SYMBOL_GPL(platform_has); diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 10b157a6d73e..7a13e6ac6327 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2263,11 +2263,11 @@ static int copy_user_syms(struct user_syms *us, unsigned long __user *usyms, u32 int err = -ENOMEM; unsigned int i; - syms = kvmalloc(cnt * sizeof(*syms), GFP_KERNEL); + syms = kvmalloc_array(cnt, sizeof(*syms), GFP_KERNEL); if (!syms) goto error; - buf = kvmalloc(cnt * KSYM_NAME_LEN, GFP_KERNEL); + buf = kvmalloc_array(cnt, KSYM_NAME_LEN, GFP_KERNEL); if (!buf) goto error; @@ -2464,7 +2464,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr return -EINVAL; size = cnt * sizeof(*addrs); - addrs = kvmalloc(size, GFP_KERNEL); + addrs = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); if (!addrs) return -ENOMEM; @@ -2489,7 +2489,7 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies); if (ucookies) { - cookies = kvmalloc(size, GFP_KERNEL); + cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL); if (!cookies) { err = -ENOMEM; goto error; diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c index 73f68d4625f3..929f6379a279 100644 --- a/net/core/flow_offload.c +++ b/net/core/flow_offload.c @@ -595,3 +595,9 @@ int flow_indr_dev_setup_offload(struct net_device *dev, struct Qdisc *sch, return (bo && list_empty(&bo->cb_list)) ? -EOPNOTSUPP : count; } EXPORT_SYMBOL(flow_indr_dev_setup_offload); + +bool flow_indr_dev_exists(void) +{ + return !list_empty(&flow_block_indr_dev_list); +} +EXPORT_SYMBOL(flow_indr_dev_exists); diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index e8de5e699b3f..545f91b6cb5e 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -1026,10 +1026,12 @@ void __init inet_hashinfo2_init(struct inet_hashinfo *h, const char *name, init_hashinfo_lhash2(h); /* this one is used for source ports of outgoing connections */ - table_perturb = kmalloc_array(INET_TABLE_PERTURB_SIZE, - sizeof(*table_perturb), GFP_KERNEL); - if (!table_perturb) - panic("TCP: failed to alloc table_perturb"); + table_perturb = alloc_large_system_hash("Table-perturb", + sizeof(*table_perturb), + INET_TABLE_PERTURB_SIZE, + 0, 0, NULL, NULL, + INET_TABLE_PERTURB_SIZE, + INET_TABLE_PERTURB_SIZE); } int inet_hashinfo2_init_mod(struct inet_hashinfo *h) diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7e474a85deaf..3b9cd487075a 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -629,21 +629,20 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb, } if (dev->header_ops) { - const int pull_len = tunnel->hlen + sizeof(struct iphdr); - if (skb_cow_head(skb, 0)) goto free_skb; tnl_params = (const struct iphdr *)skb->data; - if (pull_len > skb_transport_offset(skb)) - goto free_skb; - /* Pull skb since ip_tunnel_xmit() needs skb->data pointing * to gre header. 
*/ - skb_pull(skb, pull_len); + skb_pull(skb, tunnel->hlen + sizeof(struct iphdr)); skb_reset_mac_header(skb); + + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb_checksum_start(skb) < skb->data) + goto free_skb; } else { if (skb_cow_head(skb, dev->needed_headroom)) goto free_skb; diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c index 2fe5860c21d6..b146ce88c5d0 100644 --- a/net/ipv4/xfrm4_protocol.c +++ b/net/ipv4/xfrm4_protocol.c @@ -304,4 +304,3 @@ void __init xfrm4_protocol_init(void) { xfrm_input_register_afinfo(&xfrm4_input_afinfo); } -EXPORT_SYMBOL(xfrm4_protocol_init); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 4081b12a01ff..77e3f5970ce4 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1450,7 +1450,7 @@ static int __ip6_append_data(struct sock *sk, struct page_frag *pfrag, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, unsigned int flags, struct ipcm6_cookie *ipc6) { struct sk_buff *skb, *skb_prev = NULL; @@ -1798,7 +1798,7 @@ error: int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags) { @@ -1995,7 +1995,7 @@ EXPORT_SYMBOL_GPL(ip6_flush_pending_frames); struct sk_buff *ip6_make_skb(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), - void *from, int length, int transhdrlen, + void *from, size_t length, int transhdrlen, struct ipcm6_cookie *ipc6, struct rt6_info *rt, unsigned int flags, struct inet_cork_full *cork) { diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c index 29bc4e7c3046..6de01185cc68 100644 --- a/net/ipv6/seg6_hmac.c +++ b/net/ipv6/seg6_hmac.c @@ -399,7 +399,6 @@ int __init seg6_hmac_init(void) { return seg6_hmac_init_algo(); } -EXPORT_SYMBOL(seg6_hmac_init); int __net_init seg6_hmac_net_init(struct net *net) { diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index 9fbe243a0e81..98a34287439c 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -218,6 +218,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, struct flowi6 fl6; int dev_flags = 0; + memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_iif = skb->dev->ifindex; fl6.daddr = nhaddr ? *nhaddr : hdr->daddr; fl6.saddr = hdr->saddr; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index c6ff8bf9b55f..9dbd801ddb98 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -504,14 +504,15 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; int transhdrlen = 4; /* zero session-id */ - int ulen = len + transhdrlen; + int ulen; int err; /* Rough check on arithmetic overflow, * better check is made in ip6_append_data(). 
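 *
 * (Editorial aside, not part of the commit: with the old code,
 * ulen = len + transhdrlen was computed before any check, so a len
 * near INT_MAX wrapped the int ulen negative; checking len against
 * INT_MAX - transhdrlen first keeps the later sum in range.)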
*/ - if (len > INT_MAX) + if (len > INT_MAX - transhdrlen) return -EMSGSIZE; + ulen = len + transhdrlen; /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 746be13438ef..51144fc66889 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -544,6 +544,7 @@ static int nft_trans_flowtable_add(struct nft_ctx *ctx, int msg_type, if (msg_type == NFT_MSG_NEWFLOWTABLE) nft_activate_next(ctx->net, flowtable); + INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); nft_trans_flowtable(trans) = flowtable; nft_trans_commit_list_add_tail(ctx->net, trans); @@ -1914,7 +1915,6 @@ static struct nft_hook *nft_netdev_hook_alloc(struct net *net, goto err_hook_dev; } hook->ops.dev = dev; - hook->inactive = false; return hook; @@ -2166,7 +2166,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; if (chain->flags & NFT_CHAIN_HW_OFFLOAD && - nft_chain_offload_priority(basechain) < 0) + !nft_chain_offload_support(basechain)) return -EOPNOTSUPP; flow_block_init(&basechain->flow_block); @@ -7332,7 +7332,7 @@ static void __nft_unregister_flowtable_net_hooks(struct net *net, nf_unregister_net_hook(net, &hook->ops); if (release_netdev) { list_del(&hook->list); - kfree_rcu(hook); + kfree_rcu(hook, rcu); } } } @@ -7433,11 +7433,15 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, if (nla[NFTA_FLOWTABLE_FLAGS]) { flags = ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS])); - if (flags & ~NFT_FLOWTABLE_MASK) - return -EOPNOTSUPP; + if (flags & ~NFT_FLOWTABLE_MASK) { + err = -EOPNOTSUPP; + goto err_flowtable_update_hook; + } if ((flowtable->data.flags & NFT_FLOWTABLE_HW_OFFLOAD) ^ - (flags & NFT_FLOWTABLE_HW_OFFLOAD)) - return -EOPNOTSUPP; + (flags & NFT_FLOWTABLE_HW_OFFLOAD)) { + err = -EOPNOTSUPP; + goto err_flowtable_update_hook; + } } else { flags = flowtable->data.flags; } @@ -7618,6 +7622,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, { const struct nlattr * const *nla = ctx->nla; struct nft_flowtable_hook flowtable_hook; + LIST_HEAD(flowtable_del_list); struct nft_hook *this, *hook; struct nft_trans *trans; int err; @@ -7633,7 +7638,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, err = -ENOENT; goto err_flowtable_del_hook; } - hook->inactive = true; + list_move(&hook->list, &flowtable_del_list); } trans = nft_trans_alloc(ctx, NFT_MSG_DELFLOWTABLE, @@ -7646,6 +7651,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, nft_trans_flowtable(trans) = flowtable; nft_trans_flowtable_update(trans) = true; INIT_LIST_HEAD(&nft_trans_flowtable_hooks(trans)); + list_splice(&flowtable_del_list, &nft_trans_flowtable_hooks(trans)); nft_flowtable_hook_release(&flowtable_hook); nft_trans_commit_list_add_tail(ctx->net, trans); @@ -7653,13 +7659,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, return 0; err_flowtable_del_hook: - list_for_each_entry(this, &flowtable_hook.list, list) { - hook = nft_hook_list_find(&flowtable->hook_list, this); - if (!hook) - break; - - hook->inactive = false; - } + list_splice(&flowtable_del_list, &flowtable->hook_list); nft_flowtable_hook_release(&flowtable_hook); return err; @@ -8329,6 +8329,9 @@ static void nft_commit_release(struct nft_trans *trans) nf_tables_chain_destroy(&trans->ctx); break; case NFT_MSG_DELRULE: + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + 
nft_flow_rule_destroy(nft_trans_flow_rule(trans)); + nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); break; case NFT_MSG_DELSET: @@ -8563,17 +8566,6 @@ void nft_chain_del(struct nft_chain *chain) list_del_rcu(&chain->list); } -static void nft_flowtable_hooks_del(struct nft_flowtable *flowtable, - struct list_head *hook_list) -{ - struct nft_hook *hook, *next; - - list_for_each_entry_safe(hook, next, &flowtable->hook_list, list) { - if (hook->inactive) - list_move(&hook->list, hook_list); - } -} - static void nf_tables_module_autoload_cleanup(struct net *net) { struct nftables_pernet *nft_net = nft_pernet(net); @@ -8828,6 +8820,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nf_tables_rule_notify(&trans->ctx, nft_trans_rule(trans), NFT_MSG_NEWRULE); + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); + nft_trans_destroy(trans); break; case NFT_MSG_DELRULE: @@ -8918,8 +8913,6 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) break; case NFT_MSG_DELFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - nft_flowtable_hooks_del(nft_trans_flowtable(trans), - &nft_trans_flowtable_hooks(trans)); nf_tables_flowtable_notify(&trans->ctx, nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans), @@ -9000,7 +8993,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) struct nftables_pernet *nft_net = nft_pernet(net); struct nft_trans *trans, *next; struct nft_trans_elem *te; - struct nft_hook *hook; if (action == NFNL_ABORT_VALIDATE && nf_tables_validate(net) < 0) @@ -9131,8 +9123,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_DELFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - list_for_each_entry(hook, &nft_trans_flowtable(trans)->hook_list, list) - hook->inactive = false; + list_splice(&nft_trans_flowtable_hooks(trans), + &nft_trans_flowtable(trans)->hook_list); } else { trans->ctx.table->use++; nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 2d36952b1392..910ef881c3b8 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -208,7 +208,7 @@ static int nft_setup_cb_call(enum tc_setup_type type, void *type_data, return 0; } -int nft_chain_offload_priority(struct nft_base_chain *basechain) +static int nft_chain_offload_priority(const struct nft_base_chain *basechain) { if (basechain->ops.priority <= 0 || basechain->ops.priority > USHRT_MAX) @@ -217,6 +217,27 @@ int nft_chain_offload_priority(struct nft_base_chain *basechain) return 0; } +bool nft_chain_offload_support(const struct nft_base_chain *basechain) +{ + struct net_device *dev; + struct nft_hook *hook; + + if (nft_chain_offload_priority(basechain) < 0) + return false; + + list_for_each_entry(hook, &basechain->hook_list, list) { + if (hook->ops.pf != NFPROTO_NETDEV || + hook->ops.hooknum != NF_NETDEV_INGRESS) + return false; + + dev = hook->ops.dev; + if (!dev->netdev_ops->ndo_setup_tc && !flow_indr_dev_exists()) + return false; + } + + return true; +} + static void nft_flow_cls_offload_setup(struct flow_cls_offload *cls_flow, const struct nft_base_chain *basechain, const struct nft_rule *rule, diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 4394df4bc99b..e5fd6995e4bf 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -335,7 +335,8 @@ static void nft_nat_inet_eval(const struct 
nft_expr *expr, { const struct nft_nat *priv = nft_expr_priv(expr); - if (priv->family == nft_pf(pkt)) + if (priv->family == nft_pf(pkt) || + priv->family == NFPROTO_INET) nft_nat_eval(expr, regs, pkt); } diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 1b5d73079dc9..868db4669a29 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -373,6 +373,7 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, update_ip_l4_checksum(skb, nh, *addr, new_addr); csum_replace4(&nh->check, *addr, new_addr); skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); *addr = new_addr; } @@ -420,6 +421,7 @@ static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto, update_ipv6_checksum(skb, l4_proto, addr, new_addr); skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); memcpy(addr, new_addr, sizeof(__be32[4])); } @@ -660,6 +662,7 @@ static int set_nsh(struct sk_buff *skb, struct sw_flow_key *flow_key, static void set_tp_port(struct sk_buff *skb, __be16 *port, __be16 new_port, __sum16 *check) { + ovs_ct_clear(skb, NULL); inet_proto_csum_replace2(check, skb, *port, new_port, false); *port = new_port; } @@ -699,6 +702,7 @@ static int set_udp(struct sk_buff *skb, struct sw_flow_key *flow_key, uh->dest = dst; flow_key->tp.src = src; flow_key->tp.dst = dst; + ovs_ct_clear(skb, NULL); } skb_clear_hash(skb); @@ -761,6 +765,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key, sh->checksum = old_csum ^ old_correct_csum ^ new_csum; skb_clear_hash(skb); + ovs_ct_clear(skb, NULL); + flow_key->tp.src = sh->source; flow_key->tp.dst = sh->dest; diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 4a947c13c813..4e70df91d0f2 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -1342,7 +1342,9 @@ int ovs_ct_clear(struct sk_buff *skb, struct sw_flow_key *key) nf_ct_put(ct); nf_ct_set(skb, NULL, IP_CT_UNTRACKED); - ovs_ct_fill_key(skb, key, false); + + if (key) + ovs_ct_fill_key(skb, key, false); return 0; } diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index b91ddc110786..da176411c1b5 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -544,7 +544,7 @@ static int do_tls_getsockopt(struct sock *sk, int optname, rc = do_tls_getsockopt_conf(sk, optval, optlen, optname == TLS_TX); break; - case TLS_TX_ZEROCOPY_SENDFILE: + case TLS_TX_ZEROCOPY_RO: rc = do_tls_getsockopt_tx_zc(sk, optval, optlen); break; default: @@ -731,7 +731,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval, optname == TLS_TX); release_sock(sk); break; - case TLS_TX_ZEROCOPY_SENDFILE: + case TLS_TX_ZEROCOPY_RO: lock_sock(sk); rc = do_tls_setsockopt_tx_zc(sk, optval, optlen); release_sock(sk); @@ -970,7 +970,7 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb) goto nla_failure; if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) { - err = nla_put_flag(skb, TLS_INFO_ZC_SENDFILE); + err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX); if (err) goto nla_failure; } @@ -994,7 +994,7 @@ static size_t tls_get_info_size(const struct sock *sk) nla_total_size(sizeof(u16)) + /* TLS_INFO_CIPHER */ nla_total_size(sizeof(u16)) + /* TLS_INFO_RXCONF */ nla_total_size(sizeof(u16)) + /* TLS_INFO_TXCONF */ - nla_total_size(0) + /* TLS_INFO_ZC_SENDFILE */ + nla_total_size(0) + /* TLS_INFO_ZC_RO_TX */ 0; return size; diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 654dcef7cfb3..2206e6f8902d 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -490,7 +490,7 @@ static int unix_dgram_peer_wake_me(struct 
sock *sk, struct sock *other) * -ECONNREFUSED. Otherwise, if we haven't queued any skbs * to other and its full, we will hang waiting for POLLOUT. */ - if (unix_recvq_full(other) && !sock_flag(other, SOCK_DEAD)) + if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD)) return 1; if (connected) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index e0a4526ab66b..19ac872a6624 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -373,7 +373,8 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries) goto out; } - nb_pkts = xskq_cons_peek_desc_batch(xs->tx, pool, max_entries); + max_entries = xskq_cons_nb_entries(xs->tx, max_entries); + nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, max_entries); if (!nb_pkts) { xs->tx->queue_empty_descs++; goto out; @@ -389,7 +390,7 @@ u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max_entries) if (!nb_pkts) goto out; - xskq_cons_release_n(xs->tx, nb_pkts); + xskq_cons_release_n(xs->tx, max_entries); __xskq_cons_release(xs->tx); xs->sk.sk_write_space(&xs->sk); diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h index a794410989cc..fb20bf7207cf 100644 --- a/net/xdp/xsk_queue.h +++ b/net/xdp/xsk_queue.h @@ -282,14 +282,6 @@ static inline bool xskq_cons_peek_desc(struct xsk_queue *q, return xskq_cons_read_desc(q, desc, pool); } -static inline u32 xskq_cons_peek_desc_batch(struct xsk_queue *q, struct xsk_buff_pool *pool, - u32 max) -{ - u32 entries = xskq_cons_nb_entries(q, max); - - return xskq_cons_read_desc_batch(q, pool, entries); -} - /* To improve performance in the xskq_cons_release functions, only update local state here. * Reflect this to global state when we get new entries from the ring in * xskq_cons_get_entries() and whenever Rx or Tx processing are completed in the NAPI loop. diff --git a/scripts/sign-file.c b/scripts/sign-file.c index fbd34b8e8f57..7434e9ea926e 100644 --- a/scripts/sign-file.c +++ b/scripts/sign-file.c @@ -30,6 +30,13 @@ #include <openssl/engine.h> /* + * OpenSSL 3.0 deprecates the OpenSSL's ENGINE API. + * + * Remove this if/when that API is no longer used + */ +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + +/* * Use CMS if we have openssl-1.0.0 or newer available - otherwise we have to * assume that it's not available and its header file is missing and that we * should use PKCS#7 instead. Switching to the older PKCS#7 format restricts diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c index 0165da386289..2b2c8eb258d5 100644 --- a/security/keys/trusted-keys/trusted_tpm2.c +++ b/security/keys/trusted-keys/trusted_tpm2.c @@ -283,8 +283,8 @@ int tpm2_seal_trusted(struct tpm_chip *chip, /* key properties */ flags = 0; flags |= options->policydigest_len ? 0 : TPM2_OA_USER_WITH_AUTH; - flags |= payload->migratable ? (TPM2_OA_FIXED_TPM | - TPM2_OA_FIXED_PARENT) : 0; + flags |= payload->migratable ? 
0 : (TPM2_OA_FIXED_TPM | + TPM2_OA_FIXED_PARENT); tpm_buf_append_u32(&buf, flags); /* policy */ diff --git a/sound/hda/hdac_device.c b/sound/hda/hdac_device.c index 3e9e9ac804f6..b7e5032b61c9 100644 --- a/sound/hda/hdac_device.c +++ b/sound/hda/hdac_device.c @@ -660,6 +660,7 @@ static const struct hda_vendor_id hda_vendor_ids[] = { { 0x14f1, "Conexant" }, { 0x17e8, "Chrontel" }, { 0x1854, "LG" }, + { 0x19e5, "Huawei" }, { 0x1aec, "Wolfson Microelectronics" }, { 0x1af4, "QEMU" }, { 0x434d, "C-Media" }, diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0a83eb6b88b1..a77165bd92a9 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2525,6 +2525,9 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, { PCI_DEVICE(0x8086, 0x51cf), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, + /* Meteorlake-P */ + { PCI_DEVICE(0x8086, 0x7e28), + .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE}, /* Broxton-P(Apollolake) */ { PCI_DEVICE(0x8086, 0x5a98), .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_BROXTON }, diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index aa360a0af284..1248d1a51cf0 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -1052,6 +1052,13 @@ static int patch_conexant_auto(struct hda_codec *codec) snd_hda_pick_fixup(codec, cxt5051_fixup_models, cxt5051_fixups, cxt_fixups); break; + case 0x14f15098: + codec->pin_amp_workaround = 1; + spec->gen.mixer_nid = 0x22; + spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO; + snd_hda_pick_fixup(codec, cxt5066_fixup_models, + cxt5066_fixups, cxt_fixups); + break; case 0x14f150f2: codec->power_save_node = 1; fallthrough; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 31fe41795571..6c209cd26c0c 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -4554,6 +4554,7 @@ HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi), HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_adlp_hdmi), HDA_CODEC_ENTRY(0x8086281f, "Raptorlake-P HDMI", patch_i915_adlp_hdmi), +HDA_CODEC_ENTRY(0x8086281d, "Meteorlake HDMI", patch_i915_adlp_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f3ad454b3fbf..b0f954118e72 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -443,6 +443,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0245: case 0x10ec0255: case 0x10ec0256: + case 0x19e58326: case 0x10ec0257: case 0x10ec0282: case 0x10ec0283: @@ -580,6 +581,7 @@ static void alc_shutup_pins(struct hda_codec *codec) switch (codec->core.vendor_id) { case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: case 0x10ec0283: case 0x10ec0286: case 0x10ec0288: @@ -3247,6 +3249,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 0x48, 0x0); alc_update_coef_idx(codec, 0x49, 0x0045, 0x0); break; @@ -3275,6 +3278,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec) case 0x10ec0230: case 0x10ec0236: case 0x10ec0256: + case 0x19e58326: alc_write_coef_idx(codec, 
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f3ad454b3fbf..b0f954118e72 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -443,6 +443,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
 	case 0x10ec0245:
 	case 0x10ec0255:
 	case 0x10ec0256:
+	case 0x19e58326:
 	case 0x10ec0257:
 	case 0x10ec0282:
 	case 0x10ec0283:
@@ -580,6 +581,7 @@ static void alc_shutup_pins(struct hda_codec *codec)
 	switch (codec->core.vendor_id) {
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 	case 0x10ec0283:
 	case 0x10ec0286:
 	case 0x10ec0288:
@@ -3247,6 +3249,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x48, 0x0);
 		alc_update_coef_idx(codec, 0x49, 0x0045, 0x0);
 		break;
@@ -3275,6 +3278,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x48, 0xd011);
 		alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
 		break;
@@ -4910,6 +4914,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, coef0256);
 		break;
 	case 0x10ec0234:
@@ -5025,6 +5030,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x45, 0xc489);
 		snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
 		alc_process_coef_fw(codec, coef0256);
@@ -5175,6 +5181,7 @@ static void alc_headset_mode_default(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
 		alc_write_coef_idx(codec, 0x45, 0xc089);
 		msleep(50);
@@ -5274,6 +5281,7 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, coef0256);
 		break;
 	case 0x10ec0234:
@@ -5388,6 +5396,7 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, coef0256);
 		break;
 	case 0x10ec0234:
@@ -5489,6 +5498,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_write_coef_idx(codec, 0x1b, 0x0e4b);
 		alc_write_coef_idx(codec, 0x06, 0x6104);
 		alc_write_coefex_idx(codec, 0x57, 0x3, 0x09a3);
@@ -5783,6 +5793,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_process_coef_fw(codec, alc256fw);
 		break;
 	}
@@ -6385,6 +6396,7 @@ static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec)
 	case 0x10ec0236:
 	case 0x10ec0255:
 	case 0x10ec0256:
+	case 0x19e58326:
 		alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
 		alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
 		break;
@@ -9059,6 +9071,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x103c, 0x89c3, "Zbook Studio G9", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
 	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
 	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
 	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -9258,6 +9271,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x31af, "ThinkCentre Station", ALC623_FIXUP_LENOVO_THINKSTATION_P340),
+	SND_PCI_QUIRK(0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
 	SND_PCI_QUIRK(0x17aa, 0x3813, "Legion 7i 15IMHG05", ALC287_FIXUP_LEGION_15IMHG05_SPEAKERS),
 	SND_PCI_QUIRK(0x17aa, 0x3818, "Lenovo C940", ALC298_FIXUP_LENOVO_SPK_VOLUME),
 	SND_PCI_QUIRK(0x17aa, 0x3819, "Lenovo 13s Gen2 ITL", ALC287_FIXUP_13S_GEN2_SPEAKERS),
@@ -10095,6 +10109,7 @@ static int patch_alc269(struct hda_codec *codec)
 	case 0x10ec0230:
 	case 0x10ec0236:
 	case 0x10ec0256:
+	case 0x19e58326:
 		spec->codec_variant = ALC269_TYPE_ALC256;
 		spec->shutup = alc256_shutup;
 		spec->init_hook = alc256_init;
@@ -11545,6 +11560,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
 	HDA_CODEC_ENTRY(0x10ec0b00, "ALCS1200A", patch_alc882),
 	HDA_CODEC_ENTRY(0x10ec1168, "ALC1220", patch_alc882),
 	HDA_CODEC_ENTRY(0x10ec1220, "ALC1220", patch_alc882),
+	HDA_CODEC_ENTRY(0x19e58326, "HW8326", patch_alc269),
 	{} /* terminator */
 };
 MODULE_DEVICE_TABLE(hdaudio, snd_hda_id_realtek);
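The SND_PCI_QUIRK entries above key fixups off the PCI subsystem vendor/device pair of a specific machine. A simplified lookup over the two entries added in this hunk (struct layout and matching logic are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* Each row binds one machine's PCI subsystem IDs to a named fixup. */
struct pci_quirk {
	uint16_t subvendor;
	uint16_t subdevice;
	const char *name;
	const char *fixup;
};

static const struct pci_quirk quirks[] = {
	{ 0x103c, 0x8a78, "HP Dev One", "ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST" },
	{ 0x17aa, 0x3802, "Lenovo Yoga DuetITL 2021", "ALC287_FIXUP_YOGA7_14ITL_SPEAKERS" },
};

static const struct pci_quirk *find_quirk(uint16_t sv, uint16_t sd)
{
	for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].subvendor == sv && quirks[i].subdevice == sd)
			return &quirks[i];
	return NULL;	/* no quirk: generic parser handles the codec */
}

int main(void)
{
	const struct pci_quirk *q = find_quirk(0x103c, 0x8a78);

	if (q)
		printf("%s -> %s\n", q->name, q->fixup);
	return 0;
}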
diff --git a/sound/soc/codecs/cs35l36.c b/sound/soc/codecs/cs35l36.c
index 920190daa4d1..dfe85dc2cd20 100644
--- a/sound/soc/codecs/cs35l36.c
+++ b/sound/soc/codecs/cs35l36.c
@@ -444,7 +444,8 @@ static bool cs35l36_volatile_reg(struct device *dev, unsigned int reg)
 	}
 }
 
-static DECLARE_TLV_DB_SCALE(dig_vol_tlv, -10200, 25, 0);
+static const DECLARE_TLV_DB_RANGE(dig_vol_tlv, 0, 912,
+				  TLV_DB_MINMAX_ITEM(-10200, 1200));
 static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1);
 
 static const char * const cs35l36_pcm_sftramp_text[] = {
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index aff618513c75..0e933181b5db 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -143,7 +143,7 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
 			0, 0xA0, 96, adc_att_tlv),
 	SOC_DOUBLE_R_SX_TLV("PGA Volume",
 			CS42L51_ALC_PGA_CTL, CS42L51_ALC_PGB_CTL,
-			0, 0x1A, 30, pga_tlv),
+			0, 0x19, 30, pga_tlv),
 	SOC_SINGLE("Playback Deemphasis Switch", CS42L51_DAC_CTL, 3, 1, 0),
 	SOC_SINGLE("Auto-Mute Switch", CS42L51_DAC_CTL, 2, 1, 0),
 	SOC_SINGLE("Soft Ramp Switch", CS42L51_DAC_CTL, 1, 1, 0),
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 9b182b585be4..10e696406a71 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -137,7 +137,9 @@ static DECLARE_TLV_DB_SCALE(mic_tlv, 1600, 100, 0);
 
 static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
 
-static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+static DECLARE_TLV_DB_SCALE(pass_tlv, -6000, 50, 0);
+
+static DECLARE_TLV_DB_SCALE(mix_tlv, -5150, 50, 0);
 
 static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
 
@@ -351,7 +353,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 			      CS42L52_SPKB_VOL, 0, 0x40, 0xC0, hl_tlv),
 
 	SOC_DOUBLE_R_SX_TLV("Bypass Volume", CS42L52_PASSTHRUA_VOL,
-			      CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pga_tlv),
+			      CS42L52_PASSTHRUB_VOL, 0, 0x88, 0x90, pass_tlv),
 
 	SOC_DOUBLE("Bypass Mute", CS42L52_MISC_CTL, 4, 5, 1, 0),
 
@@ -364,7 +366,7 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
 			      CS42L52_ADCB_VOL, 0, 0xA0, 0x78, ipd_tlv),
 	SOC_DOUBLE_R_SX_TLV("ADC Mixer Volume",
 			     CS42L52_ADCA_MIXER_VOL, CS42L52_ADCB_MIXER_VOL,
-			     0, 0x19, 0x7F, ipd_tlv),
+			     0, 0x19, 0x7F, mix_tlv),
 
 	SOC_DOUBLE("ADC Switch", CS42L52_ADC_MISC_CTL, 0, 1, 1, 0),
 
diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
index dc23007336c5..510c94265b1f 100644
--- a/sound/soc/codecs/cs42l56.c
+++ b/sound/soc/codecs/cs42l56.c
@@ -391,9 +391,9 @@ static const struct snd_kcontrol_new cs42l56_snd_controls[] = {
 	SOC_DOUBLE("ADC Boost Switch", CS42L56_GAIN_BIAS_CTL, 3, 2, 1, 1),
 
 	SOC_DOUBLE_R_SX_TLV("Headphone Volume", CS42L56_HPA_VOLUME,
-			      CS42L56_HPB_VOLUME, 0, 0x84, 0x48, hl_tlv),
+			      CS42L56_HPB_VOLUME, 0, 0x44, 0x48, hl_tlv),
 	SOC_DOUBLE_R_SX_TLV("LineOut Volume", CS42L56_LOA_VOLUME,
-			      CS42L56_LOB_VOLUME, 0, 0x84, 0x48, hl_tlv),
+			      CS42L56_LOB_VOLUME, 0, 0x44, 0x48, hl_tlv),
 
 	SOC_SINGLE_TLV("Bass Shelving Volume", CS42L56_TONE_CTL,
 			0, 0x00, 1, tone_tlv),
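ALSA TLV metadata is expressed in centi-dB (1/100 dB). That explains the cs35l36 change above: 912 register steps spanning -102 dB to +12 dB come to 12.5 centi-dB (0.125 dB) per step, which an integer-step DECLARE_TLV_DB_SCALE cannot represent, so the driver switches to a min/max range that userspace interpolates. A small worked sketch of the arithmetic (values taken from the hunk, helper names invented):

#include <stdio.h>

/* Linear interpolation between the endpoints of a TLV_DB_MINMAX range,
 * returning dB for a given register item. */
static double item_db(unsigned int item, unsigned int max_item,
		      int min_cdb, int max_cdb)
{
	return (min_cdb + (double)item * (max_cdb - min_cdb) / max_item) / 100.0;
}

int main(void)
{
	/* 912 steps over 114 dB -> 0.125 dB per step; the old
	 * 0.25 dB/step scale overstated the range by a factor of two. */
	printf("step    = %.3f dB\n",
	       item_db(1, 912, -10200, 1200) - item_db(0, 912, -10200, 1200));
	printf("item 0  = %.2f dB\n", item_db(0, 912, -10200, 1200));
	printf("item 912= %.2f dB\n", item_db(912, 912, -10200, 1200));
	return 0;
}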
diff --git a/sound/soc/codecs/cs53l30.c b/sound/soc/codecs/cs53l30.c
index 703545273900..360ca2ffd506 100644
--- a/sound/soc/codecs/cs53l30.c
+++ b/sound/soc/codecs/cs53l30.c
@@ -348,22 +348,22 @@ static const struct snd_kcontrol_new cs53l30_snd_controls[] = {
 	SOC_ENUM("ADC2 NG Delay", adc2_ng_delay_enum),
 
 	SOC_SINGLE_SX_TLV("ADC1A PGA Volume",
-		    CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC1A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 	SOC_SINGLE_SX_TLV("ADC1B PGA Volume",
-		    CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC1B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 	SOC_SINGLE_SX_TLV("ADC2A PGA Volume",
-		    CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC2A_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 	SOC_SINGLE_SX_TLV("ADC2B PGA Volume",
-		    CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x18, pga_tlv),
+		    CS53L30_ADC2B_AFE_CTL, 0, 0x34, 0x24, pga_tlv),
 
 	SOC_SINGLE_SX_TLV("ADC1A Digital Volume",
-		    CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC1A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 	SOC_SINGLE_SX_TLV("ADC1B Digital Volume",
-		    CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC1B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 	SOC_SINGLE_SX_TLV("ADC2A Digital Volume",
-		    CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC2A_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 	SOC_SINGLE_SX_TLV("ADC2B Digital Volume",
-		    CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x0C, dig_tlv),
+		    CS53L30_ADC2B_DIG_VOL, 0, 0xA0, 0x6C, dig_tlv),
 };
 
 static const struct snd_soc_dapm_widget cs53l30_dapm_widgets[] = {
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 3f00ead97006..dd53dfd87b04 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -161,13 +161,16 @@ static int es8328_put_deemph(struct snd_kcontrol *kcontrol,
 	if (deemph > 1)
 		return -EINVAL;
 
+	if (es8328->deemph == deemph)
+		return 0;
+
 	ret = es8328_set_deemph(component);
 	if (ret < 0)
 		return ret;
 
 	es8328->deemph = deemph;
 
-	return 0;
+	return 1;
 }
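The es8328 change above (and the wm_adsp one further down) align put() handlers with the ALSA kcontrol convention: return a negative errno on failure, 0 when the written value matches current state, and 1 when state actually changed so the core emits a control-change notification. A self-contained sketch of that convention with stand-in types:

#include <stdio.h>

/* Stand-ins for the driver's state and hardware write; hypothetical. */
struct codec_state { unsigned int deemph; };

static int hw_set_deemph(struct codec_state *c, unsigned int v)
{
	c->deemph = v;	/* pretend register write */
	return 0;
}

/* put() convention: <0 error, 0 unchanged, 1 changed (notify). */
static int put_deemph(struct codec_state *c, unsigned int deemph)
{
	if (deemph > 1)
		return -22;	/* -EINVAL: out of range */
	if (c->deemph == deemph)
		return 0;	/* no change, no notification */
	if (hw_set_deemph(c, deemph) < 0)
		return -5;	/* -EIO: hardware write failed */
	return 1;		/* changed: core notifies listeners */
}

int main(void)
{
	struct codec_state c = { .deemph = 0 };

	printf("%d\n", put_deemph(&c, 1));	/* 1: changed */
	printf("%d\n", put_deemph(&c, 1));	/* 0: unchanged */
	return 0;
}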
diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
index 66bbd8f4f1ad..08f6c56dc387 100644
--- a/sound/soc/codecs/nau8822.c
+++ b/sound/soc/codecs/nau8822.c
@@ -741,6 +741,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
 		pll_param->mclk_scaler, pll_param->pre_factor);
 
 	snd_soc_component_update_bits(component,
+		NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_OFF);
+	snd_soc_component_update_bits(component,
 		NAU8822_REG_PLL_N, NAU8822_PLLMCLK_DIV2 | NAU8822_PLLN_MASK,
 		(pll_param->pre_factor ? NAU8822_PLLMCLK_DIV2 : 0) |
 		pll_param->pll_int);
@@ -757,6 +759,8 @@ static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
 		pll_param->mclk_scaler << NAU8822_MCLKSEL_SFT);
 	snd_soc_component_update_bits(component,
 		NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK, NAU8822_CLKM_PLL);
+	snd_soc_component_update_bits(component,
+		NAU8822_REG_POWER_MANAGEMENT_1, NAU8822_PLL_EN_MASK, NAU8822_PLL_ON);
 
 	return 0;
 }
diff --git a/sound/soc/codecs/nau8822.h b/sound/soc/codecs/nau8822.h
index 489191ff187e..b45d42c15de6 100644
--- a/sound/soc/codecs/nau8822.h
+++ b/sound/soc/codecs/nau8822.h
@@ -90,6 +90,9 @@
 #define NAU8822_REFIMP_3K		0x3
 #define NAU8822_IOBUF_EN		(0x1 << 2)
 #define NAU8822_ABIAS_EN		(0x1 << 3)
+#define NAU8822_PLL_EN_MASK		(0x1 << 5)
+#define NAU8822_PLL_ON			(0x1 << 5)
+#define NAU8822_PLL_OFF			(0x0 << 5)
 
 /* NAU8822_REG_AUDIO_INTERFACE (0x4) */
 #define NAU8822_AIFMT_MASK		(0x3 << 3)
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 34cd5a2a997c..5cca89364280 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3868,6 +3868,7 @@ static int wm8962_runtime_suspend(struct device *dev)
 #endif
 
 static const struct dev_pm_ops wm8962_pm = {
+	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
 	SET_RUNTIME_PM_OPS(wm8962_runtime_suspend, wm8962_runtime_resume, NULL)
 };
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 7973a75cac05..6d7fd88243aa 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -333,7 +333,7 @@ int wm_adsp_fw_put(struct snd_kcontrol *kcontrol,
 	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
 	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
 	struct wm_adsp *dsp = snd_soc_component_get_drvdata(component);
-	int ret = 0;
+	int ret = 1;
 
 	if (ucontrol->value.enumerated.item[0] == dsp[e->shift_l].fw)
 		return 0;
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index fa950dde5310..e765da9a19e7 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1293,6 +1293,7 @@ static const struct of_device_id fsl_sai_ids[] = {
 	{ .compatible = "fsl,imx8mm-sai", .data = &fsl_sai_imx8mm_data },
 	{ .compatible = "fsl,imx8mp-sai", .data = &fsl_sai_imx8mp_data },
 	{ .compatible = "fsl,imx8ulp-sai", .data = &fsl_sai_imx8ulp_data },
+	{ .compatible = "fsl,imx8mn-sai", .data = &fsl_sai_imx8mp_data },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, fsl_sai_ids);
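The wm8962 hunk above adds system-sleep handling by reusing the runtime-PM callbacks through pm_runtime_force_suspend()/pm_runtime_force_resume(), a common pattern for devices whose suspend path is the same in both cases. A hedged kernel-style sketch with hypothetical driver callbacks standing in for wm8962's:

#include <linux/module.h>
#include <linux/pm_runtime.h>

/* Hypothetical driver callbacks; mydrv_* are not real wm8962 symbols. */
static int mydrv_runtime_suspend(struct device *dev) { return 0; }
static int mydrv_runtime_resume(struct device *dev) { return 0; }

/*
 * Route system suspend/resume through the runtime-PM handlers so the
 * device is powered down over suspend even if it was runtime-active,
 * and restored to its pre-suspend runtime state on resume.
 */
static const struct dev_pm_ops mydrv_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(mydrv_runtime_suspend, mydrv_runtime_resume, NULL)
};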
diff --git a/sound/soc/intel/boards/sof_cirrus_common.c b/sound/soc/intel/boards/sof_cirrus_common.c
index e71d74ec1b0b..f4192df962d6 100644
--- a/sound/soc/intel/boards/sof_cirrus_common.c
+++ b/sound/soc/intel/boards/sof_cirrus_common.c
@@ -54,22 +54,29 @@ static struct snd_soc_dai_link_component cs35l41_components[] = {
 	},
 };
 
+/*
+ * Mapping between ACPI instance id and speaker position.
+ *
+ * Four speakers:
+ *	0: Tweeter left, 1: Woofer left
+ *	2: Tweeter right, 3: Woofer right
+ */
 static struct snd_soc_codec_conf cs35l41_codec_conf[] = {
 	{
 		.dlc = COMP_CODEC_CONF(CS35L41_DEV0_NAME),
-		.name_prefix = "WL",
+		.name_prefix = "TL",
 	},
 	{
 		.dlc = COMP_CODEC_CONF(CS35L41_DEV1_NAME),
-		.name_prefix = "WR",
+		.name_prefix = "WL",
 	},
 	{
 		.dlc = COMP_CODEC_CONF(CS35L41_DEV2_NAME),
-		.name_prefix = "TL",
+		.name_prefix = "TR",
 	},
 	{
 		.dlc = COMP_CODEC_CONF(CS35L41_DEV3_NAME),
-		.name_prefix = "TR",
+		.name_prefix = "WR",
 	},
 };
 
@@ -101,6 +108,21 @@ static int cs35l41_init(struct snd_soc_pcm_runtime *rtd)
 	return ret;
 }
 
+/*
+ * Channel map:
+ *
+ * TL/WL: ASPRX1 on slot 0, ASPRX2 on slot 1 (default)
+ * TR/WR: ASPRX1 on slot 1, ASPRX2 on slot 0
+ */
+static const struct {
+	unsigned int rx[2];
+} cs35l41_channel_map[] = {
+	{.rx = {0, 1}},	/* TL */
+	{.rx = {0, 1}},	/* WL */
+	{.rx = {1, 0}},	/* TR */
+	{.rx = {1, 0}},	/* WR */
+};
+
 static int cs35l41_hw_params(struct snd_pcm_substream *substream,
 			     struct snd_pcm_hw_params *params)
 {
@@ -134,6 +156,16 @@ static int cs35l41_hw_params(struct snd_pcm_substream *substream,
 				ret);
 			return ret;
 		}
+
+		/* setup channel map */
+		ret = snd_soc_dai_set_channel_map(codec_dai, 0, NULL,
+						  ARRAY_SIZE(cs35l41_channel_map[i].rx),
+						  (unsigned int *)cs35l41_channel_map[i].rx);
+		if (ret < 0) {
+			dev_err(codec_dai->dev, "fail to set channel map, ret %d\n",
+				ret);
+			return ret;
+		}
 	}
 
 	return 0;
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index f03a7ae49d50..b41ab7a321ae 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -898,7 +898,7 @@ static int lpass_platform_cdc_dma_mmap(struct snd_pcm_substream *substream,
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	unsigned long size, offset;
 
-	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 	size = vma->vm_end - vma->vm_start;
 	offset = vma->vm_pgoff << PAGE_SHIFT;
 	return io_remap_pfn_range(vma, vma->vm_start,
diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c
index 8d740635a4bb..28976098a89e 100644
--- a/sound/soc/sof/sof-audio.c
+++ b/sound/soc/sof/sof-audio.c
@@ -318,7 +318,7 @@ sink_prepare:
 	p->walking = false;
 	if (ret < 0) {
 		/* unprepare the source widget */
-		if (!widget_ops[widget->id].ipc_unprepare && swidget->prepared) {
+		if (widget_ops[widget->id].ipc_unprepare && swidget->prepared) {
 			widget_ops[widget->id].ipc_unprepare(swidget);
 			swidget->prepared = false;
 		}
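The sof-audio fix above removes a negation that made the code call ipc_unprepare precisely when it was NULL. The underlying pattern is the usual optional-callback guard; a standalone illustration (names are invented, not SOF's):

#include <stdio.h>

/* An optional hook: may legitimately be left unset. */
struct widget_ops {
	void (*unprepare)(void *ctx);
};

static void demo_unprepare(void *ctx)
{
	(void)ctx;
	puts("unprepare called");
}

static void cleanup(const struct widget_ops *ops, void *ctx, int prepared)
{
	/* The buggy form was "if (!ops->unprepare && prepared)", which
	 * skipped widgets that had the hook and dereferenced NULL for
	 * widgets that did not. Test the pointer, then call it. */
	if (ops->unprepare && prepared)
		ops->unprepare(ctx);
}

int main(void)
{
	struct widget_ops with = { .unprepare = demo_unprepare };
	struct widget_ops without = { .unprepare = NULL };

	cleanup(&with, NULL, 1);	/* runs the hook */
	cleanup(&without, NULL, 1);	/* safely skipped */
	return 0;
}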
diff --git a/sound/soc/sof/sof-client-ipc-msg-injector.c b/sound/soc/sof/sof-client-ipc-msg-injector.c
index 03490a4d4ae7..6bdfa527b7f7 100644
--- a/sound/soc/sof/sof-client-ipc-msg-injector.c
+++ b/sound/soc/sof/sof-client-ipc-msg-injector.c
@@ -150,7 +150,7 @@ static ssize_t sof_msg_inject_dfs_write(struct file *file, const char __user *bu
 {
 	struct sof_client_dev *cdev = file->private_data;
 	struct sof_msg_inject_priv *priv = cdev->data;
-	size_t size;
+	ssize_t size;
 	int ret;
 
 	if (*ppos)
@@ -158,8 +158,10 @@ static ssize_t sof_msg_inject_dfs_write(struct file *file, const char __user *bu
 
 	size = simple_write_to_buffer(priv->tx_buffer, priv->max_msg_size,
				      ppos, buffer, count);
+	if (size < 0)
+		return size;
 	if (size != count)
-		return size > 0 ? -EFAULT : size;
+		return -EFAULT;
 
 	memset(priv->rx_buffer, 0, priv->max_msg_size);
 
@@ -179,7 +181,7 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
 	struct sof_client_dev *cdev = file->private_data;
 	struct sof_msg_inject_priv *priv = cdev->data;
 	struct sof_ipc4_msg *ipc4_msg = priv->tx_buffer;
-	size_t size;
+	ssize_t size;
 	int ret;
 
 	if (*ppos)
@@ -192,18 +194,20 @@ static ssize_t sof_msg_inject_ipc4_dfs_write(struct file *file,
 	size = simple_write_to_buffer(&ipc4_msg->header_u64,
				      sizeof(ipc4_msg->header_u64),
				      ppos, buffer, count);
+	if (size < 0)
+		return size;
 	if (size != sizeof(ipc4_msg->header_u64))
-		return size > 0 ? -EFAULT : size;
+		return -EFAULT;
 
 	count -= size;
-	if (!count) {
-		/* Copy the payload */
-		size = simple_write_to_buffer(ipc4_msg->data_ptr,
-					      priv->max_msg_size, ppos, buffer,
-					      count);
-		if (size != count)
-			return size > 0 ? -EFAULT : size;
-	}
+	/* Copy the payload */
+	size = simple_write_to_buffer(ipc4_msg->data_ptr,
+				      priv->max_msg_size, ppos, buffer,
+				      count);
+	if (size < 0)
+		return size;
+	if (size != count)
+		return -EFAULT;
 
 	ipc4_msg->data_size = count;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index b470404a5376..e692ae04436a 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -291,6 +291,9 @@ int snd_usb_audioformat_set_sync_ep(struct snd_usb_audio *chip,
 	bool is_playback;
 	int err;
 
+	if (fmt->sync_ep)
+		return 0; /* already set up */
+
 	alts = snd_usb_get_host_interface(chip, fmt->iface, fmt->altsetting);
 	if (!alts)
 		return 0;
@@ -304,7 +307,7 @@ int snd_usb_audioformat_set_sync_ep(struct snd_usb_audio *chip,
 	 * Generic sync EP handling
 	 */
 
-	if (altsd->bNumEndpoints < 2)
+	if (fmt->ep_idx > 0 || altsd->bNumEndpoints < 2)
 		return 0;
 
 	is_playback = !(get_endpoint(alts, 0)->bEndpointAddress & USB_DIR_IN);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 78eb41b621d6..4f56e1784932 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2658,7 +2658,12 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 					.nr_rates = 2,
 					.rate_table = (unsigned int[]) {
 						44100, 48000
-					}
+					},
+					.sync_ep = 0x82,
+					.sync_iface = 0,
+					.sync_altsetting = 1,
+					.sync_ep_idx = 1,
+					.implicit_fb = 1,
 				}
 			},
 			{
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index d9aad15e0d24..02bb8cbf9194 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -395,6 +395,18 @@ static void test_func_map_prog_compatibility(void)
 				     "./test_attach_probe.o");
 }
 
+static void test_func_replace_global_func(void)
+{
+	const char *prog_name[] = {
+		"freplace/test_pkt_access",
+	};
+
+	test_fexit_bpf2bpf_common("./freplace_global_func.o",
+				  "./test_pkt_access.o",
+				  ARRAY_SIZE(prog_name),
+				  prog_name, false, NULL);
+}
+
 /* NOTE: affect other tests, must run in serial mode */
 void serial_test_fexit_bpf2bpf(void)
 {
@@ -416,4 +428,6 @@ void serial_test_fexit_bpf2bpf(void)
 		test_func_replace_multi();
 	if (test__start_subtest("fmod_ret_freplace"))
 		test_fmod_ret_freplace();
+	if (test__start_subtest("func_replace_global_func"))
+		test_func_replace_global_func();
 }
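The injector fixes above hinge on signedness: simple_write_to_buffer() returns ssize_t, either a negative errno or the number of bytes copied, and storing that in a size_t made the error branch unreachable. A toy reproduction of the corrected flow, with a fake copy helper in place of the kernel API:

#include <stdio.h>
#include <sys/types.h>

/* Stand-in for simple_write_to_buffer(): bytes copied or negative errno. */
static ssize_t fake_write_to_buffer(size_t want, int fail)
{
	return fail ? -14 /* -EFAULT */ : (ssize_t)want;
}

/* Keep the result signed and test for errors before comparing counts. */
static ssize_t copy_msg(size_t count, int fail)
{
	ssize_t size = fake_write_to_buffer(count, fail);

	if (size < 0)
		return size;		/* propagate the real errno */
	if ((size_t)size != count)
		return -14;		/* short copy: -EFAULT */
	return size;
}

int main(void)
{
	/* With "size_t size", an errno of -14 would wrap to a huge
	 * unsigned value and a size < 0 test could never fire. */
	printf("%zd\n", copy_msg(8, 0));
	printf("%zd\n", copy_msg(8, 1));
	return 0;
}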
diff --git a/tools/testing/selftests/bpf/progs/freplace_global_func.c b/tools/testing/selftests/bpf/progs/freplace_global_func.c
new file mode 100644
index 000000000000..96cb61a6ce87
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_global_func.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+__noinline
+int test_ctx_global_func(struct __sk_buff *skb)
+{
+	volatile int retval = 1;
+	return retval;
+}
+
+SEC("freplace/test_pkt_access")
+int new_test_pkt_access(struct __sk_buff *skb)
+{
+	return test_ctx_global_func(skb);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
index e0b2bb1339b1..3330fb183c68 100644
--- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
+++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c
@@ -44,7 +44,7 @@ static inline void nop_loop(void)
 {
 	int i;
 
-	for (i = 0; i < 1000000; i++)
+	for (i = 0; i < 100000000; i++)
 		asm volatile("nop");
 }
 
@@ -56,12 +56,14 @@ static inline void check_tsc_msr_rdtsc(void)
 	tsc_freq = rdmsr(HV_X64_MSR_TSC_FREQUENCY);
 	GUEST_ASSERT(tsc_freq > 0);
 
-	/* First, check MSR-based clocksource */
+	/* For increased accuracy, take mean rdtsc() before and after rdmsr() */
 	r1 = rdtsc();
 	t1 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+	r1 = (r1 + rdtsc()) / 2;
 	nop_loop();
 	r2 = rdtsc();
 	t2 = rdmsr(HV_X64_MSR_TIME_REF_COUNT);
+	r2 = (r2 + rdtsc()) / 2;
 
 	GUEST_ASSERT(r2 > r1 && t2 > t1);
 
@@ -181,12 +183,14 @@ static void host_check_tsc_msr_rdtsc(struct kvm_vm *vm)
 	tsc_freq = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TSC_FREQUENCY);
 	TEST_ASSERT(tsc_freq > 0, "TSC frequency must be nonzero");
 
-	/* First, check MSR-based clocksource */
+	/* For increased accuracy, take mean rdtsc() before and after ioctl */
 	r1 = rdtsc();
 	t1 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+	r1 = (r1 + rdtsc()) / 2;
 	nop_loop();
 	r2 = rdtsc();
 	t2 = vcpu_get_msr(vm, VCPU_ID, HV_X64_MSR_TIME_REF_COUNT);
+	r2 = (r2 + rdtsc()) / 2;
 
 	TEST_ASSERT(t2 > t1, "Time reference MSR is not monotonic (%ld <= %ld)",
 		    t1, t2);
 
diff --git a/tools/testing/selftests/net/bpf/Makefile b/tools/testing/selftests/net/bpf/Makefile
index f91bf14bbee7..8a69c91fcca0 100644
--- a/tools/testing/selftests/net/bpf/Makefile
+++ b/tools/testing/selftests/net/bpf/Makefile
@@ -2,6 +2,7 @@
 CLANG ?= clang
 CCINCLUDE += -I../../bpf
+CCINCLUDE += -I../../../lib
 CCINCLUDE += -I../../../../../usr/include/
 
 TEST_CUSTOM_PROGS = $(OUTPUT)/bpf/nat6to4.o
@@ -10,5 +11,4 @@ all: $(TEST_CUSTOM_PROGS)
 $(OUTPUT)/%.o: %.c
 	$(CLANG) -O2 -target bpf -c $< $(CCINCLUDE) -o $@
 
-clean:
-	rm -f $(TEST_CUSTOM_PROGS)
+EXTRA_CLEAN := $(TEST_CUSTOM_PROGS)
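The hyperv_clock change above brackets each slow read between two rdtsc() calls and averages them, pinning the timestamp to the midpoint of the read window instead of its start. The same trick in isolation (slow_read() is a stand-in for the rdmsr() or ioctl being measured):

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>

/* Something that takes a while, like an MSR read or a vCPU ioctl. */
static uint64_t slow_read(void)
{
	volatile uint64_t x = 0;

	for (int i = 0; i < 1000000; i++)
		x += i;
	return x;
}

int main(void)
{
	uint64_t r = __rdtsc();
	uint64_t t = slow_read();

	r = (r + __rdtsc()) / 2;	/* midpoint of the read window */
	printf("value %llu at tsc %llu\n",
	       (unsigned long long)t, (unsigned long long)r);
	return 0;
}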
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
index eb8543b9a5c4..924ecb3f1f73 100755
--- a/tools/testing/selftests/netfilter/nft_nat.sh
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -374,6 +374,45 @@ EOF
 	return $lret
 }
 
+test_local_dnat_portonly()
+{
+	local family=$1
+	local daddr=$2
+	local lret=0
+	local sr_s
+	local sr_r
+
+ip netns exec "$ns0" nft -f /dev/stdin <<EOF
+table $family nat {
+	chain output {
+		type nat hook output priority 0; policy accept;
+		meta l4proto tcp dnat to :2000
+
+	}
+}
+EOF
+	if [ $? -ne 0 ]; then
+		if [ $family = "inet" ];then
+			echo "SKIP: inet port test"
+			test_inet_nat=false
+			return
+		fi
+		echo "SKIP: Could not add $family dnat hook"
+		return
+	fi
+
+	echo SERVER-$family | ip netns exec "$ns1" timeout 5 socat -u STDIN TCP-LISTEN:2000 &
+	sc_s=$!
+
+	result=$(ip netns exec "$ns0" timeout 1 socat TCP:$daddr:2000 STDOUT)
+
+	if [ "$result" = "SERVER-inet" ];then
+		echo "PASS: inet port rewrite without l3 address"
+	else
+		echo "ERROR: inet port rewrite"
+		ret=1
+	fi
+}
 
 test_masquerade6()
 {
@@ -1148,6 +1187,10 @@ fi
 reset_counters
 test_local_dnat ip
 test_local_dnat6 ip6
+
+reset_counters
+test_local_dnat_portonly inet 10.0.1.99
+
 reset_counters
 $test_inet_nat && test_local_dnat inet
 $test_inet_nat && test_local_dnat6 inet
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 64ec2222a196..44c47670447a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4300,8 +4300,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
 		kvm_put_kvm_no_destroy(kvm);
 		mutex_lock(&kvm->lock);
 		list_del(&dev->vm_node);
+		if (ops->release)
+			ops->release(dev);
 		mutex_unlock(&kvm->lock);
-		ops->destroy(dev);
+		if (ops->destroy)
+			ops->destroy(dev);
 		return ret;
 	}
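The kvm_main.c fix above mirrors normal device teardown on the create-device error path: release(), if provided, runs under kvm->lock while the device is unlinked, and destroy(), if provided, runs after the lock is dropped; previously destroy() was called unconditionally and could be NULL. A miniature of that unwind with pthreads standing in for the kernel lock (types illustrative):

#include <pthread.h>
#include <stdio.h>

/* Two optional teardown hooks with different locking contexts. */
struct dev_ops {
	void (*release)(void *dev);	/* called with list lock held */
	void (*destroy)(void *dev);	/* called without the lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void unwind_create(const struct dev_ops *ops, void *dev)
{
	pthread_mutex_lock(&list_lock);
	/* the list_del(&dev->vm_node) equivalent would go here */
	if (ops->release)
		ops->release(dev);
	pthread_mutex_unlock(&list_lock);
	if (ops->destroy)
		ops->destroy(dev);
}

static void say_destroy(void *dev)
{
	(void)dev;
	puts("destroy");
}

int main(void)
{
	/* Before the fix, a device type providing only release() would
	 * have crashed on the unconditional ops->destroy(dev) call. */
	struct dev_ops only_destroy = { .destroy = say_destroy };

	unwind_create(&only_destroy, NULL);
	return 0;
}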