100 files changed, 1169 insertions, 768 deletions
diff --git a/CREDITS b/CREDITS
@@ -2489,6 +2489,13 @@ D: XF86_Mach8 D: XF86_8514 D: cfdisk (curses based disk partitioning program) +N: Mat Martineau +E: mat@martineau.name +D: MPTCP subsystem co-maintainer 2020-2023 +D: Keyctl restricted keyring and Diffie-Hellman UAPI +D: Bluetooth L2CAP ERTM mode and AMP +S: USA + N: John S. Marvin E: jsm@fc.hp.com D: PA-RISC port diff --git a/Documentation/devicetree/bindings/riscv/cpus.yaml b/Documentation/devicetree/bindings/riscv/cpus.yaml index c6720764e765..a2884e3113da 100644 --- a/Documentation/devicetree/bindings/riscv/cpus.yaml +++ b/Documentation/devicetree/bindings/riscv/cpus.yaml @@ -83,7 +83,7 @@ properties: insensitive, letters in the riscv,isa string must be all lowercase to simplify parsing. $ref: "/schemas/types.yaml#/definitions/string" - pattern: ^rv(?:64|32)imaf?d?q?c?b?v?k?h?(?:_[hsxz](?:[a-z])+)*$ + pattern: ^rv(?:64|32)imaf?d?q?c?b?k?j?p?v?h?(?:[hsxz](?:[a-z])+)?(?:_[hsxz](?:[a-z])+)*$ # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here timebase-frequency: false diff --git a/Documentation/networking/bridge.rst b/Documentation/networking/bridge.rst index 4aef9cddde2f..c859f3c1636e 100644 --- a/Documentation/networking/bridge.rst +++ b/Documentation/networking/bridge.rst @@ -8,7 +8,7 @@ In order to use the Ethernet bridging functionality, you'll need the userspace tools. Documentation for Linux bridging is on: - http://www.linuxfoundation.org/collaborate/workgroups/networking/bridge + https://wiki.linuxfoundation.org/networking/bridge The bridge-utilities are maintained at: git://git.kernel.org/pub/scm/linux/kernel/git/shemminger/bridge-utils.git diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst index 49db1d11d7c4..8b1045c3b59e 100644 --- a/Documentation/networking/nf_conntrack-sysctl.rst +++ b/Documentation/networking/nf_conntrack-sysctl.rst @@ -173,7 +173,9 @@ nf_conntrack_sctp_timeout_cookie_echoed - INTEGER (seconds) default 3 nf_conntrack_sctp_timeout_established - INTEGER (seconds) - default 432000 (5 days) + default 210 + + Default is set to (hb_interval * path_max_retrans + rto_max) nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds) default 0.3 @@ -190,12 +192,6 @@ nf_conntrack_sctp_timeout_heartbeat_sent - INTEGER (seconds) This timeout is used to setup conntrack entry on secondary paths. Default is set to hb_interval. -nf_conntrack_sctp_timeout_heartbeat_acked - INTEGER (seconds) - default 210 - - This timeout is used to setup conntrack entry on secondary paths. - Default is set to (hb_interval * path_max_retrans + rto_max) - nf_conntrack_udp_timeout - INTEGER (seconds) default 30 diff --git a/MAINTAINERS b/MAINTAINERS index f781f936ae35..c224f3d87a8b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7615,7 +7615,6 @@ S: Maintained F: drivers/firmware/efi/test/ EFI VARIABLE FILESYSTEM -M: Matthew Garrett <matthew.garrett@nebula.com> M: Jeremy Kerr <jk@ozlabs.org> M: Ard Biesheuvel <ardb@kernel.org> L: linux-efi@vger.kernel.org @@ -8467,16 +8466,16 @@ F: fs/fscache/ F: include/linux/fscache*.h FSCRYPT: FILE SYSTEM LEVEL ENCRYPTION SUPPORT +M: Eric Biggers <ebiggers@kernel.org> M: Theodore Y. 
Ts'o <tytso@mit.edu> M: Jaegeuk Kim <jaegeuk@kernel.org> -M: Eric Biggers <ebiggers@kernel.org> L: linux-fscrypt@vger.kernel.org S: Supported Q: https://patchwork.kernel.org/project/linux-fscrypt/list/ -T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git +T: git https://git.kernel.org/pub/scm/fs/fscrypt/linux.git F: Documentation/filesystems/fscrypt.rst F: fs/crypto/ -F: include/linux/fscrypt*.h +F: include/linux/fscrypt.h F: include/uapi/linux/fscrypt.h FSI SUBSYSTEM @@ -8519,10 +8518,10 @@ F: include/linux/fsnotify*.h FSVERITY: READ-ONLY FILE-BASED AUTHENTICITY PROTECTION M: Eric Biggers <ebiggers@kernel.org> M: Theodore Y. Ts'o <tytso@mit.edu> -L: linux-fscrypt@vger.kernel.org +L: fsverity@lists.linux.dev S: Supported -Q: https://patchwork.kernel.org/project/linux-fscrypt/list/ -T: git git://git.kernel.org/pub/scm/fs/fscrypt/fscrypt.git fsverity +Q: https://patchwork.kernel.org/project/fsverity/list/ +T: git https://git.kernel.org/pub/scm/fs/fsverity/linux.git F: Documentation/filesystems/fsverity.rst F: fs/verity/ F: include/linux/fsverity.h @@ -14633,7 +14632,6 @@ F: net/netfilter/xt_SECMARK.c F: net/netlabel/ NETWORKING [MPTCP] -M: Mat Martineau <mathew.j.martineau@linux.intel.com> M: Matthieu Baerts <matthieu.baerts@tessares.net> L: netdev@vger.kernel.org L: mptcp@lists.linux.dev @@ -17962,6 +17960,7 @@ M: Albert Ou <aou@eecs.berkeley.edu> L: linux-riscv@lists.infradead.org S: Supported Q: https://patchwork.kernel.org/project/linux-riscv/list/ +C: irc://irc.libera.chat/riscv P: Documentation/riscv/patch-acceptance.rst T: git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git F: arch/riscv/ diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 4067f5169144..955b0362cdfb 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -132,7 +132,7 @@ AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W) ifeq ($(CONFIG_THUMB2_KERNEL),y) CFLAGS_ISA :=-Wa,-mimplicit-it=always $(AFLAGS_NOWARN) -AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb -D__thumb2__=2 +AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb CFLAGS_ISA +=-mthumb else CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN) diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile index 971e74546fb1..13e62c7c25dc 100644 --- a/arch/arm/crypto/Makefile +++ b/arch/arm/crypto/Makefile @@ -53,7 +53,12 @@ $(obj)/%-core.S: $(src)/%-armv4.pl clean-files += poly1305-core.S sha256-core.S sha512-core.S +aflags-thumb2-$(CONFIG_THUMB2_KERNEL) := -U__thumb2__ -D__thumb2__=1 + +AFLAGS_sha256-core.o += $(aflags-thumb2-y) +AFLAGS_sha512-core.o += $(aflags-thumb2-y) + # massage the perlasm code a bit so we only get the NEON routine if we need it poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5 poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7 -AFLAGS_poly1305-core.o += $(poly1305-aflags-y) +AFLAGS_poly1305-core.o += $(poly1305-aflags-y) $(aflags-thumb2-y) diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index c1494a4dee25..53f2d8774fdb 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -161,7 +161,7 @@ void __init paging_init(const struct machine_desc *mdesc) mpu_setup(); /* allocate the zero page. 
*/ - zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE); if (!zero_page) panic("%s: Failed to allocate %lu bytes align=0x%lx\n", __func__, PAGE_SIZE, PAGE_SIZE); diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index fa6999e24b07..e43f6d716b4b 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -6,6 +6,7 @@ * VM_EXEC */ #include <asm/asm-offsets.h> +#include <asm/pgtable.h> #include <asm/thread_info.h> #ifdef CONFIG_CPU_V7M diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 31d13a6001df..de4ff90785b2 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -48,8 +48,17 @@ int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); }) extern spinlock_t efi_rt_lock; +extern u64 *efi_rt_stack_top; efi_status_t __efi_rt_asm_wrapper(void *, const char *, ...); +/* + * efi_rt_stack_top[-1] contains the value the stack pointer had before + * switching to the EFI runtime stack. + */ +#define current_in_efi() \ + (!preemptible() && efi_rt_stack_top != NULL && \ + on_task_stack(current, READ_ONCE(efi_rt_stack_top[-1]), 1)) + #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT) /* diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h index 4e5354beafb0..66ec8caa6ac0 100644 --- a/arch/arm64/include/asm/stacktrace.h +++ b/arch/arm64/include/asm/stacktrace.h @@ -106,4 +106,19 @@ static inline struct stack_info stackinfo_get_sdei_critical(void) #define stackinfo_get_sdei_critical() stackinfo_get_unknown() #endif +#ifdef CONFIG_EFI +extern u64 *efi_rt_stack_top; + +static inline struct stack_info stackinfo_get_efi(void) +{ + unsigned long high = (u64)efi_rt_stack_top; + unsigned long low = high - THREAD_SIZE; + + return (struct stack_info) { + .low = low, + .high = high, + }; +} +#endif + #endif /* __ASM_STACKTRACE_H */ diff --git a/arch/arm64/kernel/efi-rt-wrapper.S b/arch/arm64/kernel/efi-rt-wrapper.S index d872d18101d8..e8ae803662cf 100644 --- a/arch/arm64/kernel/efi-rt-wrapper.S +++ b/arch/arm64/kernel/efi-rt-wrapper.S @@ -46,7 +46,10 @@ SYM_FUNC_START(__efi_rt_asm_wrapper) mov x4, x6 blr x8 + mov x16, sp mov sp, x29 + str xzr, [x16, #8] // clear recorded task SP value + ldp x1, x2, [sp, #16] cmp x2, x18 ldp x29, x30, [sp], #112 @@ -71,6 +74,9 @@ SYM_FUNC_END(__efi_rt_asm_wrapper) SYM_CODE_START(__efi_rt_asm_recover) mov sp, x30 + ldr_l x16, efi_rt_stack_top // clear recorded task SP value + str xzr, [x16, #-8] + ldp x19, x20, [sp, #32] ldp x21, x22, [sp, #48] ldp x23, x24, [sp, #64] diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index fab05de2e12d..b273900f4566 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <asm/efi.h> +#include <asm/stacktrace.h> static bool region_is_misaligned(const efi_memory_desc_t *md) { @@ -154,7 +155,7 @@ asmlinkage efi_status_t __efi_rt_asm_recover(void); bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg) { /* Check whether the exception occurred while running the firmware */ - if (current_work() != &efi_rts_work.work || regs->pc >= TASK_SIZE_64) + if (!current_in_efi() || regs->pc >= TASK_SIZE_64) return false; pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg); diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index 117e2c180f3c..83154303e682 100644 --- a/arch/arm64/kernel/stacktrace.c +++ 
b/arch/arm64/kernel/stacktrace.c @@ -5,6 +5,7 @@ * Copyright (C) 2012 ARM Ltd. */ #include <linux/kernel.h> +#include <linux/efi.h> #include <linux/export.h> #include <linux/ftrace.h> #include <linux/sched.h> @@ -12,6 +13,7 @@ #include <linux/sched/task_stack.h> #include <linux/stacktrace.h> +#include <asm/efi.h> #include <asm/irq.h> #include <asm/stack_pointer.h> #include <asm/stacktrace.h> @@ -186,6 +188,13 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl) : stackinfo_get_unknown(); \ }) +#define STACKINFO_EFI \ + ({ \ + ((task == current) && current_in_efi()) \ + ? stackinfo_get_efi() \ + : stackinfo_get_unknown(); \ + }) + noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie, struct task_struct *task, struct pt_regs *regs) @@ -200,6 +209,9 @@ noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry, STACKINFO_SDEI(normal), STACKINFO_SDEI(critical), #endif +#ifdef CONFIG_EFI + STACKINFO_EFI, +#endif }; struct unwind_state state = { .stacks = stacks, diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c index 5626ddb540ce..cf4c495a4321 100644 --- a/arch/arm64/kvm/guest.c +++ b/arch/arm64/kvm/guest.c @@ -1079,7 +1079,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, /* uaccess failed, don't leave stale tags */ if (num_tags != MTE_GRANULES_PER_PAGE) - mte_clear_page_tags(page); + mte_clear_page_tags(maddr); set_page_mte_tagged(page); kvm_release_pfn_dirty(pfn); diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c index 2074521d4a8c..2624963cb95b 100644 --- a/arch/arm64/kvm/vgic/vgic-v3.c +++ b/arch/arm64/kvm/vgic/vgic-v3.c @@ -350,26 +350,23 @@ retry: * The deactivation of the doorbell interrupt will trigger the * unmapping of the associated vPE. */ -static void unmap_all_vpes(struct vgic_dist *dist) +static void unmap_all_vpes(struct kvm *kvm) { - struct irq_desc *desc; + struct vgic_dist *dist = &kvm->arch.vgic; int i; - for (i = 0; i < dist->its_vm.nr_vpes; i++) { - desc = irq_to_desc(dist->its_vm.vpes[i]->irq); - irq_domain_deactivate_irq(irq_desc_get_irq_data(desc)); - } + for (i = 0; i < dist->its_vm.nr_vpes; i++) + free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i)); } -static void map_all_vpes(struct vgic_dist *dist) +static void map_all_vpes(struct kvm *kvm) { - struct irq_desc *desc; + struct vgic_dist *dist = &kvm->arch.vgic; int i; - for (i = 0; i < dist->its_vm.nr_vpes; i++) { - desc = irq_to_desc(dist->its_vm.vpes[i]->irq); - irq_domain_activate_irq(irq_desc_get_irq_data(desc), false); - } + for (i = 0; i < dist->its_vm.nr_vpes; i++) + WARN_ON(vgic_v4_request_vpe_irq(kvm_get_vcpu(kvm, i), + dist->its_vm.vpes[i]->irq)); } /** @@ -394,7 +391,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) * and enabling of the doorbells have already been done. 
*/ if (kvm_vgic_global_state.has_gicv4_1) { - unmap_all_vpes(dist); + unmap_all_vpes(kvm); vlpi_avail = true; } @@ -444,7 +441,7 @@ int vgic_v3_save_pending_tables(struct kvm *kvm) out: if (vlpi_avail) - map_all_vpes(dist); + map_all_vpes(kvm); return ret; } diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c index ad06ba6c9b00..a413718be92b 100644 --- a/arch/arm64/kvm/vgic/vgic-v4.c +++ b/arch/arm64/kvm/vgic/vgic-v4.c @@ -222,6 +222,11 @@ void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val) *val = !!(*ptr & mask); } +int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq) +{ + return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu); +} + /** * vgic_v4_init - Initialize the GICv4 data structures * @kvm: Pointer to the VM being initialized @@ -283,8 +288,7 @@ int vgic_v4_init(struct kvm *kvm) irq_flags &= ~IRQ_NOAUTOEN; irq_set_status_flags(irq, irq_flags); - ret = request_irq(irq, vgic_v4_doorbell_handler, - 0, "vcpu", vcpu); + ret = vgic_v4_request_vpe_irq(vcpu, irq); if (ret) { kvm_err("failed to allocate vcpu IRQ%d\n", irq); /* diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h index 0c8da72953f0..23e280fa0a16 100644 --- a/arch/arm64/kvm/vgic/vgic.h +++ b/arch/arm64/kvm/vgic/vgic.h @@ -331,5 +331,6 @@ int vgic_v4_init(struct kvm *kvm); void vgic_v4_teardown(struct kvm *kvm); void vgic_v4_configure_vsgis(struct kvm *kvm); void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val); +int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq); #endif diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h index 7226e2462584..2c0f4c887289 100644 --- a/arch/riscv/include/asm/alternative-macros.h +++ b/arch/riscv/include/asm/alternative-macros.h @@ -46,7 +46,7 @@ .macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ new_c_2, vendor_id_2, errata_id_2, enable_2 - ALTERNATIVE_CFG \old_c, \new_c_1, \vendor_id_1, \errata_id_1, \enable_1 + ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \errata_id_1, \enable_1 ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 .endm diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index b865046e4dbb..4bf6c449d78b 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -326,7 +326,7 @@ clear_bss_done: call soc_early_init tail start_kernel -#if CONFIG_RISCV_BOOT_SPINWAIT +#ifdef CONFIG_RISCV_BOOT_SPINWAIT .Lsecondary_start: /* Set trap vector to spin forever to help debug */ la a3, .Lsecondary_park diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c index d73e96f6ed7c..a20568bd1f1a 100644 --- a/arch/riscv/kernel/probes/simulate-insn.c +++ b/arch/riscv/kernel/probes/simulate-insn.c @@ -71,11 +71,11 @@ bool __kprobes simulate_jalr(u32 opcode, unsigned long addr, struct pt_regs *reg u32 rd_index = (opcode >> 7) & 0x1f; u32 rs1_index = (opcode >> 15) & 0x1f; - ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); + ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); if (!ret) return ret; - ret = rv_insn_reg_get_val(regs, rs1_index, &base_addr); + ret = rv_insn_reg_set_val(regs, rd_index, addr + 4); if (!ret) return ret; diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 3373df413c88..ddb2afba6d25 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -39,7 +39,6 @@ static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { - init_cpu_topology(); } void __init 
smp_prepare_cpus(unsigned int max_cpus) @@ -48,6 +47,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus) int ret; unsigned int curr_cpuid; + init_cpu_topology(); + curr_cpuid = smp_processor_id(); store_cpu_topology(curr_cpuid); numa_store_cpu_info(curr_cpuid); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index dfd2c124cdf8..bafdc2be479a 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -6339,6 +6339,7 @@ __init int intel_pmu_init(void) break; case INTEL_FAM6_SAPPHIRERAPIDS_X: + case INTEL_FAM6_EMERALDRAPIDS_X: pmem = true; x86_pmu.late_ack = true; memcpy(hw_cache_event_ids, spr_hw_cache_event_ids, sizeof(hw_cache_event_ids)); diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 3019fb1926e3..551741e79e03 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -677,6 +677,7 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &icx_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &icx_cstates), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &icx_cstates), + X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &icx_cstates), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &icl_cstates), diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c index 1f60a2b27936..fdbb5f07448f 100644 --- a/arch/x86/kernel/cpu/aperfmperf.c +++ b/arch/x86/kernel/cpu/aperfmperf.c @@ -330,7 +330,16 @@ static void __init bp_init_freq_invariance(void) static void disable_freq_invariance_workfn(struct work_struct *work) { + int cpu; + static_branch_disable(&arch_scale_freq_key); + + /* + * Set arch_freq_scale to a default value on all cpus + * This negates the effect of scaling + */ + for_each_possible_cpu(cpu) + per_cpu(arch_freq_scale, cpu) = SCHED_CAPACITY_SCALE; } static DECLARE_WORK(disable_freq_invariance_work, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index fc9008dbed33..7eec0226d56a 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -3440,18 +3440,15 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var) { u32 ar; - if (var->unusable || !var->present) - ar = 1 << 16; - else { - ar = var->type & 15; - ar |= (var->s & 1) << 4; - ar |= (var->dpl & 3) << 5; - ar |= (var->present & 1) << 7; - ar |= (var->avl & 1) << 12; - ar |= (var->l & 1) << 13; - ar |= (var->db & 1) << 14; - ar |= (var->g & 1) << 15; - } + ar = var->type & 15; + ar |= (var->s & 1) << 4; + ar |= (var->dpl & 3) << 5; + ar |= (var->present & 1) << 7; + ar |= (var->avl & 1) << 12; + ar |= (var->l & 1) << 13; + ar |= (var->db & 1) << 14; + ar |= (var->g & 1) << 15; + ar |= (var->unusable || !var->present) << 16; return ar; } diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 65cec7bb6d96..8626869c4904 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -110,26 +110,6 @@ static bool nvidia_wmi_ec_supported(void) } #endif -static bool apple_gmux_backlight_present(void) -{ - struct acpi_device *adev; - struct device *dev; - - adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1); - if (!adev) - return false; - - dev = acpi_get_first_physical_node(adev); - if (!dev) - return false; - - /* - * drivers/platform/x86/apple-gmux.c only supports old style - * Apple GMUX with an IO-resource. 
- */ - return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL; -} - /* Force to use vendor driver when the ACPI device is known to be * buggy */ static int video_detect_force_vendor(const struct dmi_system_id *d) @@ -766,6 +746,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) { static DEFINE_MUTEX(init_mutex); static bool nvidia_wmi_ec_present; + static bool apple_gmux_present; static bool native_available; static bool init_done; static long video_caps; @@ -779,6 +760,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) ACPI_UINT32_MAX, find_video, NULL, &video_caps, NULL); nvidia_wmi_ec_present = nvidia_wmi_ec_supported(); + apple_gmux_present = apple_gmux_detect(NULL, NULL); init_done = true; } if (native) @@ -800,7 +782,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native) if (nvidia_wmi_ec_present) return acpi_backlight_nvidia_wmi_ec; - if (apple_gmux_backlight_present()) + if (apple_gmux_present) return acpi_backlight_apple_gmux; /* Use ACPI video if available, except when native should be preferred. */ diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index 878deb4880cd..0689e1510721 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c @@ -34,6 +34,9 @@ static DEFINE_MUTEX(device_ctls_mutex); static LIST_HEAD(edac_device_list); +/* Default workqueue processing interval on this instance, in msecs */ +#define DEFAULT_POLL_INTERVAL 1000 + #ifdef CONFIG_EDAC_DEBUG static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) { @@ -336,7 +339,7 @@ static void edac_device_workq_function(struct work_struct *work_req) * whole one second to save timers firing all over the period * between integral seconds */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_queue_work(&edac_dev->work, edac_dev->delay); @@ -366,7 +369,7 @@ static void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, * timers firing on sub-second basis, while they are happy * to fire together on the 1 second exactly */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_queue_work(&edac_dev->work, edac_dev->delay); @@ -400,7 +403,7 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, edac_dev->delay = msecs_to_jiffies(msec); /* See comment in edac_device_workq_setup() above */ - if (edac_dev->poll_msec == 1000) + if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL) edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else edac_mod_work(&edac_dev->work, edac_dev->delay); @@ -442,11 +445,7 @@ int edac_device_add_device(struct edac_device_ctl_info *edac_dev) /* This instance is NOW RUNNING */ edac_dev->op_state = OP_RUNNING_POLL; - /* - * enable workq processing on this instance, - * default = 1000 msec - */ - edac_device_workq_setup(edac_dev, 1000); + edac_device_workq_setup(edac_dev, edac_dev->poll_msec ?: DEFAULT_POLL_INTERVAL); } else { edac_dev->op_state = OP_RUNNING_INTERRUPT; } diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c index 97a27e42dd61..c45519f59dc1 100644 --- a/drivers/edac/qcom_edac.c +++ b/drivers/edac/qcom_edac.c @@ -252,7 +252,7 @@ clear: static int dump_syn_reg(struct edac_device_ctl_info *edev_ctl, int err_type, u32 bank) { - struct llcc_drv_data 
*drv = edev_ctl->pvt_info; + struct llcc_drv_data *drv = edev_ctl->dev->platform_data; int ret; ret = dump_syn_reg_values(drv, bank, err_type); @@ -289,7 +289,7 @@ static irqreturn_t llcc_ecc_irq_handler(int irq, void *edev_ctl) { struct edac_device_ctl_info *edac_dev_ctl = edev_ctl; - struct llcc_drv_data *drv = edac_dev_ctl->pvt_info; + struct llcc_drv_data *drv = edac_dev_ctl->dev->platform_data; irqreturn_t irq_rc = IRQ_NONE; u32 drp_error, trp_error, i; int ret; @@ -358,7 +358,6 @@ static int qcom_llcc_edac_probe(struct platform_device *pdev) edev_ctl->dev_name = dev_name(dev); edev_ctl->ctl_name = "llcc"; edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE; - edev_ctl->pvt_info = llcc_driv_data; rc = edac_device_add_device(edev_ctl); if (rc) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h index 4f40167ad61f..4f40167ad61f 100755..100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index c1a633ca1e6d..e315f669ec06 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -104,7 +104,7 @@ static const struct of_device_id ksz9477_dt_ids[] = { }, { .compatible = "microchip,ksz8563", - .data = &ksz_switch_chips[KSZ9893] + .data = &ksz_switch_chips[KSZ8563] }, { .compatible = "microchip,ksz9567", diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 0805f249fff2..c26b8597945b 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -356,7 +356,7 @@ static int adin1110_read_fifo(struct adin1110_port_priv *port_priv) if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) || (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST)) - rxb->offload_fwd_mark = 1; + rxb->offload_fwd_mark = port_priv->priv->forwarding; netif_rx(rxb); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 59debdc344a5..58747292521d 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -11166,7 +11166,7 @@ static void tg3_reset_task(struct work_struct *work) rtnl_lock(); tg3_full_lock(tp, 0); - if (!netif_running(tp->dev)) { + if (tp->pcierr_recovery || !netif_running(tp->dev)) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); rtnl_unlock(); @@ -18101,6 +18101,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, netdev_info(netdev, "PCI I/O error detected\n"); + /* Want to make sure that the reset task doesn't run */ + tg3_reset_task_cancel(tp); + rtnl_lock(); /* Could be second call or maybe we don't have netdev yet */ @@ -18117,9 +18120,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, tg3_timer_stop(tp); - /* Want to make sure that the reset task doesn't run */ - tg3_reset_task_cancel(tp); - netif_device_detach(netdev); /* Clean up software state, even if MMIO is blocked */ diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index bf0190e1d2ea..00e2108f2ca4 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ b/drivers/net/ethernet/engleder/tsnep_main.c @@ -450,7 +450,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, /* ring full, shall not happen because queue is stopped if full * below */ - netif_stop_queue(tx->adapter->netdev); + netif_stop_subqueue(tx->adapter->netdev, 
tx->queue_index); spin_unlock_irqrestore(&tx->lock, flags); @@ -493,7 +493,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { /* ring can get full with next frame */ - netif_stop_queue(tx->adapter->netdev); + netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); } spin_unlock_irqrestore(&tx->lock, flags); @@ -503,11 +503,14 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) { + struct tsnep_tx_entry *entry; + struct netdev_queue *nq; unsigned long flags; int budget = 128; - struct tsnep_tx_entry *entry; - int count; int length; + int count; + + nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); spin_lock_irqsave(&tx->lock, flags); @@ -564,8 +567,8 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) } while (likely(budget)); if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && - netif_queue_stopped(tx->adapter->netdev)) { - netif_wake_queue(tx->adapter->netdev); + netif_tx_queue_stopped(nq)) { + netif_tx_wake_queue(nq); } spin_unlock_irqrestore(&tx->lock, flags); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 644f3c963730..2341597408d1 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3191,7 +3191,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) for (q = 0; q < fep->num_rx_queues; q++) { rxq = fep->rx_queue[q]; for (i = 0; i < rxq->bd.ring_size; i++) - page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page); + page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); for (i = 0; i < XDP_STATS_TOTAL; i++) rxq->stats[i] = 0; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 0d1bab4ac1b0..2a9f1eeeb701 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -249,6 +249,7 @@ struct iavf_cloud_filter { /* board specific private data structure */ struct iavf_adapter { + struct workqueue_struct *wq; struct work_struct reset_task; struct work_struct adminq_task; struct delayed_work client_task; @@ -459,7 +460,6 @@ struct iavf_device { /* needed by iavf_ethtool.c */ extern char iavf_driver_name[]; -extern struct workqueue_struct *iavf_wq; static inline const char *iavf_state_str(enum iavf_state_t state) { diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index d79ead5e8d0c..6f171d1d85b7 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -532,7 +532,7 @@ static int iavf_set_priv_flags(struct net_device *netdev, u32 flags) if (changed_flags & IAVF_FLAG_LEGACY_RX) { if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } } @@ -672,7 +672,7 @@ static int iavf_set_ringparam(struct net_device *netdev, if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } return 0; @@ -1433,7 +1433,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx adapter->aq_required |= IAVF_FLAG_AQ_ADD_FDIR_FILTER; spin_unlock_bh(&adapter->fdir_fltr_lock); - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + 
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); ret: if (err && fltr) @@ -1474,7 +1474,7 @@ static int iavf_del_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx spin_unlock_bh(&adapter->fdir_fltr_lock); if (fltr && fltr->state == IAVF_FDIR_FLTR_DEL_REQUEST) - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); return err; } @@ -1658,7 +1658,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); if (!err) - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); mutex_unlock(&adapter->crit_lock); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index adc02adef83a..4b09785d2147 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -49,7 +49,6 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver") MODULE_LICENSE("GPL v2"); static const struct net_device_ops iavf_netdev_ops; -struct workqueue_struct *iavf_wq; int iavf_status_to_errno(enum iavf_status status) { @@ -277,7 +276,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) if (!(adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } } @@ -291,7 +290,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter) void iavf_schedule_request_stats(struct iavf_adapter *adapter) { adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS; - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** @@ -411,7 +410,7 @@ static irqreturn_t iavf_msix_aq(int irq, void *data) if (adapter->state != __IAVF_REMOVE) /* schedule work on the private workqueue */ - queue_work(iavf_wq, &adapter->adminq_task); + queue_work(adapter->wq, &adapter->adminq_task); return IRQ_HANDLED; } @@ -1034,7 +1033,7 @@ int iavf_replace_primary_mac(struct iavf_adapter *adapter, /* schedule the watchdog task to immediately process the request */ if (f) { - queue_work(iavf_wq, &adapter->watchdog_task.work); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); return 0; } return -ENOMEM; @@ -1257,7 +1256,7 @@ static void iavf_up_complete(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES; if (CLIENT_ENABLED(adapter)) adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN; - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** @@ -1414,7 +1413,7 @@ void iavf_down(struct iavf_adapter *adapter) adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES; } - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } /** @@ -2248,7 +2247,7 @@ iavf_set_vlan_offload_features(struct iavf_adapter *adapter, if (aq_required) { adapter->aq_required |= aq_required; - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0); } } @@ -2693,6 +2692,15 @@ static void iavf_watchdog_task(struct work_struct *work) goto restart_watchdog; } + if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES) && + adapter->netdev_registered && + !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section) && + rtnl_trylock()) { + netdev_update_features(adapter->netdev); + 
rtnl_unlock(); + adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; + } + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) iavf_change_state(adapter, __IAVF_COMM_FAILED); @@ -2700,7 +2708,7 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); return; } @@ -2708,31 +2716,31 @@ static void iavf_watchdog_task(struct work_struct *work) case __IAVF_STARTUP: iavf_startup(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_VERSION_CHECK: iavf_init_version_check(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(30)); return; case __IAVF_INIT_GET_RESOURCES: iavf_init_get_resources(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_EXTENDED_CAPS: iavf_init_process_extended_caps(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_CONFIG_ADAPTER: iavf_init_config_adapter(adapter); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(1)); return; case __IAVF_INIT_FAILED: @@ -2751,14 +2759,14 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED; iavf_shutdown_adminq(hw); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, (5 * HZ)); return; } /* Try again from failed step*/ iavf_change_state(adapter, adapter->last_state); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ); return; case __IAVF_COMM_FAILED: if (test_bit(__IAVF_IN_REMOVE_TASK, @@ -2789,13 +2797,14 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(10)); return; case __IAVF_RESETTING: mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, + HZ * 2); return; case __IAVF_DOWN: case __IAVF_DOWN_PENDING: @@ -2834,9 +2843,9 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; dev_err(&adapter->pdev->dev, "Hardware reset detected\n"); - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); mutex_unlock(&adapter->crit_lock); - queue_delayed_work(iavf_wq, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, HZ * 2); return; } @@ -2845,12 +2854,13 @@ static void iavf_watchdog_task(struct work_struct *work) mutex_unlock(&adapter->crit_lock); restart_watchdog: if (adapter->state >= __IAVF_DOWN) - 
queue_work(iavf_wq, &adapter->adminq_task); + queue_work(adapter->wq, &adapter->adminq_task); if (adapter->aq_required) - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(20)); else - queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); + queue_delayed_work(adapter->wq, &adapter->watchdog_task, + HZ * 2); } /** @@ -2952,7 +2962,7 @@ static void iavf_reset_task(struct work_struct *work) */ if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state != __IAVF_REMOVE) - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); goto reset_finish; } @@ -3116,7 +3126,7 @@ continue_reset: bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID); bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); - mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2); + mod_delayed_work(adapter->wq, &adapter->watchdog_task, 2); /* We were running when the reset started, so we need to restore some * state here. @@ -3208,7 +3218,7 @@ static void iavf_adminq_task(struct work_struct *work) if (adapter->state == __IAVF_REMOVE) return; - queue_work(iavf_wq, &adapter->adminq_task); + queue_work(adapter->wq, &adapter->adminq_task); goto out; } @@ -3232,24 +3242,6 @@ static void iavf_adminq_task(struct work_struct *work) } while (pending); mutex_unlock(&adapter->crit_lock); - if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { - if (adapter->netdev_registered || - !test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) { - struct net_device *netdev = adapter->netdev; - - rtnl_lock(); - netdev_update_features(netdev); - rtnl_unlock(); - /* Request VLAN offload settings */ - if (VLAN_V2_ALLOWED(adapter)) - iavf_set_vlan_offload_features - (adapter, 0, netdev->features); - - iavf_set_queue_vlan_tag_loc(adapter); - } - - adapter->flags &= ~IAVF_FLAG_SETUP_NETDEV_FEATURES; - } if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || adapter->state == __IAVF_RESETTING) @@ -4349,7 +4341,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu) if (netif_running(netdev)) { adapter->flags |= IAVF_FLAG_RESET_NEEDED; - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } return 0; @@ -4898,6 +4890,13 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw = &adapter->hw; hw->back = adapter; + adapter->wq = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, + iavf_driver_name); + if (!adapter->wq) { + err = -ENOMEM; + goto err_alloc_wq; + } + adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1; iavf_change_state(adapter, __IAVF_STARTUP); @@ -4942,7 +4941,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&adapter->adminq_task, iavf_adminq_task); INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task); INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task); - queue_delayed_work(iavf_wq, &adapter->watchdog_task, + queue_delayed_work(adapter->wq, &adapter->watchdog_task, msecs_to_jiffies(5 * (pdev->devfn & 0x07))); /* Setup the wait queue for indicating transition to down status */ @@ -4954,6 +4953,8 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; err_ioremap: + destroy_workqueue(adapter->wq); +err_alloc_wq: free_netdev(netdev); err_alloc_etherdev: pci_disable_pcie_error_reporting(pdev); @@ -5023,7 +5024,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d) return err; } - queue_work(iavf_wq, 
&adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); netif_device_attach(adapter->netdev); @@ -5170,6 +5171,8 @@ static void iavf_remove(struct pci_dev *pdev) } spin_unlock_bh(&adapter->adv_rss_lock); + destroy_workqueue(adapter->wq); + free_netdev(netdev); pci_disable_pcie_error_reporting(pdev); @@ -5196,24 +5199,11 @@ static struct pci_driver iavf_driver = { **/ static int __init iavf_init_module(void) { - int ret; - pr_info("iavf: %s\n", iavf_driver_string); pr_info("%s\n", iavf_copyright); - iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1, - iavf_driver_name); - if (!iavf_wq) { - pr_err("%s: Failed to create workqueue\n", iavf_driver_name); - return -ENOMEM; - } - - ret = pci_register_driver(&iavf_driver); - if (ret) - destroy_workqueue(iavf_wq); - - return ret; + return pci_register_driver(&iavf_driver); } module_init(iavf_init_module); @@ -5227,7 +5217,6 @@ module_init(iavf_init_module); static void __exit iavf_exit_module(void) { pci_unregister_driver(&iavf_driver); - destroy_workqueue(iavf_wq); } module_exit(iavf_exit_module); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 24a701fd140e..365ca0c710c4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1952,7 +1952,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, if (!(adapter->flags & IAVF_FLAG_RESET_PENDING)) { adapter->flags |= IAVF_FLAG_RESET_PENDING; dev_info(&adapter->pdev->dev, "Scheduling reset task\n"); - queue_work(iavf_wq, &adapter->reset_task); + queue_work(adapter->wq, &adapter->reset_task); } break; default: @@ -2226,6 +2226,14 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, iavf_process_config(adapter); adapter->flags |= IAVF_FLAG_SETUP_NETDEV_FEATURES; + + /* Request VLAN offload settings */ + if (VLAN_V2_ALLOWED(adapter)) + iavf_set_vlan_offload_features(adapter, 0, + netdev->features); + + iavf_set_queue_vlan_tag_loc(adapter); + was_mac_changed = !ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 94aa834cd9a6..a596e07b3ce9 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -3235,9 +3235,6 @@ int ice_vsi_release(struct ice_vsi *vsi) } } - if (vsi->type == ICE_VSI_PF) - ice_devlink_destroy_pf_port(pf); - if (vsi->type == ICE_VSI_VF && vsi->agg_node && vsi->agg_node->valid) vsi->agg_node->num_vsis--; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index a9a7f8b52140..237ede2cffb0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4590,7 +4590,7 @@ static void ice_print_wake_reason(struct ice_pf *pf) } /** - * ice_register_netdev - register netdev and devlink port + * ice_register_netdev - register netdev * @pf: pointer to the PF struct */ static int ice_register_netdev(struct ice_pf *pf) @@ -4602,11 +4602,6 @@ static int ice_register_netdev(struct ice_pf *pf) if (!vsi || !vsi->netdev) return -EIO; - err = ice_devlink_create_pf_port(pf); - if (err) - goto err_devlink_create; - - SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); err = register_netdev(vsi->netdev); if (err) goto err_register_netdev; @@ -4617,8 +4612,6 @@ static int ice_register_netdev(struct ice_pf *pf) return 0; err_register_netdev: - ice_devlink_destroy_pf_port(pf); 
-err_devlink_create: free_netdev(vsi->netdev); vsi->netdev = NULL; clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); @@ -4636,6 +4629,7 @@ static int ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) { struct device *dev = &pdev->dev; + struct ice_vsi *vsi; struct ice_pf *pf; struct ice_hw *hw; int i, err; @@ -4918,6 +4912,18 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent) pcie_print_link_status(pf->pdev); probe_done: + err = ice_devlink_create_pf_port(pf); + if (err) + goto err_create_pf_port; + + vsi = ice_get_main_vsi(pf); + if (!vsi || !vsi->netdev) { + err = -EINVAL; + goto err_netdev_reg; + } + + SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); + err = ice_register_netdev(pf); if (err) goto err_netdev_reg; @@ -4955,6 +4961,8 @@ err_init_aux_unroll: err_devlink_reg_param: ice_devlink_unregister_params(pf); err_netdev_reg: + ice_devlink_destroy_pf_port(pf); +err_create_pf_port: err_send_version_unroll: ice_vsi_release_all(pf); err_alloc_sw_unroll: @@ -5083,6 +5091,7 @@ static void ice_remove(struct pci_dev *pdev) ice_setup_mc_magic_wake(pf); ice_vsi_release_all(pf); mutex_destroy(&(&pf->hw)->fdir_fltr_lock); + ice_devlink_destroy_pf_port(pf); ice_set_wake(pf); ice_free_irq_msix_misc(pf); ice_for_each_vsi(pf, i) { diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index e708c2d04983..b144f2237748 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -1259,13 +1259,20 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) gic->handler = NULL; gic->arg = NULL; + if (!i) + snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_hwc@pci:%s", + pci_name(pdev)); + else + snprintf(gic->name, MANA_IRQ_NAME_SZ, "mana_q%d@pci:%s", + i - 1, pci_name(pdev)); + irq = pci_irq_vector(pdev, i); if (irq < 0) { err = irq; goto free_mask; } - err = request_irq(irq, mana_gd_intr, 0, "mana_intr", gic); + err = request_irq(irq, mana_gd_intr, 0, gic->name, gic); if (err) goto free_mask; irq_set_affinity_and_hint(irq, req_mask); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index b4e0fc7f65bd..0f54849a3823 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1101,14 +1101,14 @@ static void ravb_error_interrupt(struct net_device *ndev) ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED), RIS2); /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) priv->stats[RAVB_BE].rx_over_errors++; - /* Receive Descriptor Empty int */ + /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF1) priv->stats[RAVB_NC].rx_over_errors++; @@ -2973,6 +2973,9 @@ static int __maybe_unused ravb_suspend(struct device *dev) else ret = ravb_close(ndev); + if (priv->info->ccc_gac) + ravb_ptp_stop(ndev); + return ret; } @@ -3011,6 +3014,9 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Restore descriptor base address table */ ravb_write(ndev, priv->desc_bat_dma, DBAT); + if (priv->info->ccc_gac) + ravb_ptp_init(ndev, priv->pdev); + if (netif_running(ndev)) { if (priv->wol_enabled) { ret = ravb_wol_restore(ndev); diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 6441892636db..2370c7797a0a 100644 --- 
a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1074,8 +1074,11 @@ static struct device_node *rswitch_get_port_node(struct rswitch_device *rdev) port = NULL; goto out; } - if (index == rdev->etha->index) + if (index == rdev->etha->index) { + if (!of_device_is_available(port)) + port = NULL; break; + } } out: @@ -1106,7 +1109,7 @@ static int rswitch_etha_get_params(struct rswitch_device *rdev) port = rswitch_get_port_node(rdev); if (!port) - return -ENODEV; + return 0; /* ignored */ err = of_get_phy_mode(port, &rdev->etha->phy_interface); of_node_put(port); @@ -1324,13 +1327,13 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv) { int i, err; - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { err = rswitch_ether_port_init_one(priv->rdev[i]); if (err) goto err_init_one; } - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { err = rswitch_serdes_init(priv->rdev[i]); if (err) goto err_serdes; @@ -1339,12 +1342,12 @@ static int rswitch_ether_port_init_all(struct rswitch_private *priv) return 0; err_serdes: - for (i--; i >= 0; i--) + rswitch_for_each_enabled_port_continue_reverse(priv, i) rswitch_serdes_deinit(priv->rdev[i]); i = RSWITCH_NUM_PORTS; err_init_one: - for (i--; i >= 0; i--) + rswitch_for_each_enabled_port_continue_reverse(priv, i) rswitch_ether_port_deinit_one(priv->rdev[i]); return err; @@ -1608,6 +1611,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index) netif_napi_add(ndev, &rdev->napi, rswitch_poll); port = rswitch_get_port_node(rdev); + rdev->disabled = !port; err = of_get_ethdev_address(port, ndev); of_node_put(port); if (err) { @@ -1707,16 +1711,16 @@ static int rswitch_init(struct rswitch_private *priv) if (err) goto err_ether_port_init_all; - for (i = 0; i < RSWITCH_NUM_PORTS; i++) { + rswitch_for_each_enabled_port(priv, i) { err = register_netdev(priv->rdev[i]->ndev); if (err) { - for (i--; i >= 0; i--) + rswitch_for_each_enabled_port_continue_reverse(priv, i) unregister_netdev(priv->rdev[i]->ndev); goto err_register_netdev; } } - for (i = 0; i < RSWITCH_NUM_PORTS; i++) + rswitch_for_each_enabled_port(priv, i) netdev_info(priv->rdev[i]->ndev, "MAC address %pM\n", priv->rdev[i]->ndev->dev_addr); diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index edbdd1b98d3d..49efb0f31c77 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -13,6 +13,17 @@ #define RSWITCH_MAX_NUM_QUEUES 128 #define RSWITCH_NUM_PORTS 3 +#define rswitch_for_each_enabled_port(priv, i) \ + for (i = 0; i < RSWITCH_NUM_PORTS; i++) \ + if (priv->rdev[i]->disabled) \ + continue; \ + else + +#define rswitch_for_each_enabled_port_continue_reverse(priv, i) \ + for (i--; i >= 0; i--) \ + if (priv->rdev[i]->disabled) \ + continue; \ + else #define TX_RING_SIZE 1024 #define RX_RING_SIZE 1024 @@ -938,6 +949,7 @@ struct rswitch_device { struct rswitch_gwca_queue *tx_queue; struct rswitch_gwca_queue *rx_queue; u8 ts_tag; + bool disabled; int port; struct rswitch_etha *etha; diff --git a/drivers/net/mdio/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c index 4a2e94faf57e..c4542ecf5623 100644 --- a/drivers/net/mdio/mdio-mux-meson-g12a.c +++ b/drivers/net/mdio/mdio-mux-meson-g12a.c @@ -4,6 +4,7 @@ */ #include <linux/bitfield.h> +#include <linux/delay.h> #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/device.h> @@ -150,6 +151,7 @@ static const 
struct clk_ops g12a_ephy_pll_ops = { static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv) { + u32 value; int ret; /* Enable the phy clock */ @@ -163,18 +165,25 @@ static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv) /* Initialize ephy control */ writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0); - writel(FIELD_PREP(PHY_CNTL1_ST_MODE, 3) | - FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) | - FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) | - PHY_CNTL1_CLK_EN | - PHY_CNTL1_CLKFREQ | - PHY_CNTL1_PHY_ENB, - priv->regs + ETH_PHY_CNTL1); + + /* Make sure we get a 0 -> 1 transition on the enable bit */ + value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) | + FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) | + FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) | + PHY_CNTL1_CLK_EN | + PHY_CNTL1_CLKFREQ; + writel(value, priv->regs + ETH_PHY_CNTL1); writel(PHY_CNTL2_USE_INTERNAL | PHY_CNTL2_SMI_SRC_MAC | PHY_CNTL2_RX_CLK_EPHY, priv->regs + ETH_PHY_CNTL2); + value |= PHY_CNTL1_PHY_ENB; + writel(value, priv->regs + ETH_PHY_CNTL1); + + /* The phy needs a bit of time to power up */ + mdelay(10); + return 0; } diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index b80a9b74662b..1deb61b22bc7 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -1576,7 +1576,6 @@ static int arm_cmn_event_init(struct perf_event *event) hw->dn++; continue; } - hw->dtcs_used |= arm_cmn_node_to_xp(cmn, dn)->dtc; hw->num_dns++; if (bynodeid) break; @@ -1589,6 +1588,12 @@ static int arm_cmn_event_init(struct perf_event *event) nodeid, nid.x, nid.y, nid.port, nid.dev, type); return -EINVAL; } + /* + * Keep assuming non-cycles events count in all DTC domains; turns out + * it's hard to make a worthwhile optimisation around this, short of + * going all-in with domain-local counter allocation as well. 
+ */ + hw->dtcs_used = (1U << cmn->num_dtcs) - 1; return arm_cmn_validate_group(cmn, event); } diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c index 8d924986381b..3cbb01ec10e3 100644 --- a/drivers/platform/x86/amd/pmc.c +++ b/drivers/platform/x86/amd/pmc.c @@ -22,6 +22,7 @@ #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/rtc.h> +#include <linux/serio.h> #include <linux/suspend.h> #include <linux/seq_file.h> #include <linux/uaccess.h> @@ -160,6 +161,10 @@ static bool enable_stb; module_param(enable_stb, bool, 0644); MODULE_PARM_DESC(enable_stb, "Enable the STB debug mechanism"); +static bool disable_workarounds; +module_param(disable_workarounds, bool, 0644); +MODULE_PARM_DESC(disable_workarounds, "Disable workarounds for platform bugs"); + static struct amd_pmc_dev pmc; static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret); static int amd_pmc_read_stb(struct amd_pmc_dev *dev, u32 *buf); @@ -653,6 +658,33 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev) return -EINVAL; } +static int amd_pmc_czn_wa_irq1(struct amd_pmc_dev *pdev) +{ + struct device *d; + int rc; + + if (!pdev->major) { + rc = amd_pmc_get_smu_version(pdev); + if (rc) + return rc; + } + + if (pdev->major > 64 || (pdev->major == 64 && pdev->minor > 65)) + return 0; + + d = bus_find_device_by_name(&serio_bus, NULL, "serio0"); + if (!d) + return 0; + if (device_may_wakeup(d)) { + dev_info_once(d, "Disabling IRQ1 wakeup source to avoid platform firmware bug\n"); + disable_irq_wake(1); + device_set_wakeup_enable(d, false); + } + put_device(d); + + return 0; +} + static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) { struct rtc_device *rtc_device; @@ -715,8 +747,8 @@ static void amd_pmc_s2idle_prepare(void) /* Reset and Start SMU logging - to monitor the s0i3 stats */ amd_pmc_setup_smu_logging(pdev); - /* Activate CZN specific RTC functionality */ - if (pdev->cpu_id == AMD_CPU_ID_CZN) { + /* Activate CZN specific platform bug workarounds */ + if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) { rc = amd_pmc_verify_czn_rtc(pdev, &arg); if (rc) { dev_err(pdev->dev, "failed to set RTC: %d\n", rc); @@ -782,6 +814,25 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = { .check = amd_pmc_s2idle_check, .restore = amd_pmc_s2idle_restore, }; + +static int __maybe_unused amd_pmc_suspend_handler(struct device *dev) +{ + struct amd_pmc_dev *pdev = dev_get_drvdata(dev); + + if (pdev->cpu_id == AMD_CPU_ID_CZN && !disable_workarounds) { + int rc = amd_pmc_czn_wa_irq1(pdev); + + if (rc) { + dev_err(pdev->dev, "failed to adjust keyboard wakeup: %d\n", rc); + return rc; + } + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(amd_pmc_pm, amd_pmc_suspend_handler, NULL); + #endif static const struct pci_device_id pmc_pci_ids[] = { @@ -980,6 +1031,9 @@ static struct platform_driver amd_pmc_driver = { .name = "amd_pmc", .acpi_match_table = amd_pmc_acpi_ids, .dev_groups = pmc_groups, +#ifdef CONFIG_SUSPEND + .pm = &amd_pmc_pm, +#endif }, .probe = amd_pmc_probe, .remove = amd_pmc_remove, diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c index ca33df7ea550..9333f82cfa8a 100644 --- a/drivers/platform/x86/apple-gmux.c +++ b/drivers/platform/x86/apple-gmux.c @@ -64,29 +64,6 @@ struct apple_gmux_data { static struct apple_gmux_data *apple_gmux_data; -/* - * gmux port offsets. Many of these are not yet used, but may be in the - * future, and it's useful to have them documented here anyhow. 
- */ -#define GMUX_PORT_VERSION_MAJOR 0x04 -#define GMUX_PORT_VERSION_MINOR 0x05 -#define GMUX_PORT_VERSION_RELEASE 0x06 -#define GMUX_PORT_SWITCH_DISPLAY 0x10 -#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11 -#define GMUX_PORT_INTERRUPT_ENABLE 0x14 -#define GMUX_PORT_INTERRUPT_STATUS 0x16 -#define GMUX_PORT_SWITCH_DDC 0x28 -#define GMUX_PORT_SWITCH_EXTERNAL 0x40 -#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41 -#define GMUX_PORT_DISCRETE_POWER 0x50 -#define GMUX_PORT_MAX_BRIGHTNESS 0x70 -#define GMUX_PORT_BRIGHTNESS 0x74 -#define GMUX_PORT_VALUE 0xc2 -#define GMUX_PORT_READ 0xd0 -#define GMUX_PORT_WRITE 0xd4 - -#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) - #define GMUX_INTERRUPT_ENABLE 0xff #define GMUX_INTERRUPT_DISABLE 0x00 @@ -249,23 +226,6 @@ static void gmux_write32(struct apple_gmux_data *gmux_data, int port, gmux_pio_write32(gmux_data, port, val); } -static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) -{ - u16 val; - - outb(0xaa, gmux_data->iostart + 0xcc); - outb(0x55, gmux_data->iostart + 0xcd); - outb(0x00, gmux_data->iostart + 0xce); - - val = inb(gmux_data->iostart + 0xcc) | - (inb(gmux_data->iostart + 0xcd) << 8); - - if (val == 0x55aa) - return true; - - return false; -} - /** * DOC: Backlight control * @@ -605,60 +565,43 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) int ret = -ENXIO; acpi_status status; unsigned long long gpe; + bool indexed = false; + u32 version; if (apple_gmux_data) return -EBUSY; + if (!apple_gmux_detect(pnp, &indexed)) { + pr_info("gmux device not present\n"); + return -ENODEV; + } + gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL); if (!gmux_data) return -ENOMEM; pnp_set_drvdata(pnp, gmux_data); res = pnp_get_resource(pnp, IORESOURCE_IO, 0); - if (!res) { - pr_err("Failed to find gmux I/O resource\n"); - goto err_free; - } - gmux_data->iostart = res->start; gmux_data->iolen = resource_size(res); - if (gmux_data->iolen < GMUX_MIN_IO_LEN) { - pr_err("gmux I/O region too small (%lu < %u)\n", - gmux_data->iolen, GMUX_MIN_IO_LEN); - goto err_free; - } - if (!request_region(gmux_data->iostart, gmux_data->iolen, "Apple gmux")) { pr_err("gmux I/O already in use\n"); goto err_free; } - /* - * Invalid version information may indicate either that the gmux - * device isn't present or that it's a new one that uses indexed - * io - */ - - ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); - ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); - ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); - if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { - if (gmux_is_indexed(gmux_data)) { - u32 version; - mutex_init(&gmux_data->index_lock); - gmux_data->indexed = true; - version = gmux_read32(gmux_data, - GMUX_PORT_VERSION_MAJOR); - ver_major = (version >> 24) & 0xff; - ver_minor = (version >> 16) & 0xff; - ver_release = (version >> 8) & 0xff; - } else { - pr_info("gmux device not present\n"); - ret = -ENODEV; - goto err_release; - } + if (indexed) { + mutex_init(&gmux_data->index_lock); + gmux_data->indexed = true; + version = gmux_read32(gmux_data, GMUX_PORT_VERSION_MAJOR); + ver_major = (version >> 24) & 0xff; + ver_minor = (version >> 16) & 0xff; + ver_release = (version >> 8) & 0xff; + } else { + ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); + ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); + ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); } pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, ver_release, 
(gmux_data->indexed ? "indexed" : "classic")); diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 104188d70988..1038dfdcdd32 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -225,6 +225,7 @@ struct asus_wmi { int tablet_switch_event_code; u32 tablet_switch_dev_id; + bool tablet_switch_inverted; enum fan_type fan_type; enum fan_type gpu_fan_type; @@ -493,6 +494,13 @@ static bool asus_wmi_dev_is_present(struct asus_wmi *asus, u32 dev_id) } /* Input **********************************************************************/ +static void asus_wmi_tablet_sw_report(struct asus_wmi *asus, bool value) +{ + input_report_switch(asus->inputdev, SW_TABLET_MODE, + asus->tablet_switch_inverted ? !value : value); + input_sync(asus->inputdev); +} + static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event_code) { struct device *dev = &asus->platform_device->dev; @@ -501,7 +509,7 @@ static void asus_wmi_tablet_sw_init(struct asus_wmi *asus, u32 dev_id, int event result = asus_wmi_get_devstate_simple(asus, dev_id); if (result >= 0) { input_set_capability(asus->inputdev, EV_SW, SW_TABLET_MODE); - input_report_switch(asus->inputdev, SW_TABLET_MODE, result); + asus_wmi_tablet_sw_report(asus, result); asus->tablet_switch_dev_id = dev_id; asus->tablet_switch_event_code = event_code; } else if (result == -ENODEV) { @@ -534,6 +542,7 @@ static int asus_wmi_input_init(struct asus_wmi *asus) case asus_wmi_no_tablet_switch: break; case asus_wmi_kbd_dock_devid: + asus->tablet_switch_inverted = true; asus_wmi_tablet_sw_init(asus, ASUS_WMI_DEVID_KBD_DOCK, NOTIFY_KBD_DOCK_CHANGE); break; case asus_wmi_lid_flip_devid: @@ -573,10 +582,8 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus) return; result = asus_wmi_get_devstate_simple(asus, asus->tablet_switch_dev_id); - if (result >= 0) { - input_report_switch(asus->inputdev, SW_TABLET_MODE, result); - input_sync(asus->inputdev); - } + if (result >= 0) + asus_wmi_tablet_sw_report(asus, result); } /* dGPU ********************************************************************/ diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c index 0a259a27459f..502783a7adb1 100644 --- a/drivers/platform/x86/dell/dell-wmi-base.c +++ b/drivers/platform/x86/dell/dell-wmi-base.c @@ -261,6 +261,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = { { KE_KEY, 0x57, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0x58, { KEY_BRIGHTNESSUP } }, + /*Speaker Mute*/ + { KE_KEY, 0x109, { KEY_MUTE} }, + /* Mic mute */ { KE_KEY, 0x150, { KEY_MICMUTE } }, diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c index 5e7e6659a849..322cfaeda17b 100644 --- a/drivers/platform/x86/gigabyte-wmi.c +++ b/drivers/platform/x86/gigabyte-wmi.c @@ -141,6 +141,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev) static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"), + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H WIFI-CF"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"), diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c index 0a99058be813..2ef201b625b3 100644 --- a/drivers/platform/x86/hp/hp-wmi.c +++ b/drivers/platform/x86/hp/hp-wmi.c @@ -90,6 +90,7 @@ enum hp_wmi_event_ids { 
HPWMI_PEAKSHIFT_PERIOD = 0x0F, HPWMI_BATTERY_CHARGE_PERIOD = 0x10, HPWMI_SANITIZATION_MODE = 0x17, + HPWMI_OMEN_KEY = 0x1D, HPWMI_SMART_EXPERIENCE_APP = 0x21, }; @@ -216,6 +217,8 @@ static const struct key_entry hp_wmi_keymap[] = { { KE_KEY, 0x213b, { KEY_INFO } }, { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } }, { KE_KEY, 0x216a, { KEY_SETUP } }, + { KE_KEY, 0x21a5, { KEY_PROG2 } }, /* HP Omen Key */ + { KE_KEY, 0x21a7, { KEY_FN_ESC } }, { KE_KEY, 0x21a9, { KEY_TOUCHPAD_OFF } }, { KE_KEY, 0x121a9, { KEY_TOUCHPAD_ON } }, { KE_KEY, 0x231b, { KEY_HELP } }, @@ -548,7 +551,7 @@ static int __init hp_wmi_enable_hotkeys(void) static int hp_wmi_set_block(void *data, bool blocked) { - enum hp_wmi_radio r = (enum hp_wmi_radio) data; + enum hp_wmi_radio r = (long)data; int query = BIT(r + 8) | ((!blocked) << r); int ret; @@ -810,6 +813,7 @@ static void hp_wmi_notify(u32 value, void *context) case HPWMI_SMART_ADAPTER: break; case HPWMI_BEZEL_BUTTON: + case HPWMI_OMEN_KEY: key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY); if (key_code < 0) break; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index a95946800ae9..02860c32625e 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -10496,8 +10496,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof, if (err) goto unlock; } - } - if (dytc_capabilities & BIT(DYTC_FC_PSC)) { + } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output); if (err) goto unlock; @@ -10525,14 +10524,16 @@ static void dytc_profile_refresh(void) err = dytc_command(DYTC_CMD_MMC_GET, &output); else err = dytc_cql_command(DYTC_CMD_GET, &output); - } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) + funcmode = DYTC_FUNCTION_MMC; + } else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { err = dytc_command(DYTC_CMD_GET, &output); - + /* Check if we are in PSC mode, or have AMT enabled */ + funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF; + } mutex_unlock(&dytc_mutex); if (err) return; - funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF; perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF; convert_dytc_to_profile(funcmode, perfmode, &profile); if (profile != dytc_current_profile) { diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 49cc18a87473..29a2865b8e2e 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -981,6 +981,9 @@ queue_rtpg: * * Returns true if and only if alua_rtpg_work() will be called asynchronously. * That function is responsible for calling @qdata->fn(). + * + * Context: may be called from atomic context (alua_check()) only if the caller + * holds an sdev reference. */
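Editorial aside: the hp-wmi hunk above recovers an enum that was packed into a void * callback argument by casting through long instead of casting straight to the enum, which would truncate a pointer on 64-bit targets and draw a compiler warning. A self-contained sketch of the round trip (userspace, invented names):

#include <stdio.h>

enum radio { RADIO_WIFI, RADIO_BT, RADIO_WWAN };

static void callback(void *data)
{
        /* Cast through long: wide enough for a pointer on Linux ABIs,
         * then implicitly narrowed to the enum's underlying int. */
        enum radio r = (long)data;

        printf("radio id: %d\n", r);
}

int main(void)
{
        /* Store the small integer in the pointer value itself. */
        callback((void *)(long)RADIO_BT);
        return 0;
}

Casting through intptr_t from <stdint.h> is the more portable spelling of the same trick.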
static bool alua_rtpg_queue(struct alua_port_group *pg, struct scsi_device *sdev, @@ -989,8 +992,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg, int start_queue = 0; unsigned long flags; - might_sleep(); - if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) return false; diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 4dbf51e2623a..f6da34850af9 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -5850,7 +5850,7 @@ static int hpsa_scsi_host_alloc(struct ctlr_info *h) { struct Scsi_Host *sh; - sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(struct ctlr_info)); if (sh == NULL) { dev_err(&h->pdev->dev, "scsi_host_alloc failed\n"); return -ENOMEM; diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 1d1cf641937c..0454d94e8cf0 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -849,7 +849,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost); - struct iscsi_session *session = tcp_sw_host->session; + struct iscsi_session *session; struct iscsi_conn *conn; struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; @@ -859,6 +859,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, switch (param) { case ISCSI_HOST_PARAM_IPADDRESS: + session = tcp_sw_host->session; if (!session) return -ENOTCONN; @@ -959,11 +960,13 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, if (!cls_session) goto remove_host; session = cls_session->dd_data; - tcp_sw_host = iscsi_host_priv(shost); - tcp_sw_host->session = session; if (iscsi_tcp_r2tpool_alloc(session)) goto remove_session; + + /* We are now fully set up so expose the session to sysfs. */ + tcp_sw_host = iscsi_host_priv(shost); + tcp_sw_host->session = session; return cls_session; remove_session: @@ -983,10 +986,17 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) if (WARN_ON_ONCE(session->leadconn)) return; + iscsi_session_remove(cls_session); + /* + * Our get_host_param needs to access the session, so remove the + * host from sysfs before freeing the session to make sure userspace + * is no longer accessing the callout. + */ + iscsi_host_remove(shost, false); + iscsi_tcp_r2tpool_free(cls_session->dd_data); - iscsi_session_teardown(cls_session); - iscsi_host_remove(shost, false); + iscsi_session_free(cls_session); iscsi_host_free(shost); } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index ef2fc860257e..127f3d7f19dc 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3104,17 +3104,32 @@ dec_session_count: } EXPORT_SYMBOL_GPL(iscsi_session_setup); -/** - * iscsi_session_teardown - destroy session, host, and cls_session - * @cls_session: iscsi session +/* + * iscsi_session_remove - Remove session from iSCSI class. */ -void iscsi_session_teardown(struct iscsi_cls_session *cls_session) +void iscsi_session_remove(struct iscsi_cls_session *cls_session) { struct iscsi_session *session = cls_session->dd_data; - struct module *owner = cls_session->transport->owner; struct Scsi_Host *shost = session->host; iscsi_remove_session(cls_session); + /* + * host removal only has to wait for its children to be removed from + * sysfs, and iscsi_tcp needs to do iscsi_host_remove before freeing + * the session, so drop the session count here. + */ + iscsi_host_dec_session_cnt(shost); +} +EXPORT_SYMBOL_GPL(iscsi_session_remove);
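Editorial aside: the teardown split above follows a general rule: an object must be unpublished (removed from sysfs and all lookup paths) before its memory is freed, which lets iscsi_tcp remove the host in between the two steps. A toy userspace illustration of the ordering, not kernel code:

#include <stdio.h>
#include <stdlib.h>

struct session { int id; };

/* Global "registry" standing in for sysfs visibility. */
static struct session *registry;

static void session_remove(struct session *s)
{
        /* Step 1: make the object unreachable for new lookups. */
        if (registry == s)
                registry = NULL;
}

static void session_free(struct session *s)
{
        /* Step 2: only now is it safe to release the memory. */
        free(s);
}

int main(void)
{
        struct session *s = malloc(sizeof(*s));

        s->id = 1;
        registry = s;

        session_remove(s);      /* callers can no longer find it */
        /* ...iscsi_tcp removes the host's sysfs files here... */
        session_free(s);        /* then reclaim */
        printf("torn down in order\n");
        return 0;
}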
+ +/** + * iscsi_session_free - Free iscsi session and its resources + * @cls_session: iscsi session + */ +void iscsi_session_free(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *session = cls_session->dd_data; + struct module *owner = cls_session->transport->owner; iscsi_pool_free(&session->cmdpool); kfree(session->password); @@ -3132,10 +3147,19 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) kfree(session->discovery_parent_type); iscsi_free_session(cls_session); - - iscsi_host_dec_session_cnt(shost); module_put(owner); } +EXPORT_SYMBOL_GPL(iscsi_session_free); + +/** + * iscsi_session_teardown - destroy session and cls_session + * @cls_session: iscsi session + */ +void iscsi_session_teardown(struct iscsi_cls_session *cls_session) +{ + iscsi_session_remove(cls_session); + iscsi_session_free(cls_session); +} EXPORT_SYMBOL_GPL(iscsi_session_teardown); /** diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index bac111456fa1..2b95b4550a63 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -73,8 +73,8 @@ static bool __target_check_io_state(struct se_cmd *se_cmd, { struct se_session *sess = se_cmd->se_sess; - assert_spin_locked(&sess->sess_cmd_lock); - WARN_ON_ONCE(!irqs_disabled()); + lockdep_assert_held(&sess->sess_cmd_lock); + /* * If command already reached CMD_T_COMPLETE state within * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index bda61be5f035..3a1c4d31e010 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1234,12 +1234,14 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba) * clock scaling is in progress */ ufshcd_scsi_block_requests(hba); + mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); if (!hba->clk_scaling.is_allowed || ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) { ret = -EBUSY; up_write(&hba->clk_scaling_lock); + mutex_unlock(&hba->wb_mutex); ufshcd_scsi_unblock_requests(hba); goto out; } @@ -1251,12 +1253,16 @@ out: return ret; } -static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock) +static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err, bool scale_up) { - if (writelock) - up_write(&hba->clk_scaling_lock); - else - up_read(&hba->clk_scaling_lock); + up_write(&hba->clk_scaling_lock); + + /* Enable Write Booster if we have scaled up else disable it */ + if (ufshcd_enable_wb_if_scaling_up(hba) && !err) + ufshcd_wb_toggle(hba, scale_up); + + mutex_unlock(&hba->wb_mutex); + ufshcd_scsi_unblock_requests(hba); ufshcd_release(hba); } @@ -1273,7 +1279,6 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock) static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) { int ret = 0; - bool is_writelock = true; ret = ufshcd_clock_scaling_prepare(hba); if (ret) @@ -1302,15 +1307,8 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up) } } - /* Enable Write Booster if we have scaled up else disable it */ - if (ufshcd_enable_wb_if_scaling_up(hba)) { - downgrade_write(&hba->clk_scaling_lock); - is_writelock = false; - ufshcd_wb_toggle(hba, scale_up); - } - out_unprepare: - ufshcd_clock_scaling_unprepare(hba, is_writelock); + ufshcd_clock_scaling_unprepare(hba, ret, scale_up); return ret; } @@ -6066,9 +6064,11 @@ static void ufshcd_force_error_recovery(struct
ufs_hba *hba) static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow) { + mutex_lock(&hba->wb_mutex); down_write(&hba->clk_scaling_lock); hba->clk_scaling.is_allowed = allow; up_write(&hba->clk_scaling_lock); + mutex_unlock(&hba->wb_mutex); } static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) @@ -9793,6 +9793,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) /* Initialize mutex for exception event control */ mutex_init(&hba->ee_ctrl_mutex); + mutex_init(&hba->wb_mutex); init_rwsem(&hba->clk_scaling_lock); ufshcd_init_clk_gating(hba); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 23c24fe98c00..2209372f236d 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1856,24 +1856,33 @@ unwind: * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when * hugetlbfs is in use. */ -static void vfio_test_domain_fgsp(struct vfio_domain *domain) +static void vfio_test_domain_fgsp(struct vfio_domain *domain, struct list_head *regions) { - struct page *pages; int ret, order = get_order(PAGE_SIZE * 2); + struct vfio_iova *region; + struct page *pages; + dma_addr_t start; pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); if (!pages) return; - ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, - IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); - if (!ret) { - size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); + list_for_each_entry(region, regions, list) { + start = ALIGN(region->start, PAGE_SIZE * 2); + if (start >= region->end || (region->end - start < PAGE_SIZE * 2)) + continue; - if (unmapped == PAGE_SIZE) - iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE); - else - domain->fgsp = true; + ret = iommu_map(domain->domain, start, page_to_phys(pages), PAGE_SIZE * 2, + IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE); + if (!ret) { + size_t unmapped = iommu_unmap(domain->domain, start, PAGE_SIZE); + + if (unmapped == PAGE_SIZE) + iommu_unmap(domain->domain, start + PAGE_SIZE, PAGE_SIZE); + else + domain->fgsp = true; + } + break; } __free_pages(pages, order); @@ -2326,7 +2335,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, } } - vfio_test_domain_fgsp(domain); + vfio_test_domain_fgsp(domain, &iova_copy); /* replay mappings on new domains */ ret = vfio_iommu_replay(iommu, domain); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 69a1b8c6a2ec..a2f04a3808db 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -482,11 +482,12 @@ ext4_xattr_inode_verify_hashes(struct inode *ea_inode, */ e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len, &tmp_data, 1); - if (e_hash == entry->e_hash) - return 0; - /* Still no match - bad */ - return -EFSCORRUPTED; + if (e_hash != entry->e_hash) + return -EFSCORRUPTED; + + /* Let people know about old hash */ + pr_warn_once("ext4: filesystem with signed xattr name hash"); } return 0; } @@ -3096,7 +3097,7 @@ static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value, while (name_len--) { hash = (hash << NAME_HASH_SHIFT) ^ (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^ - *name++; + (unsigned char)*name++; } while (value_count--) { hash = (hash << VALUE_HASH_SHIFT) ^ diff --git a/fs/fuse/acl.c b/fs/fuse/acl.c index a4850aee2639..ad670369955f 100644 --- a/fs/fuse/acl.c +++ b/fs/fuse/acl.c @@ -11,9 +11,10 @@ #include <linux/posix_acl.h> #include <linux/posix_acl_xattr.h> -struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu) +static 
struct posix_acl *__fuse_get_acl(struct fuse_conn *fc, + struct user_namespace *mnt_userns, + struct inode *inode, int type, bool rcu) { - struct fuse_conn *fc = get_fuse_conn(inode); int size; const char *name; void *value = NULL; @@ -25,7 +26,7 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu) if (fuse_is_bad(inode)) return ERR_PTR(-EIO); - if (!fc->posix_acl || fc->no_getxattr) + if (fc->no_getxattr) return NULL; if (type == ACL_TYPE_ACCESS) @@ -53,6 +54,46 @@ struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu) return acl; } +static inline bool fuse_no_acl(const struct fuse_conn *fc, + const struct inode *inode) +{ + /* + * Refuse interacting with POSIX ACLs for daemons that + * don't support FUSE_POSIX_ACL and are not mounted on + * the host to retain backwards compatibility. + */ + return !fc->posix_acl && (i_user_ns(inode) != &init_user_ns); +} + +struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns, + struct dentry *dentry, int type) +{ + struct inode *inode = d_inode(dentry); + struct fuse_conn *fc = get_fuse_conn(inode); + + if (fuse_no_acl(fc, inode)) + return ERR_PTR(-EOPNOTSUPP); + + return __fuse_get_acl(fc, mnt_userns, inode, type, false); +} + +struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu) +{ + struct fuse_conn *fc = get_fuse_conn(inode); + + /* + * FUSE daemons before FUSE_POSIX_ACL was introduced could get and set + * POSIX ACLs without them being used for permission checking by the + * vfs. Retain that behavior for backwards compatibility as there are + * filesystems that do all permission checking for acls in the daemon + * and not in the kernel. + */ + if (!fc->posix_acl) + return NULL; + + return __fuse_get_acl(fc, &init_user_ns, inode, type, rcu); +} + int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry, struct posix_acl *acl, int type) { @@ -64,7 +105,7 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry, if (fuse_is_bad(inode)) return -EIO; - if (!fc->posix_acl || fc->no_setxattr) + if (fc->no_setxattr || fuse_no_acl(fc, inode)) return -EOPNOTSUPP; if (type == ACL_TYPE_ACCESS) @@ -99,7 +140,13 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry, return ret; } - if (!vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) && + /* + * Fuse daemons without FUSE_POSIX_ACL never changed the passed + * through POSIX ACLs. Such daemons don't expect setgid bits to + * be stripped. + */ + if (fc->posix_acl && + !vfsgid_in_group_p(i_gid_into_vfsgid(&init_user_ns, inode)) && !capable_wrt_inode_uidgid(&init_user_ns, inode, CAP_FSETID)) extra_flags |= FUSE_SETXATTR_ACL_KILL_SGID; @@ -108,8 +155,15 @@ int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry, } else { ret = fuse_removexattr(inode, name); } - forget_all_cached_acls(inode); - fuse_invalidate_attr(inode); + + if (fc->posix_acl) { + /* + * Fuse daemons without FUSE_POSIX_ACL never cached POSIX ACLs + * and didn't invalidate attributes. Retain that behavior. 
+ */ + forget_all_cached_acls(inode); + fuse_invalidate_attr(inode); + } return ret; } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index cd1a071b625a..2725fb54328e 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1942,7 +1942,8 @@ static const struct inode_operations fuse_dir_inode_operations = { .permission = fuse_permission, .getattr = fuse_getattr, .listxattr = fuse_listxattr, - .get_inode_acl = fuse_get_acl, + .get_inode_acl = fuse_get_inode_acl, + .get_acl = fuse_get_acl, .set_acl = fuse_set_acl, .fileattr_get = fuse_fileattr_get, .fileattr_set = fuse_fileattr_set, @@ -1964,7 +1965,8 @@ static const struct inode_operations fuse_common_inode_operations = { .permission = fuse_permission, .getattr = fuse_getattr, .listxattr = fuse_listxattr, - .get_inode_acl = fuse_get_acl, + .get_inode_acl = fuse_get_inode_acl, + .get_acl = fuse_get_acl, .set_acl = fuse_set_acl, .fileattr_get = fuse_fileattr_get, .fileattr_set = fuse_fileattr_set, diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index c673faefdcb9..46797a171a84 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -1264,11 +1264,11 @@ ssize_t fuse_getxattr(struct inode *inode, const char *name, void *value, ssize_t fuse_listxattr(struct dentry *entry, char *list, size_t size); int fuse_removexattr(struct inode *inode, const char *name); extern const struct xattr_handler *fuse_xattr_handlers[]; -extern const struct xattr_handler *fuse_acl_xattr_handlers[]; -extern const struct xattr_handler *fuse_no_acl_xattr_handlers[]; struct posix_acl; -struct posix_acl *fuse_get_acl(struct inode *inode, int type, bool rcu); +struct posix_acl *fuse_get_inode_acl(struct inode *inode, int type, bool rcu); +struct posix_acl *fuse_get_acl(struct user_namespace *mnt_userns, + struct dentry *dentry, int type); int fuse_set_acl(struct user_namespace *mnt_userns, struct dentry *dentry, struct posix_acl *acl, int type); diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 6b3beda16c1b..de9b9ec5ce81 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -311,7 +311,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, fuse_dax_dontcache(inode, attr->flags); } -static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) +static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr, + struct fuse_conn *fc) { inode->i_mode = attr->mode & S_IFMT; inode->i_size = attr->size; @@ -333,6 +334,12 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr) new_decode_dev(attr->rdev)); } else BUG(); + /* + * Ensure that we don't cache acls for daemons without FUSE_POSIX_ACL + * so they see the exact same behavior as before. 
+ */ + if (!fc->posix_acl) + inode->i_acl = inode->i_default_acl = ACL_DONT_CACHE; } static int fuse_inode_eq(struct inode *inode, void *_nodeidp) @@ -372,7 +379,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid, if (!inode) return NULL; - fuse_init_inode(inode, attr); + fuse_init_inode(inode, attr, fc); get_fuse_inode(inode)->nodeid = nodeid; inode->i_flags |= S_AUTOMOUNT; goto done; @@ -388,7 +395,7 @@ retry: if (!fc->writeback_cache || !S_ISREG(attr->mode)) inode->i_flags |= S_NOCMTIME; inode->i_generation = generation; - fuse_init_inode(inode, attr); + fuse_init_inode(inode, attr, fc); unlock_new_inode(inode); } else if (fuse_stale_inode(inode, generation, attr)) { /* nodeid was reused, any I/O on the old inode should fail */ @@ -1174,7 +1181,6 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, if ((flags & FUSE_POSIX_ACL)) { fc->default_permissions = 1; fc->posix_acl = 1; - fm->sb->s_xattr = fuse_acl_xattr_handlers; } if (flags & FUSE_CACHE_SYMLINKS) fc->cache_symlinks = 1; @@ -1420,13 +1426,6 @@ static void fuse_sb_defaults(struct super_block *sb) if (sb->s_user_ns != &init_user_ns) sb->s_iflags |= SB_I_UNTRUSTED_MOUNTER; sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION); - - /* - * If we are not in the initial user namespace posix - * acls must be translated. - */ - if (sb->s_user_ns != &init_user_ns) - sb->s_xattr = fuse_no_acl_xattr_handlers; } static int fuse_fill_super_submount(struct super_block *sb, diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c index 0d3e7177fce0..9fe571ab569e 100644 --- a/fs/fuse/xattr.c +++ b/fs/fuse/xattr.c @@ -203,27 +203,6 @@ static int fuse_xattr_set(const struct xattr_handler *handler, return fuse_setxattr(inode, name, value, size, flags, 0); } -static bool no_xattr_list(struct dentry *dentry) -{ - return false; -} - -static int no_xattr_get(const struct xattr_handler *handler, - struct dentry *dentry, struct inode *inode, - const char *name, void *value, size_t size) -{ - return -EOPNOTSUPP; -} - -static int no_xattr_set(const struct xattr_handler *handler, - struct user_namespace *mnt_userns, - struct dentry *dentry, struct inode *nodee, - const char *name, const void *value, - size_t size, int flags) -{ - return -EOPNOTSUPP; -} - static const struct xattr_handler fuse_xattr_handler = { .prefix = "", .get = fuse_xattr_get, @@ -234,33 +213,3 @@ const struct xattr_handler *fuse_xattr_handlers[] = { &fuse_xattr_handler, NULL }; - -const struct xattr_handler *fuse_acl_xattr_handlers[] = { - &posix_acl_access_xattr_handler, - &posix_acl_default_xattr_handler, - &fuse_xattr_handler, - NULL -}; - -static const struct xattr_handler fuse_no_acl_access_xattr_handler = { - .name = XATTR_NAME_POSIX_ACL_ACCESS, - .flags = ACL_TYPE_ACCESS, - .list = no_xattr_list, - .get = no_xattr_get, - .set = no_xattr_set, -}; - -static const struct xattr_handler fuse_no_acl_default_xattr_handler = { - .name = XATTR_NAME_POSIX_ACL_DEFAULT, - .flags = ACL_TYPE_ACCESS, - .list = no_xattr_list, - .get = no_xattr_get, - .set = no_xattr_set, -}; - -const struct xattr_handler *fuse_no_acl_xattr_handlers[] = { - &fuse_no_acl_access_xattr_handler, - &fuse_no_acl_default_xattr_handler, - &fuse_xattr_handler, - NULL -}; diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 723639376ae2..61323deb80bc 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -80,6 +80,15 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd) brelse(bd->bd_bh); } +static int __gfs2_writepage(struct page *page, struct writeback_control *wbc, + void *data) +{ + struct address_space 
*mapping = data; + int ret = mapping->a_ops->writepage(page, wbc); + mapping_set_error(mapping, ret); + return ret; +} + /** * gfs2_ail1_start_one - Start I/O on a transaction * @sdp: The superblock @@ -131,7 +140,7 @@ __acquires(&sdp->sd_ail_lock) if (!mapping) continue; spin_unlock(&sdp->sd_ail_lock); - ret = filemap_fdatawrite_wbc(mapping, wbc); + ret = write_cache_pages(mapping, wbc, __gfs2_writepage, mapping); if (need_resched()) { blk_finish_plug(plug); cond_resched(); diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index 0ef070349014..c0950edb26b0 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -662,6 +662,39 @@ static struct shrinker nfsd_file_shrinker = { }; /** + * nfsd_file_cond_queue - conditionally unhash and queue a nfsd_file + * @nf: nfsd_file to attempt to queue + * @dispose: private list to queue successfully-put objects + * + * Unhash an nfsd_file, try to get a reference to it, and then put that + * reference. If it's the last reference, queue it to the dispose list. + */ +static void +nfsd_file_cond_queue(struct nfsd_file *nf, struct list_head *dispose) + __must_hold(RCU) +{ + int decrement = 1; + + /* If we raced with someone else unhashing, ignore it */ + if (!nfsd_file_unhash(nf)) + return; + + /* If we can't get a reference, ignore it */ + if (!nfsd_file_get(nf)) + return; + + /* Extra decrement if we remove from the LRU */ + if (nfsd_file_lru_remove(nf)) + ++decrement; + + /* If refcount goes to 0, then put on the dispose list */ + if (refcount_sub_and_test(decrement, &nf->nf_ref)) { + list_add(&nf->nf_lru, dispose); + trace_nfsd_file_closing(nf); + } +} + +/** * nfsd_file_queue_for_close: try to close out any open nfsd_files for an inode * @inode: inode on which to close out nfsd_files * @dispose: list on which to gather nfsd_files to close out @@ -688,30 +721,11 @@ nfsd_file_queue_for_close(struct inode *inode, struct list_head *dispose) rcu_read_lock(); do { - int decrement = 1; - nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key, nfsd_file_rhash_params); if (!nf) break; - - /* If we raced with someone else unhashing, ignore it */ - if (!nfsd_file_unhash(nf)) - continue; - - /* If we can't get a reference, ignore it */ - if (!nfsd_file_get(nf)) - continue; - - /* Extra decrement if we remove from the LRU */ - if (nfsd_file_lru_remove(nf)) - ++decrement; - - /* If refcount goes to 0, then put on the dispose list */ - if (refcount_sub_and_test(decrement, &nf->nf_ref)) { - list_add(&nf->nf_lru, dispose); - trace_nfsd_file_closing(nf); - } + nfsd_file_cond_queue(nf, dispose); } while (1); rcu_read_unlock(); } @@ -928,11 +942,8 @@ __nfsd_file_cache_purge(struct net *net) nf = rhashtable_walk_next(&iter); while (!IS_ERR_OR_NULL(nf)) { - if (!net || nf->nf_net == net) { - nfsd_file_unhash(nf); - nfsd_file_lru_remove(nf); - list_add(&nf->nf_lru, &dispose); - } + if (!net || nf->nf_net == net) + nfsd_file_cond_queue(nf, &dispose); nf = rhashtable_walk_next(&iter); } diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h index ddb10aa67b14..1f68b49bcd68 100644 --- a/include/linux/apple-gmux.h +++ b/include/linux/apple-gmux.h @@ -8,18 +8,118 @@ #define LINUX_APPLE_GMUX_H #include <linux/acpi.h> +#include <linux/io.h> +#include <linux/pnp.h> #define GMUX_ACPI_HID "APP000B" +/* + * gmux port offsets. Many of these are not yet used, but may be in the + * future, and it's useful to have them documented here anyhow. 
+ */ +#define GMUX_PORT_VERSION_MAJOR 0x04 +#define GMUX_PORT_VERSION_MINOR 0x05 +#define GMUX_PORT_VERSION_RELEASE 0x06 +#define GMUX_PORT_SWITCH_DISPLAY 0x10 +#define GMUX_PORT_SWITCH_GET_DISPLAY 0x11 +#define GMUX_PORT_INTERRUPT_ENABLE 0x14 +#define GMUX_PORT_INTERRUPT_STATUS 0x16 +#define GMUX_PORT_SWITCH_DDC 0x28 +#define GMUX_PORT_SWITCH_EXTERNAL 0x40 +#define GMUX_PORT_SWITCH_GET_EXTERNAL 0x41 +#define GMUX_PORT_DISCRETE_POWER 0x50 +#define GMUX_PORT_MAX_BRIGHTNESS 0x70 +#define GMUX_PORT_BRIGHTNESS 0x74 +#define GMUX_PORT_VALUE 0xc2 +#define GMUX_PORT_READ 0xd0 +#define GMUX_PORT_WRITE 0xd4 + +#define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) + #if IS_ENABLED(CONFIG_APPLE_GMUX) +static inline bool apple_gmux_is_indexed(unsigned long iostart) +{ + u16 val; + + outb(0xaa, iostart + 0xcc); + outb(0x55, iostart + 0xcd); + outb(0x00, iostart + 0xce); + + val = inb(iostart + 0xcc) | (inb(iostart + 0xcd) << 8); + if (val == 0x55aa) + return true; + + return false; +} /** - * apple_gmux_present() - detect if gmux is built into the machine + * apple_gmux_detect() - detect if gmux is built into the machine + * + * @pnp_dev: Device to probe or NULL to use the first matching device + * @indexed_ret: Returns (by reference) if the gmux is indexed or not + * + * Detect if a supported gmux device is present by actually probing it. + * This avoids the false positives returned on some models by + * apple_gmux_present(). + * + * Return: %true if a supported gmux ACPI device is detected and the kernel + * was configured with CONFIG_APPLE_GMUX, %false otherwise. + */ +static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret) +{ + u8 ver_major, ver_minor, ver_release; + struct device *dev = NULL; + struct acpi_device *adev; + struct resource *res; + bool indexed = false; + bool ret = false; + + if (!pnp_dev) { + adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1); + if (!adev) + return false; + + dev = get_device(acpi_get_first_physical_node(adev)); + acpi_dev_put(adev); + if (!dev) + return false; + + pnp_dev = to_pnp_dev(dev); + } + + res = pnp_get_resource(pnp_dev, IORESOURCE_IO, 0); + if (!res || resource_size(res) < GMUX_MIN_IO_LEN) + goto out; + + /* + * Invalid version information may indicate either that the gmux + * device isn't present or that it's a new one that uses indexed io. + */ + ver_major = inb(res->start + GMUX_PORT_VERSION_MAJOR); + ver_minor = inb(res->start + GMUX_PORT_VERSION_MINOR); + ver_release = inb(res->start + GMUX_PORT_VERSION_RELEASE); + if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { + indexed = apple_gmux_is_indexed(res->start); + if (!indexed) + goto out; + } + + if (indexed_ret) + *indexed_ret = indexed; + + ret = true; +out: + put_device(dev); + return ret; +} + +/** + * apple_gmux_present() - check if gmux ACPI device is present * * Drivers may use this to activate quirks specific to dual GPU MacBook Pros * and Mac Pros, e.g. for deferred probing, runtime pm and backlight. * - * Return: %true if gmux is present and the kernel was configured + * Return: %true if gmux ACPI device is present and the kernel was configured * with CONFIG_APPLE_GMUX, %false otherwise. 
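Editorial aside: apple_gmux_detect() above combines a boolean presence result with an optional out-parameter that callers may pass as NULL when they only care about presence. A compact userspace sketch of that API shape (names invented):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical detector: returns presence; optionally reports a mode. */
static bool widget_detect(bool *indexed_ret)
{
        bool present = true;    /* stand-in for the real hardware probe */
        bool indexed = false;   /* stand-in for the 0x55aa handshake result */

        if (!present)
                return false;

        /* Only write through the out-param when the caller asked for it. */
        if (indexed_ret)
                *indexed_ret = indexed;
        return true;
}

int main(void)
{
        bool indexed;

        if (widget_detect(&indexed))
                printf("found, %s mode\n", indexed ? "indexed" : "classic");
        if (widget_detect(NULL))        /* presence-only check is also valid */
                printf("found\n");
        return 0;
}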
*/ static inline bool apple_gmux_present(void) @@ -34,6 +134,11 @@ static inline bool apple_gmux_present(void) return false; } +static inline bool apple_gmux_detect(struct pnp_dev *pnp_dev, bool *indexed_ret) +{ + return false; +} + #endif /* !CONFIG_APPLE_GMUX */ #endif /* LINUX_APPLE_GMUX_H */ diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index b3ba04615caa..56189e4252da 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -336,9 +336,12 @@ struct gdma_queue_spec { }; }; +#define MANA_IRQ_NAME_SZ 32 + struct gdma_irq_context { void (*handler)(void *arg); void *arg; + char name[MANA_IRQ_NAME_SZ]; }; struct gdma_context { diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 695eebc6f2c8..e39fb0736ade 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h @@ -422,6 +422,8 @@ extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost, extern struct iscsi_cls_session * iscsi_session_setup(struct iscsi_transport *, struct Scsi_Host *shost, uint16_t, int, int, uint32_t, unsigned int); +void iscsi_session_remove(struct iscsi_cls_session *cls_session); +void iscsi_session_free(struct iscsi_cls_session *cls_session); extern void iscsi_session_teardown(struct iscsi_cls_session *); extern void iscsi_session_recovery_timedout(struct iscsi_cls_session *); extern int iscsi_set_param(struct iscsi_cls_conn *cls_conn, diff --git a/include/uapi/linux/netfilter/nf_conntrack_sctp.h b/include/uapi/linux/netfilter/nf_conntrack_sctp.h index c742469afe21..2d6f80d75ae7 100644 --- a/include/uapi/linux/netfilter/nf_conntrack_sctp.h +++ b/include/uapi/linux/netfilter/nf_conntrack_sctp.h @@ -15,8 +15,7 @@ enum sctp_conntrack { SCTP_CONNTRACK_SHUTDOWN_RECD, SCTP_CONNTRACK_SHUTDOWN_ACK_SENT, SCTP_CONNTRACK_HEARTBEAT_SENT, - SCTP_CONNTRACK_HEARTBEAT_ACKED, - SCTP_CONNTRACK_DATA_SENT, + SCTP_CONNTRACK_HEARTBEAT_ACKED, /* no longer used */ SCTP_CONNTRACK_MAX }; diff --git a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h index 94e74034706d..aa805e6d4e28 100644 --- a/include/uapi/linux/netfilter/nfnetlink_cttimeout.h +++ b/include/uapi/linux/netfilter/nfnetlink_cttimeout.h @@ -94,8 +94,7 @@ enum ctattr_timeout_sctp { CTA_TIMEOUT_SCTP_SHUTDOWN_RECD, CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT, CTA_TIMEOUT_SCTP_HEARTBEAT_SENT, - CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, - CTA_TIMEOUT_SCTP_DATA_SENT, + CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED, /* no longer used */ __CTA_TIMEOUT_SCTP_MAX }; #define CTA_TIMEOUT_SCTP_MAX (__CTA_TIMEOUT_SCTP_MAX - 1) diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 5cf81dff60aa..727084cd79be 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -808,6 +808,7 @@ struct ufs_hba_monitor { * @urgent_bkops_lvl: keeps track of urgent bkops level for device * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for * device is known or not. 
+ * @wb_mutex: used to serialize devfreq and sysfs write booster toggling * @clk_scaling_lock: used to serialize device commands and clock scaling * @desc_size: descriptor sizes reported by device * @scsi_block_reqs_cnt: reference counting for scsi block requests @@ -951,6 +952,7 @@ struct ufs_hba { enum bkops_status urgent_bkops_lvl; bool is_urgent_bkops_lvl_checked; + struct mutex wb_mutex; struct rw_semaphore clk_scaling_lock; unsigned char desc_size[QUERY_DESC_IDN_MAX]; atomic_t scsi_block_reqs_cnt; diff --git a/kernel/module/main.c b/kernel/module/main.c index 48568a0f5651..4ac3fe43e6c8 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2393,7 +2393,8 @@ static bool finished_loading(const char *name) sched_annotate_sleep(); mutex_lock(&module_mutex); mod = find_module_all(name, strlen(name), true); - ret = !mod || mod->state == MODULE_STATE_LIVE; + ret = !mod || mod->state == MODULE_STATE_LIVE + || mod->state == MODULE_STATE_GOING; mutex_unlock(&module_mutex); return ret; @@ -2569,20 +2570,35 @@ static int add_unformed_module(struct module *mod) mod->state = MODULE_STATE_UNFORMED; -again: mutex_lock(&module_mutex); old = find_module_all(mod->name, strlen(mod->name), true); if (old != NULL) { - if (old->state != MODULE_STATE_LIVE) { + if (old->state == MODULE_STATE_COMING + || old->state == MODULE_STATE_UNFORMED) { /* Wait in case it fails to load. */ mutex_unlock(&module_mutex); err = wait_event_interruptible(module_wq, finished_loading(mod->name)); if (err) goto out_unlocked; - goto again; + + /* The module might have gone in the meantime. */ + mutex_lock(&module_mutex); + old = find_module_all(mod->name, strlen(mod->name), + true); } - err = -EEXIST; + + /* + * We are here only when the same module was being loaded. Do + * not try to load it again right now. It prevents long delays + * caused by serialized module load failures. It might happen + * when more devices of the same type trigger load of + * a particular module. + */ + if (old && old->state == MODULE_STATE_LIVE) + err = -EEXIST; + else + err = -EBUSY; goto out; } mod_update_bounds(mod); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index bb1ee6d7bdde..e838feb6adc5 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8290,12 +8290,18 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) if (retval) goto out_put_task; + /* + * With non-SMP configs, user_cpus_ptr/user_mask isn't used and + * alloc_user_cpus_ptr() returns NULL. 
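Editorial aside: the add_unformed_module() rework above distinguishes two duplicate-load outcomes: -EEXIST only when the competing copy actually reached MODULE_STATE_LIVE, and -EBUSY when it is still coming or already going, so stacked loads of a failing module no longer serialize behind each other. The decision reduces to a small state check, sketched here with simplified states:

#include <errno.h>
#include <stdio.h>

enum state { ST_UNFORMED, ST_COMING, ST_LIVE, ST_GOING, ST_GONE };

/* ST_GONE stands in for "no module found" after the wait. */
static int duplicate_load_result(enum state other)
{
        if (other == ST_GONE)
                return 0;               /* other loader vanished: proceed */
        if (other == ST_LIVE)
                return -EEXIST;         /* genuinely already loaded */
        return -EBUSY;                  /* coming or going: back off, retry later */
}

int main(void)
{
        printf("live:  %d\n", duplicate_load_result(ST_LIVE));
        printf("going: %d\n", duplicate_load_result(ST_GOING));
        printf("gone:  %d\n", duplicate_load_result(ST_GONE));
        return 0;
}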
+ */ user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE); - if (IS_ENABLED(CONFIG_SMP) && !user_mask) { + if (user_mask) { + cpumask_copy(user_mask, in_mask); + } else if (IS_ENABLED(CONFIG_SMP)) { retval = -ENOMEM; goto out_put_task; } - cpumask_copy(user_mask, in_mask); + ac = (struct affinity_context){ .new_mask = in_mask, .user_mask = user_mask, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c36aa54ae071..0f8736991427 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7229,10 +7229,10 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) eenv_task_busy_time(&eenv, p, prev_cpu); for (; pd; pd = pd->next) { + unsigned long util_min = p_util_min, util_max = p_util_max; unsigned long cpu_cap, cpu_thermal_cap, util; unsigned long cur_delta, max_spare_cap = 0; unsigned long rq_util_min, rq_util_max; - unsigned long util_min, util_max; unsigned long prev_spare_cap = 0; int max_spare_cap_cpu = -1; unsigned long base_energy; @@ -7251,6 +7251,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) eenv.pd_cap = 0; for_each_cpu(cpu, cpus) { + struct rq *rq = cpu_rq(cpu); + eenv.pd_cap += cpu_thermal_cap; if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) @@ -7269,24 +7271,19 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu) * much capacity we can get out of the CPU; this is * aligned with sched_cpu_util(). */ - if (uclamp_is_used()) { - if (uclamp_rq_is_idle(cpu_rq(cpu))) { - util_min = p_util_min; - util_max = p_util_max; - } else { - /* - * Open code uclamp_rq_util_with() except for - * the clamp() part. Ie: apply max aggregation - * only. util_fits_cpu() logic requires to - * operate on non clamped util but must use the - * max-aggregated uclamp_{min, max}. - */ - rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN); - rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX); - - util_min = max(rq_util_min, p_util_min); - util_max = max(rq_util_max, p_util_max); - } + if (uclamp_is_used() && !uclamp_rq_is_idle(rq)) { + /* + * Open code uclamp_rq_util_with() except for + * the clamp() part. Ie: apply max aggregation + * only. util_fits_cpu() logic requires to + * operate on non clamped util but must use the + * max-aggregated uclamp_{min, max}. + */ + rq_util_min = uclamp_rq_get(rq, UCLAMP_MIN); + rq_util_max = uclamp_rq_get(rq, UCLAMP_MAX); + + util_min = max(rq_util_min, p_util_min); + util_max = max(rq_util_max, p_util_max); } if (!util_fits_cpu(util, util_min, util_max, cpu)) continue; @@ -8871,16 +8868,23 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) * * Thermal pressure will impact all cpus in this perf domain * equally. 
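Editorial aside: the find_energy_efficient_cpu() hunk above open-codes uclamp max-aggregation: unless the runqueue is idle, the effective clamps are the element-wise maxima of the task's and the runqueue's values, while the raw utilization itself stays unclamped. A plain-C rendering of that rule, with kernel types and helpers replaced:

#include <stdio.h>

struct clamps { unsigned long min, max; };

/* Max-aggregate per-task and per-runqueue utilization clamps, as the
 * scheduler hunk above does when the runqueue is not idle. */
static struct clamps aggregate(struct clamps task, struct clamps rq, int rq_idle)
{
        struct clamps eff = task;

        if (!rq_idle) {
                eff.min = rq.min > task.min ? rq.min : task.min;
                eff.max = rq.max > task.max ? rq.max : task.max;
        }
        return eff;
}

int main(void)
{
        struct clamps task = { 128, 512 }, rq = { 256, 1024 };
        struct clamps eff = aggregate(task, rq, 0);

        printf("effective min=%lu max=%lu\n", eff.min, eff.max); /* 256 1024 */
        return 0;
}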
*/ - if (static_branch_unlikely(&sched_asym_cpucapacity)) { + if (sched_energy_enabled()) { unsigned long inv_cap = capacity_orig - thermal_load_avg(rq); - struct perf_domain *pd = rcu_dereference(rq->rd->pd); + struct perf_domain *pd; + rcu_read_lock(); + + pd = rcu_dereference(rq->rd->pd); rq->cpu_capacity_inverted = 0; for (; pd; pd = pd->next) { struct cpumask *pd_span = perf_domain_span(pd); unsigned long pd_cap_orig, pd_cap; + /* We can't be inverted against our own pd */ + if (cpumask_test_cpu(cpu_of(rq), pd_span)) + continue; + cpu = cpumask_any(pd_span); pd_cap_orig = arch_scale_cpu_capacity(cpu); @@ -8905,6 +8909,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) break; } } + + rcu_read_unlock(); } trace_sched_cpu_capacity_tp(rq); diff --git a/lib/nlattr.c b/lib/nlattr.c index 9055e8b4d144..489e15bde5c1 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/jiffies.h> +#include <linux/nospec.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/types.h> @@ -381,6 +382,7 @@ static int validate_nla(const struct nlattr *nla, int maxtype, if (type <= 0 || type > maxtype) return 0; + type = array_index_nospec(type, maxtype + 1); pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); @@ -596,6 +598,7 @@ static int __nla_validate_parse(const struct nlattr *head, int len, int maxtype, } continue; } + type = array_index_nospec(type, maxtype + 1); if (policy) { int err = validate_nla(nla, maxtype, policy, validate, extack, depth); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 5581d22cc191..078a0a420c8a 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -137,12 +137,12 @@ static int ops_init(const struct pernet_operations *ops, struct net *net) return 0; if (ops->id && ops->size) { -cleanup: ng = rcu_dereference_protected(net->gen, lockdep_is_held(&pernet_ops_rwsem)); ng->ptr[*ops->id] = NULL; } +cleanup: kfree(data); out: diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index ce9ff3c62e84..3bb890a40ed7 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -30,6 +30,7 @@ #include <linux/slab.h> #include <linux/netlink.h> #include <linux/hash.h> +#include <linux/nospec.h> #include <net/arp.h> #include <net/inet_dscp.h> @@ -1022,6 +1023,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) if (type > RTAX_MAX) return false; + type = array_index_nospec(type, RTAX_MAX + 1); if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; bool ecn_ca = false; diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c index 7fcfdfd8f9de..0e3ee1532848 100644 --- a/net/ipv4/metrics.c +++ b/net/ipv4/metrics.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/netlink.h> +#include <linux/nospec.h> #include <linux/rtnetlink.h> #include <linux/types.h> #include <net/ip.h> @@ -25,6 +26,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, return -EINVAL; } + type = array_index_nospec(type, RTAX_MAX + 1); if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 60fd91bb5171..c314fdde0097 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -547,7 +547,20 @@ int ip6_forward(struct sk_buff *skb) pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) { int proxied = ip6_forward_proxy_check(skb); if (proxied > 0) { - hdr->hop_limit--; + /* It's tempting to decrease the hop limit + * here by 1, as we do 
at the end of the + * function too. + * + * But that would be incorrect, as proxying is + * not forwarding. The ip6_input function + * will handle this packet locally, and it + * depends on the hop limit being unchanged. + * + * One example is the NDP hop limit, which + * always has to stay 255, but others would be + * similar checks around RA packets, where the + * user can even change the desired limit. + */ return ip6_input(skb); } else if (proxied < 0) { __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS); diff --git a/net/mctp/af_mctp.c b/net/mctp/af_mctp.c index fc9e728b6333..45bbe3e54cc2 100644 --- a/net/mctp/af_mctp.c +++ b/net/mctp/af_mctp.c @@ -544,9 +544,6 @@ static int mctp_sk_init(struct sock *sk) static void mctp_sk_close(struct sock *sk, long timeout) { - struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk); - - del_timer_sync(&msk->key_expiry); sk_common_release(sk); } @@ -580,7 +577,14 @@ static void mctp_sk_unhash(struct sock *sk) spin_lock_irqsave(&key->lock, fl2); __mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED); } + sock_set_flag(sk, SOCK_DEAD); spin_unlock_irqrestore(&net->mctp.keys_lock, flags); + + /* Since there are no more tag allocations (we have removed all of the + * keys), stop any pending expiry events. The timer cannot be re-queued + * as the sk is no longer observable. + */ + del_timer_sync(&msk->key_expiry); } static struct proto mctp_proto = { diff --git a/net/mctp/route.c b/net/mctp/route.c index f9a80b82dc51..f51a05ec7162 100644 --- a/net/mctp/route.c +++ b/net/mctp/route.c @@ -147,6 +147,7 @@ static struct mctp_sk_key *mctp_key_alloc(struct mctp_sock *msk, key->valid = true; spin_lock_init(&key->lock); refcount_set(&key->refs, 1); + sock_hold(key->sk); return key; } @@ -165,6 +166,7 @@ void mctp_key_unref(struct mctp_sk_key *key) mctp_dev_release_key(key->dev, key); spin_unlock_irqrestore(&key->lock, flags); + sock_put(key->sk); kfree(key); } @@ -177,6 +179,11 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk) spin_lock_irqsave(&net->mctp.keys_lock, flags); + if (sock_flag(&msk->sk, SOCK_DEAD)) { + rc = -EINVAL; + goto out_unlock; + } + hlist_for_each_entry(tmp, &net->mctp.keys, hlist) { if (mctp_key_match(tmp, key->local_addr, key->peer_addr, key->tag)) { @@ -198,6 +205,7 @@ static int mctp_key_add(struct mctp_sk_key *key, struct mctp_sock *msk) hlist_add_head(&key->sklist, &msk->keys); } +out_unlock: spin_unlock_irqrestore(&net->mctp.keys_lock, flags); return rc; @@ -315,8 +323,8 @@ static int mctp_frag_queue(struct mctp_sk_key *key, struct sk_buff *skb) static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) { + struct mctp_sk_key *key, *any_key = NULL; struct net *net = dev_net(skb->dev); - struct mctp_sk_key *key; struct mctp_sock *msk; struct mctp_hdr *mh; unsigned long f; @@ -361,13 +369,11 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) * key for reassembly - we'll create a more specific * one for future packets if required (ie, !EOM). */ - key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f); - if (key) { - msk = container_of(key->sk, + any_key = mctp_lookup_key(net, skb, MCTP_ADDR_ANY, &f); + if (any_key) { + msk = container_of(any_key->sk, struct mctp_sock, sk); - spin_unlock_irqrestore(&key->lock, f); - mctp_key_unref(key); - key = NULL; + spin_unlock_irqrestore(&any_key->lock, f); } } @@ -419,14 +425,14 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb) * this function.
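Editorial aside: the sock_hold()/sock_put() pair added above ties the key's lifetime to the socket it references, so key->sk cannot dangle between unhash and the last mctp_key_unref(). The ownership rule in miniature, with a plain atomic counter standing in for the socket refcount (userspace sketch, invented names):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct sock { atomic_int refs; };

static void sock_hold(struct sock *sk) { atomic_fetch_add(&sk->refs, 1); }

static void sock_put(struct sock *sk)
{
        if (atomic_fetch_sub(&sk->refs, 1) == 1)
                free(sk);
}

struct key { struct sock *sk; };

static struct key *key_alloc(struct sock *sk)
{
        struct key *k = malloc(sizeof(*k));

        k->sk = sk;
        sock_hold(sk);          /* the key pins the socket it points at */
        return k;
}

static void key_free(struct key *k)
{
        sock_put(k->sk);        /* drop the pin only when the key dies */
        free(k);
}

int main(void)
{
        struct sock *sk = malloc(sizeof(*sk));
        struct key *k;

        atomic_init(&sk->refs, 1);
        k = key_alloc(sk);

        sock_put(sk);           /* original owner lets go... */
        key_free(k);            /* ...socket survives until the key drops it */
        printf("done\n");
        return 0;
}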
*/ rc = mctp_key_add(key, msk); - if (rc) { - kfree(key); - } else { + if (!rc) trace_mctp_key_acquire(key); - /* we don't need to release key->lock on exit */ - mctp_key_unref(key); - } + /* we don't need to release key->lock on exit, so + * clean up here and suppress the unlock via + * setting to NULL + */ + mctp_key_unref(key); key = NULL; } else { @@ -473,6 +479,8 @@ out_unlock: spin_unlock_irqrestore(&key->lock, f); mctp_key_unref(key); } + if (any_key) + mctp_key_unref(any_key); out: if (rc) kfree_skb(skb); diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index d88b92a8ffca..945dd40e7077 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -27,22 +27,16 @@ #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_timeout.h> -/* FIXME: Examine ipfilter's timeouts and conntrack transitions more - closely. They're more complex. --RR - - And so for me for SCTP :D -Kiran */ - static const char *const sctp_conntrack_names[] = { - "NONE", - "CLOSED", - "COOKIE_WAIT", - "COOKIE_ECHOED", - "ESTABLISHED", - "SHUTDOWN_SENT", - "SHUTDOWN_RECD", - "SHUTDOWN_ACK_SENT", - "HEARTBEAT_SENT", - "HEARTBEAT_ACKED", + [SCTP_CONNTRACK_NONE] = "NONE", + [SCTP_CONNTRACK_CLOSED] = "CLOSED", + [SCTP_CONNTRACK_COOKIE_WAIT] = "COOKIE_WAIT", + [SCTP_CONNTRACK_COOKIE_ECHOED] = "COOKIE_ECHOED", + [SCTP_CONNTRACK_ESTABLISHED] = "ESTABLISHED", + [SCTP_CONNTRACK_SHUTDOWN_SENT] = "SHUTDOWN_SENT", + [SCTP_CONNTRACK_SHUTDOWN_RECD] = "SHUTDOWN_RECD", + [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = "SHUTDOWN_ACK_SENT", + [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT", }; #define SECS * HZ @@ -54,13 +48,11 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_CLOSED] = 10 SECS, [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, - [SCTP_CONNTRACK_ESTABLISHED] = 5 DAYS, + [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, - [SCTP_CONNTRACK_HEARTBEAT_ACKED] = 210 SECS, - [SCTP_CONNTRACK_DATA_SENT] = 30 SECS, }; #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 @@ -74,8 +66,6 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { #define sSR SCTP_CONNTRACK_SHUTDOWN_RECD #define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT #define sHS SCTP_CONNTRACK_HEARTBEAT_SENT -#define sHA SCTP_CONNTRACK_HEARTBEAT_ACKED -#define sDS SCTP_CONNTRACK_DATA_SENT #define sIV SCTP_CONNTRACK_MAX /* @@ -98,10 +88,6 @@ SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of the SHUTDOWN chunk. Connection is closed. HEARTBEAT_SENT - We have seen a HEARTBEAT in a new flow. -HEARTBEAT_ACKED - We have seen a HEARTBEAT-ACK/DATA/SACK in the direction - opposite to that of the HEARTBEAT/DATA chunk. Secondary connection - is established. -DATA_SENT - We have seen a DATA/SACK in a new flow. */ /* TODO @@ -115,38 +101,36 @@ cookie echoed to closed. 
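Editorial aside: the conntrack code above is a table-driven state machine: the next state is a plain array lookup indexed by direction, chunk type, and current state, with sIV marking transitions that should never happen. The same pattern in miniature:

#include <stdio.h>

enum state { S_CLOSED, S_OPEN, S_MAX, S_INVALID };
enum event { E_OPEN, E_CLOSE, E_MAX };

/* next_state[event][current]: the whole protocol in one table. */
static const unsigned char next_state[E_MAX][S_MAX] = {
        /*            sCLOSED     sOPEN    */
        /* open  */ { S_OPEN,     S_OPEN   },
        /* close */ { S_INVALID,  S_CLOSED },
};

int main(void)
{
        enum state s = S_CLOSED;

        s = next_state[E_OPEN][s];
        printf("after open: %d\n", s);  /* S_OPEN */
        s = next_state[E_CLOSE][s];
        printf("after close: %d\n", s); /* S_CLOSED */
        return 0;
}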
*/ /* SCTP conntrack state transitions */ -static const u8 sctp_conntracks[2][12][SCTP_CONNTRACK_MAX] = { +static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW, sHA, sCW}, -/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL}, -/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, -/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL, sSS, sCL}, -/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA, sHA, sSA}, -/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't have Stale cookie*/ -/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* 5.2.4 - Big TODO */ -/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL, sHA, sCL},/* Can't come in orig dir */ -/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL, sHA, sCL}, -/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS}, -/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS}, -/* data/sack */ {sDS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS} +/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ +/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, +/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, +/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, +/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA, sSA}, +/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't have Stale cookie*/ +/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA, sCL},/* 5.2.4 - Big TODO */ +/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},/* Can't come in orig dir */ +/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL, sCL}, +/* heartbeat */ {sHS, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, +/* heartbeat_ack*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS}, }, { /* REPLY */ -/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sDS */ -/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* INIT in sCL Big TODO */ -/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV}, -/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV, sCL, sIV}, -/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV, sSR, sIV}, -/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV, sHA, sIV}, -/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV, sHA, sIV}, -/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV, sHA, sIV},/* Can't come in reply dir */ -/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA, sIV, sHA, sIV}, -/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL, sIV, sHA, sIV}, -/* heartbeat */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS, sHA, sHA}, -/* heartbeat_ack*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA}, -/* data/sack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHA, sHA, sHA}, +/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ +/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA, sIV},/* INIT in sCL Big TODO */ +/* init_ack */ {sIV, sCW, sCW, sCE, sES, sSS, sSR, sSA, sIV}, +/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV}, +/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA, sIV}, +/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA, sIV}, +/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA, sIV}, +/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, 
@@ -160,8 +144,8 @@ static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
 
 #define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count)	\
 for ((offset) = (dataoff) + sizeof(struct sctphdr), (count) = 0;	\
-	(offset) < (skb)->len &&					\
-	((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch)));	\
+	((sch) = skb_header_pointer((skb), (offset), sizeof(_sch), &(_sch))) &&	\
+	(sch)->length;	\
 	(offset) += (ntohs((sch)->length) + 3) & ~3, (count)++)
 
 /* Some validity checks to make sure the chunks are fine */
@@ -258,11 +242,6 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
 		pr_debug("SCTP_CID_HEARTBEAT_ACK");
 		i = 10;
 		break;
-	case SCTP_CID_DATA:
-	case SCTP_CID_SACK:
-		pr_debug("SCTP_CID_DATA/SACK");
-		i = 11;
-		break;
 	default:
 		/* Other chunks like DATA or SACK do not change the state */
 		pr_debug("Unknown chunk type, Will stay in %s\n",
@@ -316,9 +295,7 @@ sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
 				 ih->init_tag);
 
 			ct->proto.sctp.vtag[IP_CT_DIR_REPLY] = ih->init_tag;
-		} else if (sch->type == SCTP_CID_HEARTBEAT ||
-			   sch->type == SCTP_CID_DATA ||
-			   sch->type == SCTP_CID_SACK) {
+		} else if (sch->type == SCTP_CID_HEARTBEAT) {
 			pr_debug("Setting vtag %x for secondary conntrack\n",
 				 sh->vtag);
 			ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL] = sh->vtag;
@@ -404,19 +381,19 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 		if (!sctp_new(ct, skb, sh, dataoff))
 			return -NF_ACCEPT;
-	} else {
-		/* Check the verification tag (Sec 8.5) */
-		if (!test_bit(SCTP_CID_INIT, map) &&
-		    !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
-		    !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
-		    !test_bit(SCTP_CID_ABORT, map) &&
-		    !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
-		    !test_bit(SCTP_CID_HEARTBEAT, map) &&
-		    !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
-		    sh->vtag != ct->proto.sctp.vtag[dir]) {
-			pr_debug("Verification tag check failed\n");
-			goto out;
-		}
+	}
+
+	/* Check the verification tag (Sec 8.5) */
+	if (!test_bit(SCTP_CID_INIT, map) &&
+	    !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, map) &&
+	    !test_bit(SCTP_CID_COOKIE_ECHO, map) &&
+	    !test_bit(SCTP_CID_ABORT, map) &&
+	    !test_bit(SCTP_CID_SHUTDOWN_ACK, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT, map) &&
+	    !test_bit(SCTP_CID_HEARTBEAT_ACK, map) &&
+	    sh->vtag != ct->proto.sctp.vtag[dir]) {
+		pr_debug("Verification tag check failed\n");
+		goto out;
 	}
 
 	old_state = new_state = SCTP_CONNTRACK_NONE;
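The reworked for_each_sctp_chunk bound above stops the walk on a zero-length chunk rather than trusting the remaining skb length alone. A userspace sketch of the same guard over a toy (type, flags, length) chunk buffer, not the kernel macro itself:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct chunk_hdr {
        uint8_t  type;
        uint8_t  flags;
        uint16_t length; /* network order, includes this header */
    };

    /* A zero-length chunk would stop the cursor from advancing, so
     * treat it as end-of-input instead of looping forever.
     */
    static void walk_chunks(const uint8_t *buf, size_t len)
    {
        struct chunk_hdr ch;
        size_t off = 0;

        while (off + sizeof(ch) <= len) {
            memcpy(&ch, buf + off, sizeof(ch));
            if (!ch.length)
                break; /* malformed chunk: bail out */
            printf("chunk type %u\n", ch.type);
            off += (ntohs(ch.length) + 3) & ~3; /* chunks are 4-byte padded */
        }
    }

    int main(void)
    {
        /* one 4-byte chunk of type 7, then a zero-length terminator */
        const uint8_t buf[] = { 7, 0, 0, 4, 0, 0, 0, 0 };

        walk_chunks(buf, sizeof(buf));
        return 0;
    }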
@@ -424,22 +401,29 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 	for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
 		/* Special cases of Verification tag check (Sec 8.5.1) */
 		if (sch->type == SCTP_CID_INIT) {
-			/* Sec 8.5.1 (A) */
+			/* (A) vtag MUST be zero */
 			if (sh->vtag != 0)
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_ABORT) {
-			/* Sec 8.5.1 (B) */
-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-			    sh->vtag != ct->proto.sctp.vtag[!dir])
+			/* (B) vtag MUST match own vtag if T flag is unset OR
+			 * MUST match peer's vtag if T flag is set
+			 */
+			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
+			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[!dir]))
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
-			/* Sec 8.5.1 (C) */
-			if (sh->vtag != ct->proto.sctp.vtag[dir] &&
-			    sh->vtag != ct->proto.sctp.vtag[!dir] &&
-			    sch->flags & SCTP_CHUNK_FLAG_T)
+			/* (C) vtag MUST match own vtag if T flag is unset OR
+			 * MUST match peer's vtag if T flag is set
+			 */
+			if ((!(sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[dir]) ||
+			    ((sch->flags & SCTP_CHUNK_FLAG_T) &&
+			     sh->vtag != ct->proto.sctp.vtag[!dir]))
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_COOKIE_ECHO) {
-			/* Sec 8.5.1 (D) */
+			/* (D) vtag must be same as init_vtag as found in INIT_ACK */
 			if (sh->vtag != ct->proto.sctp.vtag[dir])
 				goto out_unlock;
 		} else if (sch->type == SCTP_CID_HEARTBEAT) {
@@ -476,11 +460,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 			} else if (ct->proto.sctp.flags & SCTP_FLAG_HEARTBEAT_VTAG_FAILED) {
 				ct->proto.sctp.flags &= ~SCTP_FLAG_HEARTBEAT_VTAG_FAILED;
 			}
-		} else if (sch->type == SCTP_CID_DATA || sch->type == SCTP_CID_SACK) {
-			if (ct->proto.sctp.vtag[dir] == 0) {
-				pr_debug("Setting vtag %x for dir %d\n", sh->vtag, dir);
-				ct->proto.sctp.vtag[dir] = sh->vtag;
-			}
 		}
 
 		old_state = ct->proto.sctp.state;
@@ -518,8 +497,12 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 		}
 
 		ct->proto.sctp.state = new_state;
-		if (old_state != new_state)
+		if (old_state != new_state) {
 			nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
+			if (new_state == SCTP_CONNTRACK_ESTABLISHED &&
+			    !test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
+				nf_conntrack_event_cache(IPCT_ASSURED, ct);
+		}
 	}
 	spin_unlock_bh(&ct->lock);
@@ -533,14 +516,6 @@ int nf_conntrack_sctp_packet(struct nf_conn *ct,
 
 	nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
 
-	if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
-	    dir == IP_CT_DIR_REPLY &&
-	    new_state == SCTP_CONNTRACK_ESTABLISHED) {
-		pr_debug("Setting assured bit\n");
-		set_bit(IPS_ASSURED_BIT, &ct->status);
-		nf_conntrack_event_cache(IPCT_ASSURED, ct);
-	}
-
 	return NF_ACCEPT;
 
 out_unlock:
@@ -701,7 +676,6 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
 	[CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT]	= { .type = NLA_U32 },
 	[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED]	= { .type = NLA_U32 },
-	[CTA_TIMEOUT_SCTP_DATA_SENT]		= { .type = NLA_U32 },
 };
 #endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
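Cases (B) and (C) above share one rule: the vtag must match our own tag when the T flag is clear and the peer's tag when it is set. A freestanding restatement of that predicate (toy flag constant and hypothetical helper name, not the kernel code):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define CHUNK_FLAG_T 0x01 /* toy stand-in for the real T bit */

    /* Sec 8.5.1 (B)/(C): ABORT and SHUTDOWN COMPLETE normally carry the
     * receiver's vtag, but the sender's vtag when the T flag is set.
     */
    static bool vtag_valid(uint32_t vtag, uint32_t own, uint32_t peer,
                           uint8_t flags)
    {
        if (flags & CHUNK_FLAG_T)
            return vtag == peer;
        return vtag == own;
    }

    int main(void)
    {
        printf("%d %d\n", vtag_valid(7, 7, 9, 0),
               vtag_valid(9, 7, 9, CHUNK_FLAG_T)); /* 1 1 */
        return 0;
    }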
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 0250725e38a4..460294bd4b60 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -601,8 +601,6 @@ enum nf_ct_sysctl_index {
 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_RECD,
 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT,
 	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_SENT,
-	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED,
-	NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT,
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 	NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST,
@@ -887,18 +885,6 @@ static struct ctl_table nf_ct_sysctl_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
-	[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_HEARTBEAT_ACKED] = {
-		.procname	= "nf_conntrack_sctp_timeout_heartbeat_acked",
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
-	[NF_SYSCTL_CT_PROTO_TIMEOUT_SCTP_DATA_SENT] = {
-		.procname	= "nf_conntrack_sctp_timeout_data_sent",
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
 #endif
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 	[NF_SYSCTL_CT_PROTO_TIMEOUT_DCCP_REQUEST] = {
@@ -1042,8 +1028,6 @@ static void nf_conntrack_standalone_init_sctp_sysctl(struct net *net,
 	XASSIGN(SHUTDOWN_RECD, sn);
 	XASSIGN(SHUTDOWN_ACK_SENT, sn);
 	XASSIGN(HEARTBEAT_SENT, sn);
-	XASSIGN(HEARTBEAT_ACKED, sn);
-	XASSIGN(DATA_SENT, sn);
 #undef XASSIGN
 #endif
 }
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 7325bee7d144..19ea4d3c3553 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -38,10 +38,12 @@ static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
 	return !nft_rbtree_interval_end(rbe);
 }
 
-static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
-			     const struct nft_rbtree_elem *interval)
+static int nft_rbtree_cmp(const struct nft_set *set,
+			  const struct nft_rbtree_elem *e1,
+			  const struct nft_rbtree_elem *e2)
 {
-	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
+	return memcmp(nft_set_ext_key(&e1->ext), nft_set_ext_key(&e2->ext),
+		      set->klen);
 }
 
 static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
@@ -52,7 +54,6 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
 	const struct nft_rbtree_elem *rbe, *interval = NULL;
 	u8 genmask = nft_genmask_cur(net);
 	const struct rb_node *parent;
-	const void *this;
 	int d;
 
 	parent = rcu_dereference_raw(priv->root.rb_node);
@@ -62,12 +63,11 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
 
 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
 
-		this = nft_set_ext_key(&rbe->ext);
-		d = memcmp(this, key, set->klen);
+		d = memcmp(nft_set_ext_key(&rbe->ext), key, set->klen);
 		if (d < 0) {
 			parent = rcu_dereference_raw(parent->rb_left);
 			if (interval &&
-			    nft_rbtree_equal(set, this, interval) &&
+			    !nft_rbtree_cmp(set, rbe, interval) &&
 			    nft_rbtree_interval_end(rbe) &&
 			    nft_rbtree_interval_start(interval))
 				continue;
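nft_rbtree_cmp folds the old boolean equality helper into a memcmp-style three-way compare, so a single helper serves both equality tests (!cmp) and tree ordering. A standalone sketch of the idiom with toy fixed-size keys:

    #include <stdio.h>
    #include <string.h>

    struct elem { unsigned char key[4]; };

    /* <0 / 0 / >0 like memcmp: one helper for ordering and equality (!cmp) */
    static int elem_cmp(const struct elem *a, const struct elem *b)
    {
        return memcmp(a->key, b->key, sizeof(a->key));
    }

    int main(void)
    {
        struct elem a = { { 1, 2, 3, 4 } }, b = { { 1, 2, 3, 5 } };

        printf("%d %d\n", elem_cmp(&a, &b) < 0, !elem_cmp(&a, &a)); /* 1 1 */
        return 0;
    }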
@@ -215,154 +215,216 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
 	return rbe;
 }
 
+static int nft_rbtree_gc_elem(const struct nft_set *__set,
+			      struct nft_rbtree *priv,
+			      struct nft_rbtree_elem *rbe)
+{
+	struct nft_set *set = (struct nft_set *)__set;
+	struct rb_node *prev = rb_prev(&rbe->node);
+	struct nft_rbtree_elem *rbe_prev;
+	struct nft_set_gc_batch *gcb;
+
+	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
+	if (!gcb)
+		return -ENOMEM;
+
+	/* search for the expired end interval coming before this element. */
+	do {
+		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+		if (nft_rbtree_interval_end(rbe_prev))
+			break;
+
+		prev = rb_prev(prev);
+	} while (prev != NULL);
+
+	rb_erase(&rbe_prev->node, &priv->root);
+	rb_erase(&rbe->node, &priv->root);
+	atomic_sub(2, &set->nelems);
+
+	nft_set_gc_batch_add(gcb, rbe);
+	nft_set_gc_batch_complete(gcb);
+
+	return 0;
+}
+
+static bool nft_rbtree_update_first(const struct nft_set *set,
+				    struct nft_rbtree_elem *rbe,
+				    struct rb_node *first)
+{
+	struct nft_rbtree_elem *first_elem;
+
+	first_elem = rb_entry(first, struct nft_rbtree_elem, node);
+	/* this element is closest to where the new element is to be inserted:
+	 * update the first element for the node list path.
+	 */
+	if (nft_rbtree_cmp(set, rbe, first_elem) < 0)
+		return true;
+
+	return false;
+}
+
 static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 			       struct nft_rbtree_elem *new,
 			       struct nft_set_ext **ext)
 {
-	bool overlap = false, dup_end_left = false, dup_end_right = false;
+	struct nft_rbtree_elem *rbe, *rbe_le = NULL, *rbe_ge = NULL;
+	struct rb_node *node, *parent, **p, *first = NULL;
 	struct nft_rbtree *priv = nft_set_priv(set);
 	u8 genmask = nft_genmask_next(net);
-	struct nft_rbtree_elem *rbe;
-	struct rb_node *parent, **p;
-	int d;
+	int d, err;
 
-	/* Detect overlaps as we descend the tree. Set the flag in these cases:
-	 *
-	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
-	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
-	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
-	 *
-	 * and clear it later on, as we eventually reach the points indicated by
-	 * '?' above, in the cases described below. We'll always meet these
-	 * later, locally, due to tree ordering, and overlaps for the intervals
-	 * that are the closest together are always evaluated last.
-	 *
-	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
-	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
-	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
-	 *            '--' no nodes falling in this range
-	 * b4.          >|_ _   !  (insert start before existing start)
-	 *
-	 * Case a3. resolves to b3.:
-	 * - if the inserted start element is the leftmost, because the '0'
-	 *   element in the tree serves as end element
-	 * - otherwise, if an existing end is found immediately to the left. If
-	 *   there are existing nodes in between, we need to further descend the
-	 *   tree before we can conclude the new start isn't causing an overlap
-	 *
-	 * or to b4., which, preceded by a3., means we already traversed one or
-	 * more existing intervals entirely, from the right.
-	 *
-	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
-	 * in that order.
-	 *
-	 * The flag is also cleared in two special cases:
-	 *
-	 * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
-	 * b6. |__ _ >|!__ _ _   (insert end right after existing start)
-	 *
-	 * which always happen as last step and imply that no further
-	 * overlapping is possible.
-	 *
-	 * Another special case comes from the fact that start elements matching
-	 * an already existing start element are allowed: insertion is not
-	 * performed but we return -EEXIST in that case, and the error will be
-	 * cleared by the caller if NLM_F_EXCL is not present in the request.
-	 * This way, request for insertion of an exact overlap isn't reported as
-	 * error to userspace if not desired.
-	 *
-	 * However, if the existing start matches a pre-existing start, but the
-	 * end element doesn't match the corresponding pre-existing end element,
-	 * we need to report a partial overlap. This is a local condition that
-	 * can be noticed without need for a tracking flag, by checking for a
-	 * local duplicated end for a corresponding start, from left and right,
-	 * separately.
-	 */
+	/* Descend the tree to search for an existing element greater than the
+	 * key value of the new element. This is the first element from which
+	 * to walk the ordered elements in search of a possible overlap.
+	 */
-	parent = NULL;
 	p = &priv->root.rb_node;
 	while (*p != NULL) {
 		parent = *p;
 		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
-		d = memcmp(nft_set_ext_key(&rbe->ext),
-			   nft_set_ext_key(&new->ext),
-			   set->klen);
+		d = nft_rbtree_cmp(set, rbe, new);
+
 		if (d < 0) {
 			p = &parent->rb_left;
-
-			if (nft_rbtree_interval_start(new)) {
-				if (nft_rbtree_interval_end(rbe) &&
-				    nft_set_elem_active(&rbe->ext, genmask) &&
-				    !nft_set_elem_expired(&rbe->ext) && !*p)
-					overlap = false;
-			} else {
-				if (dup_end_left && !*p)
-					return -ENOTEMPTY;
-
-				overlap = nft_rbtree_interval_end(rbe) &&
-					  nft_set_elem_active(&rbe->ext,
-							      genmask) &&
-					  !nft_set_elem_expired(&rbe->ext);
-
-				if (overlap) {
-					dup_end_right = true;
-					continue;
-				}
-			}
 		} else if (d > 0) {
-			p = &parent->rb_right;
+			if (!first ||
+			    nft_rbtree_update_first(set, rbe, first))
+				first = &rbe->node;
 
-			if (nft_rbtree_interval_end(new)) {
-				if (dup_end_right && !*p)
-					return -ENOTEMPTY;
-
-				overlap = nft_rbtree_interval_end(rbe) &&
-					  nft_set_elem_active(&rbe->ext,
-							      genmask) &&
-					  !nft_set_elem_expired(&rbe->ext);
-
-				if (overlap) {
-					dup_end_left = true;
-					continue;
-				}
-			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
-				   !nft_set_elem_expired(&rbe->ext)) {
-				overlap = nft_rbtree_interval_end(rbe);
-			}
+			p = &parent->rb_right;
 		} else {
-			if (nft_rbtree_interval_end(rbe) &&
-			    nft_rbtree_interval_start(new)) {
+			if (nft_rbtree_interval_end(rbe))
 				p = &parent->rb_left;
-
-				if (nft_set_elem_active(&rbe->ext, genmask) &&
-				    !nft_set_elem_expired(&rbe->ext))
-					overlap = false;
-			} else if (nft_rbtree_interval_start(rbe) &&
-				   nft_rbtree_interval_end(new)) {
+			else
 				p = &parent->rb_right;
+		}
+	}
+
+	if (!first)
+		first = rb_first(&priv->root);
+
+	/* Detect overlap by going through the list of valid tree nodes.
+	 * Values stored in the tree are in reversed order, starting from
+	 * highest to lowest value.
+	 */
+	for (node = first; node != NULL; node = rb_next(node)) {
+		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
-				if (nft_set_elem_active(&rbe->ext, genmask) &&
-				    !nft_set_elem_expired(&rbe->ext))
-					overlap = false;
-			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
-				   !nft_set_elem_expired(&rbe->ext)) {
-				*ext = &rbe->ext;
-				return -EEXIST;
-			} else {
-				overlap = false;
-				if (nft_rbtree_interval_end(rbe))
-					p = &parent->rb_left;
-				else
-					p = &parent->rb_right;
+		if (!nft_set_elem_active(&rbe->ext, genmask))
+			continue;
+
+		/* perform garbage collection to avoid bogus overlap reports. */
+		if (nft_set_elem_expired(&rbe->ext)) {
+			err = nft_rbtree_gc_elem(set, priv, rbe);
+			if (err < 0)
+				return err;
+
+			continue;
+		}
+
+		d = nft_rbtree_cmp(set, rbe, new);
+		if (d == 0) {
+			/* Matching end element: no need to look for an
+			 * overlapping greater or equal element.
+			 */
+			if (nft_rbtree_interval_end(rbe)) {
+				rbe_le = rbe;
+				break;
+			}
+
+			/* first element that is greater or equal to key value. */
+			if (!rbe_ge) {
+				rbe_ge = rbe;
+				continue;
+			}
+
+			/* this is a closer greater-or-equal element; update it. */
+			if (nft_rbtree_cmp(set, rbe_ge, new) != 0) {
+				rbe_ge = rbe;
+				continue;
+			}
+
+			/* element is equal to the key value; make sure the flags
+			 * are the same: an existing greater-or-equal start element
+			 * must not be replaced by a greater-or-equal end element.
+			 */
+			if ((nft_rbtree_interval_start(new) &&
+			     nft_rbtree_interval_start(rbe_ge)) ||
+			    (nft_rbtree_interval_end(new) &&
+			     nft_rbtree_interval_end(rbe_ge))) {
+				rbe_ge = rbe;
+				continue;
 			}
+		} else if (d > 0) {
+			/* annotate element greater than the new element. */
+			rbe_ge = rbe;
+			continue;
+		} else if (d < 0) {
+			/* annotate element less than the new element. */
+			rbe_le = rbe;
+			break;
+		}
+	}
 
-		dup_end_left = dup_end_right = false;
+	/* - new start element matching existing start element: full overlap
+	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+	 */
+	if (rbe_ge && !nft_rbtree_cmp(set, new, rbe_ge) &&
+	    nft_rbtree_interval_start(rbe_ge) == nft_rbtree_interval_start(new)) {
+		*ext = &rbe_ge->ext;
+		return -EEXIST;
 	}
 
-	if (overlap)
+	/* - new end element matching existing end element: full overlap
+	 *   reported as -EEXIST, cleared by caller if NLM_F_EXCL is not given.
+	 */
+	if (rbe_le && !nft_rbtree_cmp(set, new, rbe_le) &&
+	    nft_rbtree_interval_end(rbe_le) == nft_rbtree_interval_end(new)) {
+		*ext = &rbe_le->ext;
+		return -EEXIST;
+	}
+
+	/* - new start element with existing closest, less or equal key value
+	 *   being a start element: partial overlap, reported as -ENOTEMPTY.
+	 *   Anonymous sets allow for two consecutive start elements since they
+	 *   are constant; skip them to avoid bogus overlap reports.
+	 */
+	if (!nft_set_is_anonymous(set) && rbe_le &&
+	    nft_rbtree_interval_start(rbe_le) && nft_rbtree_interval_start(new))
+		return -ENOTEMPTY;
+
+	/* - new end element with existing closest, less or equal key value
+	 *   being an end element: partial overlap, reported as -ENOTEMPTY.
+	 */
+	if (rbe_le &&
+	    nft_rbtree_interval_end(rbe_le) && nft_rbtree_interval_end(new))
 		return -ENOTEMPTY;
 
+	/* - new end element with existing closest, greater or equal key value
+	 *   being an end element: partial overlap, reported as -ENOTEMPTY
+	 */
+	if (rbe_ge &&
+	    nft_rbtree_interval_end(rbe_ge) && nft_rbtree_interval_end(new))
+		return -ENOTEMPTY;
+
+	/* Accepted element: pick insertion point depending on key value */
+	parent = NULL;
+	p = &priv->root.rb_node;
+	while (*p != NULL) {
+		parent = *p;
+		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
+		d = nft_rbtree_cmp(set, rbe, new);
+
+		if (d < 0)
+			p = &parent->rb_left;
+		else if (d > 0)
+			p = &parent->rb_right;
+		else if (nft_rbtree_interval_end(rbe))
+			p = &parent->rb_left;
+		else
+			p = &parent->rb_right;
+	}
+
 	rb_link_node_rcu(&new->node, parent, p);
 	rb_insert_color(&new->node, &priv->root);
 	return 0;
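A compressed restatement of the overlap rules above, for integer-keyed interval endpoints (a sketch of the checks only; it ignores the anonymous-set exception and all element lifetime handling):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct elem { int key; bool is_end; };

    /* le/ge: closest active elements with key <= / >= new_e->key, or NULL;
     * a paraphrase of the rbe_le/rbe_ge classification above, nothing more.
     */
    static int check_overlap(const struct elem *new_e,
                             const struct elem *le, const struct elem *ge)
    {
        if (ge && ge->key == new_e->key && ge->is_end == new_e->is_end)
            return -EEXIST;    /* full overlap, same kind of element */
        if (le && le->key == new_e->key && le->is_end == new_e->is_end)
            return -EEXIST;
        if (le && !le->is_end && !new_e->is_end)
            return -ENOTEMPTY; /* two starts in a row: partial overlap */
        if (le && le->is_end && new_e->is_end)
            return -ENOTEMPTY; /* two ends in a row: partial overlap */
        if (ge && ge->is_end && new_e->is_end)
            return -ENOTEMPTY;
        return 0;
    }

    int main(void)
    {
        struct elem s10 = { 10, false }, e20 = { 20, true }, s15 = { 15, false };

        printf("%d\n", check_overlap(&s15, &s10, &e20)); /* -ENOTEMPTY */
        return 0;
    }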
@@ -501,23 +563,37 @@ static void nft_rbtree_gc(struct work_struct *work)
 	struct nft_rbtree *priv;
 	struct rb_node *node;
 	struct nft_set *set;
+	struct net *net;
+	u8 genmask;
 
 	priv = container_of(work, struct nft_rbtree, gc_work.work);
 	set  = nft_set_container_of(priv);
+	net  = read_pnet(&set->net);
+	genmask = nft_genmask_cur(net);
 
 	write_lock_bh(&priv->lock);
 	write_seqcount_begin(&priv->count);
 	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
 		rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
+		if (!nft_set_elem_active(&rbe->ext, genmask))
+			continue;
+
+		/* elements are reversed in the rbtree for historical reasons,
+		 * from highest to lowest value, that is why the end element is
+		 * always visited before the start element.
+		 */
 		if (nft_rbtree_interval_end(rbe)) {
 			rbe_end = rbe;
 			continue;
 		}
 		if (!nft_set_elem_expired(&rbe->ext))
 			continue;
-		if (nft_set_elem_mark_busy(&rbe->ext))
+
+		if (nft_set_elem_mark_busy(&rbe->ext)) {
+			rbe_end = NULL;
 			continue;
+		}
 
 		if (rbe_prev) {
 			rb_erase(&rbe_prev->node, &priv->root);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index bca2a470ccad..c64277659753 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -580,7 +580,9 @@ static int netlink_insert(struct sock *sk, u32 portid)
 	if (nlk_sk(sk)->bound)
 		goto err;
 
-	nlk_sk(sk)->portid = portid;
+	/* portid can be read locklessly from netlink_getname(). */
+	WRITE_ONCE(nlk_sk(sk)->portid, portid);
+
 	sock_hold(sk);
 
 	err = __netlink_insert(table, sk);
@@ -1096,9 +1098,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 		return -EINVAL;
 
 	if (addr->sa_family == AF_UNSPEC) {
-		sk->sk_state	= NETLINK_UNCONNECTED;
-		nlk->dst_portid	= 0;
-		nlk->dst_group  = 0;
+		/* paired with READ_ONCE() in netlink_getsockbyportid() */
+		WRITE_ONCE(sk->sk_state, NETLINK_UNCONNECTED);
+		/* dst_portid and dst_group can be read locklessly */
+		WRITE_ONCE(nlk->dst_portid, 0);
+		WRITE_ONCE(nlk->dst_group, 0);
 		return 0;
 	}
 	if (addr->sa_family != AF_NETLINK)
@@ -1119,9 +1123,11 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 		err = netlink_autobind(sock);
 
 	if (err == 0) {
-		sk->sk_state	= NETLINK_CONNECTED;
-		nlk->dst_portid = nladdr->nl_pid;
-		nlk->dst_group  = ffs(nladdr->nl_groups);
+		/* paired with READ_ONCE() in netlink_getsockbyportid() */
+		WRITE_ONCE(sk->sk_state, NETLINK_CONNECTED);
+		/* dst_portid and dst_group can be read locklessly */
+		WRITE_ONCE(nlk->dst_portid, nladdr->nl_pid);
+		WRITE_ONCE(nlk->dst_group, ffs(nladdr->nl_groups));
 	}
 
 	return err;
@@ -1138,10 +1144,12 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
 	nladdr->nl_pad = 0;
 
 	if (peer) {
-		nladdr->nl_pid = nlk->dst_portid;
-		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
+		/* Paired with WRITE_ONCE() in netlink_connect() */
+		nladdr->nl_pid = READ_ONCE(nlk->dst_portid);
+		nladdr->nl_groups = netlink_group_mask(READ_ONCE(nlk->dst_group));
 	} else {
-		nladdr->nl_pid = nlk->portid;
+		/* Paired with WRITE_ONCE() in netlink_insert() */
+		nladdr->nl_pid = READ_ONCE(nlk->portid);
 		netlink_lock_table();
 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
 		netlink_unlock_table();
@@ -1168,8 +1176,9 @@ static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
 
 	/* Don't bother queuing skb if kernel socket has no input function */
 	nlk = nlk_sk(sock);
-	if (sock->sk_state == NETLINK_CONNECTED &&
-	    nlk->dst_portid != nlk_sk(ssk)->portid) {
+	/* dst_portid and sk_state can be changed in netlink_connect() */
+	if (READ_ONCE(sock->sk_state) == NETLINK_CONNECTED &&
+	    READ_ONCE(nlk->dst_portid) != nlk_sk(ssk)->portid) {
 		sock_put(sock);
 		return ERR_PTR(-ECONNREFUSED);
 	}
@@ -1886,8 +1895,9 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 			goto out;
 		netlink_skb_flags |= NETLINK_SKB_DST;
 	} else {
-		dst_portid = nlk->dst_portid;
-		dst_group = nlk->dst_group;
+		/* Paired with WRITE_ONCE() in netlink_connect() */
+		dst_portid = READ_ONCE(nlk->dst_portid);
+		dst_group = READ_ONCE(nlk->dst_group);
 	}
 
 	/* Paired with WRITE_ONCE() in netlink_insert() */
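The WRITE_ONCE()/READ_ONCE() annotations follow the usual pattern for a field written under a lock but read locklessly: marked accesses keep the compiler from tearing, fusing, or re-reading the value. A userspace sketch with stand-in macros (the volatile-cast definitions are an assumption, not the kernel's exact implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-ins for the kernel macros. */
    #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)       (*(volatile const __typeof__(x) *)&(x))

    struct conn {
        uint32_t dst_portid; /* written under a lock, read locklessly */
    };

    int main(void)
    {
        struct conn c = { 0 };

        WRITE_ONCE(c.dst_portid, 42);            /* writer: lock held */
        printf("%u\n", READ_ONCE(c.dst_portid)); /* lockless reader   */
        return 0;
    }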
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c
index a8da88db7893..4e7c968cde2d 100644
--- a/net/netrom/nr_timer.c
+++ b/net/netrom/nr_timer.c
@@ -121,6 +121,7 @@ static void nr_heartbeat_expiry(struct timer_list *t)
 		   is accepted() it isn't 'dead' so doesn't get removed. */
 		if (sock_flag(sk, SOCK_DESTROY) ||
 		    (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_DEAD))) {
+			sock_hold(sk);
 			bh_unlock_sock(sk);
 			nr_destroy_socket(sk);
 			goto out;
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 9a11a499ea2d..c322a61eaeea 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1700,7 +1700,6 @@ static void taprio_reset(struct Qdisc *sch)
 	int i;
 
 	hrtimer_cancel(&q->advance_timer);
-	qdisc_synchronize(sch);
 
 	if (q->qdiscs) {
 		for (i = 0; i < dev->num_tx_queues; i++)
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 59e653b528b1..6b95d3ba8fe1 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -73,6 +73,12 @@ int sctp_bind_addr_copy(struct net *net, struct sctp_bind_addr *dest,
 		}
 	}
 
+	/* If somehow no addresses were found that can be used with this
+	 * scope, it's an error.
+	 */
+	if (list_empty(&dest->address_list))
+		error = -ENETUNREACH;
+
 out:
 	if (error)
 		sctp_bind_addr_clean(dest);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 3b55502b2965..5c7ad301d742 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -482,6 +482,12 @@ static int x25_listen(struct socket *sock, int backlog)
 	int rc = -EOPNOTSUPP;
 
 	lock_sock(sk);
+	if (sock->state != SS_UNCONNECTED) {
+		rc = -EINVAL;
+		release_sock(sk);
+		return rc;
+	}
+
 	if (sk->sk_state != TCP_LISTEN) {
 		memset(&x25_sk(sk)->dest_addr, 0, X25_ADDR_LEN);
 		sk->sk_max_ack_backlog = backlog;
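The x25 change is a plain state guard: listen() is refused unless the socket is still unconnected, closing the race with a concurrent connect(). A toy locked version of the same guard (hypothetical types and states):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    enum toy_state { TS_UNCONNECTED, TS_CONNECTING, TS_LISTENING };

    struct toy_sock {
        pthread_mutex_t lock;
        enum toy_state state;
        int backlog;
    };

    /* Refuse to listen unless still unconnected, mirroring the x25 fix. */
    static int toy_listen(struct toy_sock *ts, int backlog)
    {
        int rc = 0;

        pthread_mutex_lock(&ts->lock);
        if (ts->state != TS_UNCONNECTED) {
            rc = -EINVAL; /* connect() already raced us */
        } else {
            ts->backlog = backlog;
            ts->state = TS_LISTENING;
        }
        pthread_mutex_unlock(&ts->lock);
        return rc;
    }

    int main(void)
    {
        struct toy_sock ts = { PTHREAD_MUTEX_INITIALIZER, TS_UNCONNECTED, 0 };

        printf("%d %d\n", toy_listen(&ts, 8), toy_listen(&ts, 8)); /* 0 -22 */
        return 0;
    }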
diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs
index 29bf9c2e8aee..30103325696d 100644
--- a/rust/kernel/print.rs
+++ b/rust/kernel/print.rs
@@ -142,17 +142,24 @@ pub fn call_printk_cont(args: fmt::Arguments<'_>) {
 macro_rules! print_macro (
     // The non-continuation cases (most of them, e.g. `INFO`).
     ($format_string:path, false, $($arg:tt)+) => (
-        // SAFETY: This hidden macro should only be called by the documented
-        // printing macros which ensure the format string is one of the fixed
-        // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
-        // by the `module!` proc macro or fixed values defined in a kernel
-        // crate.
-        unsafe {
-            $crate::print::call_printk(
-                &$format_string,
-                crate::__LOG_PREFIX,
-                format_args!($($arg)+),
-            );
+        // To remain sound, `arg`s must be expanded outside the `unsafe` block.
+        // Typically one would use a `let` binding for that; however, `format_args!`
+        // takes borrows on the arguments, but does not extend the scope of temporaries.
+        // Therefore, a `match` expression is used to keep them around, since
+        // the scrutinee is kept until the end of the `match`.
+        match format_args!($($arg)+) {
+            // SAFETY: This hidden macro should only be called by the documented
+            // printing macros which ensure the format string is one of the fixed
+            // ones. All `__LOG_PREFIX`s are null-terminated as they are generated
+            // by the `module!` proc macro or fixed values defined in a kernel
+            // crate.
+            args => unsafe {
+                $crate::print::call_printk(
+                    &$format_string,
+                    crate::__LOG_PREFIX,
+                    args,
+                );
+            }
         }
     );
diff --git a/sound/soc/codecs/es8326.c b/sound/soc/codecs/es8326.c
old mode 100755
new mode 100644
index 9ddf6a35e91c..9ddf6a35e91c
--- a/sound/soc/codecs/es8326.c
+++ b/sound/soc/codecs/es8326.c
diff --git a/sound/soc/codecs/es8326.h b/sound/soc/codecs/es8326.h
old mode 100755
new mode 100644
index 8e5ffe5ee10d..8e5ffe5ee10d
--- a/sound/soc/codecs/es8326.h
+++ b/sound/soc/codecs/es8326.h
diff --git a/tools/testing/selftests/amd-pstate/Makefile b/tools/testing/selftests/amd-pstate/Makefile
index 5f195ee756d6..5fd1424db37d 100644
--- a/tools/testing/selftests/amd-pstate/Makefile
+++ b/tools/testing/selftests/amd-pstate/Makefile
@@ -7,11 +7,6 @@ all:
 uname_M := $(shell uname -m 2>/dev/null || echo not)
 ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/)
 
-ifeq (x86,$(ARCH))
-TEST_GEN_FILES += ../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py
-TEST_GEN_FILES += ../../../power/x86/intel_pstate_tracer/intel_pstate_tracer.py
-endif
-
 TEST_PROGS := run.sh
 TEST_FILES := basic.sh tbench.sh gitsource.sh
diff --git a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
index ea0978f22db8..251794f83719 100644
--- a/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
+++ b/tools/testing/selftests/kvm/x86_64/nx_huge_pages_test.c
@@ -241,7 +241,7 @@ int main(int argc, char **argv)
 	while ((opt = getopt(argc, argv, "hp:t:r")) != -1) {
 		switch (opt) {
 		case 'p':
-			reclaim_period_ms = atoi_non_negative("Reclaim period", optarg);
+			reclaim_period_ms = atoi_positive("Reclaim period", optarg);
 			break;
 		case 't':
 			token = atoi_paranoid(optarg);
diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index dae510c263b4..13c75dc18c10 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -434,6 +434,7 @@ static void *juggle_shinfo_state(void *arg)
 int main(int argc, char *argv[])
 {
 	struct timespec min_ts, max_ts, vm_ts;
+	struct kvm_xen_hvm_attr evt_reset;
 	struct kvm_vm *vm;
 	pthread_t thread;
 	bool verbose;
@@ -962,10 +963,8 @@ int main(int argc, char *argv[])
 	}
 
  done:
-	struct kvm_xen_hvm_attr evt_reset = {
-		.type = KVM_XEN_ATTR_TYPE_EVTCHN,
-		.u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
-	};
+	evt_reset.type = KVM_XEN_ATTR_TYPE_EVTCHN;
+	evt_reset.u.evtchn.flags = KVM_XEN_EVTCHN_RESET;
 	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
 
 	alarm(0);
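Hoisting evt_reset to the top of main() matters because, before C23, a label must be followed by a statement rather than a declaration, and a goto would in any case jump over the initializer. A minimal illustration with a hypothetical struct:

    #include <stdio.h>

    struct attr { int type; int flags; };

    int main(void)
    {
        struct attr evt; /* declared up front, assigned at the label */
        int fail = 1;

        if (fail)
            goto done;

        printf("work done\n");
    done:
        /* a declaration with an initializer here would be rejected by
         * pre-C23 compilers, and goto would skip its initialization
         */
        evt.type = 1;
        evt.flags = 2;
        printf("cleanup %d %d\n", evt.type, evt.flags);
        return 0;
    }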
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 495ceabffe88..9584eb57e0ed 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -336,7 +336,7 @@ static int kvm_vfio_has_attr(struct kvm_device *dev,
 	return -ENXIO;
 }
 
-static void kvm_vfio_destroy(struct kvm_device *dev)
+static void kvm_vfio_release(struct kvm_device *dev)
 {
 	struct kvm_vfio *kv = dev->private;
 	struct kvm_vfio_group *kvg, *tmp;
@@ -355,7 +355,7 @@ static void kvm_vfio_destroy(struct kvm_device *dev)
 	kvm_vfio_update_coherency(dev);
 
 	kfree(kv);
-	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .destroy */
+	kfree(dev);	/* alloc by kvm_ioctl_create_device, free by .release */
 }
 
 static int kvm_vfio_create(struct kvm_device *dev, u32 type);
@@ -363,7 +363,7 @@ static int kvm_vfio_create(struct kvm_device *dev, u32 type);
 static struct kvm_device_ops kvm_vfio_ops = {
 	.name = "kvm-vfio",
 	.create = kvm_vfio_create,
-	.destroy = kvm_vfio_destroy,
+	.release = kvm_vfio_release,
 	.set_attr = kvm_vfio_set_attr,
 	.has_attr = kvm_vfio_has_attr,
 };