212 files changed, 1721 insertions, 1054 deletions
@@ -627,6 +627,10 @@ S: 48287 Sawleaf  S: Fremont, California 94539  S: USA +N: Tomas Cech +E: sleep_walker@suse.com +D: arm/palm treo support +  N: Florent Chabaud  E: florent.chabaud@polytechnique.org  D: software suspend diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml index c388ae5da1e4..c9c346e6228e 100644 --- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml +++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-display-engine.yaml @@ -94,6 +94,7 @@ if:            - allwinner,sun8i-a83t-display-engine            - allwinner,sun8i-r40-display-engine            - allwinner,sun9i-a80-display-engine +          - allwinner,sun20i-d1-display-engine            - allwinner,sun50i-a64-display-engine  then: diff --git a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml index 8cd0adbf7021..7029cb1f38ff 100644 --- a/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#  title: Qualcomm Atheros ath9k wireless devices Generic Binding  maintainers: -  - Kalle Valo <kvalo@codeaurora.org> +  - Toke Høiland-Jørgensen <toke@toke.dk>  description: |    This node provides properties for configuring the ath9k wireless device. diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml index 8c01fdba134b..a677b056f112 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml @@ -9,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#  title: Qualcomm Technologies ath11k wireless devices Generic Binding  maintainers: -  - Kalle Valo <kvalo@codeaurora.org> +  - Kalle Valo <kvalo@kernel.org>  description: |    These are dt entries for Qualcomm Technologies, Inc. 
IEEE 802.11ax diff --git a/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml index e9a533080b32..ef18a572a1ff 100644 --- a/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml +++ b/Documentation/devicetree/bindings/sound/qcom,lpass-cpu.yaml @@ -25,12 +25,12 @@ properties:        - qcom,sc7280-lpass-cpu    reg: -    minItems: 2 +    minItems: 1      maxItems: 6      description: LPAIF core registers    reg-names: -    minItems: 2 +    minItems: 1      maxItems: 6    clocks: @@ -42,12 +42,12 @@ properties:      maxItems: 10    interrupts: -    minItems: 2 +    minItems: 1      maxItems: 4      description: LPAIF DMA buffer interrupt    interrupt-names: -    minItems: 2 +    minItems: 1      maxItems: 4    qcom,adsp: diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 4d19b19bcc08..73a4176144b3 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -301,7 +301,7 @@ through which it can issue requests and negotiate::  		void (*issue_read)(struct netfs_io_subrequest *subreq);  		bool (*is_still_valid)(struct netfs_io_request *rreq);  		int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, -					 struct folio *folio, void **_fsdata); +					 struct folio **foliop, void **_fsdata);  		void (*done)(struct netfs_io_request *rreq);  	}; @@ -381,8 +381,10 @@ The operations are as follows:     allocated/grabbed the folio to be modified to allow the filesystem to flush     conflicting state before allowing it to be modified. -   It should return 0 if everything is now fine, -EAGAIN if the folio should be -   regrabbed and any other error code to abort the operation. +   It may unlock and discard the folio it was given and set the caller's folio +   pointer to NULL.  It should return 0 if everything is now fine (``*foliop`` +   left set) or the op should be retried (``*foliop`` cleared) and any other +   error code to abort the operation.   * ``done`` diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index 9f41961d11d5..b3a534ed0e7c 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -1085,7 +1085,7 @@ cipso_cache_enable - BOOLEAN  cipso_cache_bucket_size - INTEGER  	The CIPSO label cache consists of a fixed size hash table with each  	hash bucket containing a number of cache entries.  This variable limits -	the number of entries in each hash bucket; the larger the value the +	the number of entries in each hash bucket; the larger the value is, the  	more CIPSO label mappings that can be cached.  When the number of  	entries in a given hash bucket reaches this limit adding new entries  	causes the oldest entry in the bucket to be removed to make room. @@ -1179,7 +1179,7 @@ ip_autobind_reuse - BOOLEAN  	option should only be set by experts.  	Default: 0 -ip_dynaddr - BOOLEAN +ip_dynaddr - INTEGER  	If set non-zero, enables support for dynamic addresses.  	If set to a non-zero value larger than 1, a kernel log  	message will be printed when dynamic address rewriting diff --git a/Documentation/sound/soc/dai.rst b/Documentation/sound/soc/dai.rst index 009b07e5a0f3..bf8431386d26 100644 --- a/Documentation/sound/soc/dai.rst +++ b/Documentation/sound/soc/dai.rst @@ -10,7 +10,7 @@ AC97  ====  AC97 is a five wire interface commonly found on many PC sound cards. It is -now also popular in many portable devices. 
This DAI has a reset line and time +now also popular in many portable devices. This DAI has a RESET line and time  multiplexes its data on its SDATA_OUT (playback) and SDATA_IN (capture) lines.  The bit clock (BCLK) is always driven by the CODEC (usually 12.288MHz) and the  frame (FRAME) (usually 48kHz) is always driven by the controller. Each AC97 diff --git a/MAINTAINERS b/MAINTAINERS index f679152bdbad..651616ed8ae2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1038,6 +1038,7 @@ F:	arch/arm64/boot/dts/amd/  AMD XGBE DRIVER  M:	Tom Lendacky <thomas.lendacky@amd.com> +M:	"Shyam Sundar S K" <Shyam-sundar.S-k@amd.com>  L:	netdev@vger.kernel.org  S:	Supported  F:	arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi @@ -2497,10 +2498,8 @@ F:	drivers/power/reset/oxnas-restart.c  N:	oxnas  ARM/PALM TREO SUPPORT -M:	Tomas Cech <sleep_walker@suse.com>  L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S:	Maintained -W:	http://hackndev.com +S:	Orphan  F:	arch/arm/mach-pxa/palmtreo.*  ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT @@ -14363,7 +14362,8 @@ S:	Maintained  F:	drivers/net/phy/nxp-c45-tja11xx.c  NXP FSPI DRIVER -M:	Ashish Kumar <ashish.kumar@nxp.com> +M:	Han Xu <han.xu@nxp.com> +M:	Haibo Chen <haibo.chen@nxp.com>  R:	Yogesh Gaur <yogeshgaur.83@gmail.com>  L:	linux-spi@vger.kernel.org  S:	Maintained @@ -17273,12 +17273,15 @@ N:	riscv  K:	riscv  RISC-V/MICROCHIP POLARFIRE SOC SUPPORT -M:	Lewis Hanly <lewis.hanly@microchip.com>  M:	Conor Dooley <conor.dooley@microchip.com> +M:	Daire McNamara <daire.mcnamara@microchip.com>  L:	linux-riscv@lists.infradead.org  S:	Supported  F:	arch/riscv/boot/dts/microchip/ +F:	drivers/char/hw_random/mpfs-rng.c +F:	drivers/clk/microchip/clk-mpfs.c  F:	drivers/mailbox/mailbox-mpfs.c +F:	drivers/pci/controller/pcie-microchip-host.c  F:	drivers/soc/microchip/  F:	include/soc/microchip/mpfs.h diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi index fded07f370b3..d6ba4b2a60f6 100644 --- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi +++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi @@ -226,7 +226,7 @@  		reg = <0x28>;  		#gpio-cells = <2>;  		gpio-controller; -		ngpio = <32>; +		ngpios = <62>;  	};  	sgtl5000: codec@a { diff --git a/arch/arm/boot/dts/imx6ull-colibri.dtsi b/arch/arm/boot/dts/imx6ull-colibri.dtsi index 15621e03fa4d..2c3ae715c683 100644 --- a/arch/arm/boot/dts/imx6ull-colibri.dtsi +++ b/arch/arm/boot/dts/imx6ull-colibri.dtsi @@ -166,7 +166,7 @@  	atmel_mxt_ts: touchscreen@4a {  		compatible = "atmel,maxtouch";  		pinctrl-names = "default"; -		pinctrl-0 = <&pinctrl_atmel_conn>; +		pinctrl-0 = <&pinctrl_atmel_conn &pinctrl_atmel_snvs_conn>;  		reg = <0x4a>;  		interrupt-parent = <&gpio5>;  		interrupts = <4 IRQ_TYPE_EDGE_FALLING>;       /* SODIMM 107 / INT */ @@ -331,7 +331,6 @@  	pinctrl_atmel_conn: atmelconngrp {  		fsl,pins = <  			MX6UL_PAD_JTAG_MOD__GPIO1_IO10          0xb0a0  /* SODIMM 106 */ -			MX6ULL_PAD_SNVS_TAMPER4__GPIO5_IO04     0xb0a0	/* SODIMM 107 */  		>;  	}; @@ -684,6 +683,12 @@  };  &iomuxc_snvs { +	pinctrl_atmel_snvs_conn: atmelsnvsconngrp { +		fsl,pins = < +			MX6ULL_PAD_SNVS_TAMPER4__GPIO5_IO04     0xb0a0	/* SODIMM 107 */ +		>; +	}; +  	pinctrl_snvs_gpio1: snvsgpio1grp {  		fsl,pins = <  			MX6ULL_PAD_SNVS_TAMPER6__GPIO5_IO06	0x110a0	/* SODIMM 93 */ diff --git a/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi b/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi index 4cab1b3b3b29..725dcf707b31 100644 --- a/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi +++ 
b/arch/arm/boot/dts/lan966x-kontron-kswitch-d10-mmt.dtsi @@ -87,22 +87,22 @@  	phy4: ethernet-phy@5 {  		reg = <5>; -		coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>; +		coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;  	};  	phy5: ethernet-phy@6 {  		reg = <6>; -		coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>; +		coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;  	};  	phy6: ethernet-phy@7 {  		reg = <7>; -		coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>; +		coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;  	};  	phy7: ethernet-phy@8 {  		reg = <8>; -		coma-mode-gpios = <&gpio 37 GPIO_ACTIVE_HIGH>; +		coma-mode-gpios = <&gpio 37 GPIO_OPEN_DRAIN>;  	};  }; diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi index 814ad0b46232..c3b8a6d63027 100644 --- a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi @@ -506,6 +506,8 @@  			interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;  			clocks = <&gcc GCC_BLSP1_UART2_APPS_CLK>, <&gcc GCC_BLSP1_AHB_CLK>;  			clock-names = "core", "iface"; +			pinctrl-names = "default"; +			pinctrl-0 = <&blsp1_uart2_default>;  			status = "disabled";  		}; @@ -581,6 +583,9 @@  			interrupts = <GIC_SPI 113 IRQ_TYPE_NONE>;  			clocks = <&gcc GCC_BLSP2_UART1_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;  			clock-names = "core", "iface"; +			pinctrl-names = "default", "sleep"; +			pinctrl-0 = <&blsp2_uart1_default>; +			pinctrl-1 = <&blsp2_uart1_sleep>;  			status = "disabled";  		}; @@ -599,6 +604,8 @@  			interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;  			clocks = <&gcc GCC_BLSP2_UART4_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;  			clock-names = "core", "iface"; +			pinctrl-names = "default"; +			pinctrl-0 = <&blsp2_uart4_default>;  			status = "disabled";  		}; @@ -639,6 +646,9 @@  			interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;  			clocks = <&gcc GCC_BLSP2_QUP6_I2C_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;  			clock-names = "core", "iface"; +			pinctrl-names = "default", "sleep"; +			pinctrl-0 = <&blsp2_i2c6_default>; +			pinctrl-1 = <&blsp2_i2c6_sleep>;  			#address-cells = <1>;  			#size-cells = <0>;  		}; @@ -1256,7 +1266,7 @@  				};  			}; -			blsp1_uart2_active: blsp1-uart2-active { +			blsp1_uart2_default: blsp1-uart2-default {  				rx {  					pins = "gpio5";  					function = "blsp_uart2"; @@ -1272,7 +1282,7 @@  				};  			}; -			blsp2_uart1_active: blsp2-uart1-active { +			blsp2_uart1_default: blsp2-uart1-default {  				tx-rts {  					pins = "gpio41", "gpio44";  					function = "blsp_uart7"; @@ -1295,7 +1305,7 @@  				bias-pull-down;  			}; -			blsp2_uart4_active: blsp2-uart4-active { +			blsp2_uart4_default: blsp2-uart4-default {  				tx-rts {  					pins = "gpio53", "gpio56";  					function = "blsp_uart10"; @@ -1406,7 +1416,19 @@  				bias-pull-up;  			}; -			/* BLSP2_I2C6 info is missing - nobody uses it though? 
*/ +			blsp2_i2c6_default: blsp2-i2c6-default { +				pins = "gpio87", "gpio88"; +				function = "blsp_i2c12"; +				drive-strength = <2>; +				bias-disable; +			}; + +			blsp2_i2c6_sleep: blsp2-i2c6-sleep { +				pins = "gpio87", "gpio88"; +				function = "blsp_i2c12"; +				drive-strength = <2>; +				bias-pull-up; +			};  			spi8_default: spi8_default {  				mosi { diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 89c71d419f82..659a17fc755c 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -1124,7 +1124,7 @@  				clocks = <&pmc PMC_TYPE_PERIPHERAL 55>, <&pmc PMC_TYPE_GCK 55>;  				clock-names = "pclk", "gclk";  				assigned-clocks = <&pmc PMC_TYPE_CORE PMC_I2S1_MUX>; -				assigned-parrents = <&pmc PMC_TYPE_GCK 55>; +				assigned-clock-parents = <&pmc PMC_TYPE_GCK 55>;  				status = "disabled";  			}; diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts index f19ed981da9d..3706216ffb40 100644 --- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts +++ b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts @@ -169,7 +169,7 @@  	flash@0 {  		#address-cells = <1>;  		#size-cells = <1>; -		compatible = "mxicy,mx25l1606e", "winbond,w25q128"; +		compatible = "mxicy,mx25l1606e", "jedec,spi-nor";  		reg = <0>;  		spi-max-frequency = <40000000>;  	}; diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h index f1d0a7807cd0..41536feb4392 100644 --- a/arch/arm/include/asm/domain.h +++ b/arch/arm/include/asm/domain.h @@ -112,19 +112,6 @@ static __always_inline void set_domain(unsigned int val)  }  #endif -#ifdef CONFIG_CPU_USE_DOMAINS -#define modify_domain(dom,type)					\ -	do {							\ -		unsigned int domain = get_domain();		\ -		domain &= ~domain_mask(dom);			\ -		domain = domain | domain_val(dom, type);	\ -		set_domain(domain);				\ -	} while (0) - -#else -static inline void modify_domain(unsigned dom, unsigned type)	{ } -#endif -  /*   * Generate the T (user) versions of the LDR/STR and related   * instructions (inline assembly) diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h index 92282558caf7..2b8970d8e5a2 100644 --- a/arch/arm/include/asm/mach/map.h +++ b/arch/arm/include/asm/mach/map.h @@ -27,6 +27,7 @@ enum {  	MT_HIGH_VECTORS,  	MT_MEMORY_RWX,  	MT_MEMORY_RW, +	MT_MEMORY_RO,  	MT_ROM,  	MT_MEMORY_RWX_NONCACHED,  	MT_MEMORY_RW_DTCM, diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 93051e2f402c..1408a6a15d0e 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -163,5 +163,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)  		((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1;	\  }) + +/* + * Update ITSTATE after normal execution of an IT block instruction. 
+ * + * The 8 IT state bits are split into two parts in CPSR: + *	ITSTATE<1:0> are in CPSR<26:25> + *	ITSTATE<7:2> are in CPSR<15:10> + */ +static inline unsigned long it_advance(unsigned long cpsr) +{ +	if ((cpsr & 0x06000400) == 0) { +		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ +		cpsr &= ~PSR_IT_MASK; +	} else { +		/* We need to shift left ITSTATE<4:0> */ +		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */ +		unsigned long it = cpsr & mask; +		it <<= 1; +		it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */ +		it &= mask; +		cpsr &= ~mask; +		cpsr |= it; +	} +	return cpsr; +} +  #endif /* __ASSEMBLY__ */  #endif diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index 7aa3ded4af92..6a447ac67d80 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S @@ -302,6 +302,7 @@ local_restart:  	b	ret_fast_syscall  #endif  ENDPROC(vector_swi) +	.ltorg  	/*  	 * This is the really slow path.  We're going to be doing diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c index 87389d9456b9..30d781d80fe0 100644 --- a/arch/arm/mach-rockchip/pm.c +++ b/arch/arm/mach-rockchip/pm.c @@ -311,7 +311,7 @@ void __init rockchip_suspend_init(void)  					     &match);  	if (!match) {  		pr_err("Failed to find PMU node\n"); -		return; +		goto out_put;  	}  	pm_data = (struct rockchip_pm_data *) match->data; @@ -320,9 +320,12 @@ void __init rockchip_suspend_init(void)  		if (ret) {  			pr_err("%s: matches init error %d\n", __func__, ret); -			return; +			goto out_put;  		}  	}  	suspend_set_ops(pm_data->ops); + +out_put: +	of_node_put(np);  } diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index a3a4589ec73b..fc439c2c16f8 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -631,7 +631,11 @@ config CPU_USE_DOMAINS  	bool  	help  	  This option enables or disables the use of domain switching -	  via the set_fs() function. +	  using the DACR (domain access control register) to protect memory +	  domains from each other. In Linux we use three domains: kernel, user +	  and IO. The domains are used to protect userspace from kernelspace +	  and to handle IO-space as a special type of memory by assigning +	  manager or client roles to running code (such as a process).  
config CPU_V7M_NUM_IRQ  	int "Number of external interrupts connected to the NVIC" diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 6f499559d193..f8dd0b3cc8e0 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)  	if (type == TYPE_LDST)  		do_alignment_finish_ldst(addr, instr, regs, offset); +	if (thumb_mode(regs)) +		regs->ARM_cpsr = it_advance(regs->ARM_cpsr); +  	return 0;   bad_or_fault: diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 5e2be37a198e..cd17e324aa51 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = {  		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,  		.domain    = DOMAIN_KERNEL,  	}, +	[MT_MEMORY_RO] = { +		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | +			     L_PTE_XN | L_PTE_RDONLY, +		.prot_l1   = PMD_TYPE_TABLE, +		.prot_sect = PMD_TYPE_SECT, +		.domain    = DOMAIN_KERNEL, +	},  	[MT_ROM] = {  		.prot_sect = PMD_TYPE_SECT,  		.domain    = DOMAIN_KERNEL, @@ -489,6 +496,7 @@ static void __init build_mem_type_table(void)  			/* Also setup NX memory mapping */  			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; +			mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN;  		}  		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {  			/* @@ -568,6 +576,7 @@ static void __init build_mem_type_table(void)  		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;  		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;  		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; +		mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;  #endif  		/* @@ -587,6 +596,8 @@ static void __init build_mem_type_table(void)  			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;  			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;  			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; +			mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S; +			mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED;  			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;  			mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S;  			mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; @@ -647,6 +658,8 @@ static void __init build_mem_type_table(void)  	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;  	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;  	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; +	mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd; +	mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot;  	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;  	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;  	mem_types[MT_ROM].prot_sect |= cp->pmd; @@ -1360,7 +1373,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)  		map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK);  		map.virtual = FDT_FIXED_BASE;  		map.length = FDT_FIXED_SIZE; -		map.type = MT_ROM; +		map.type = MT_MEMORY_RO;  		create_mapping(&map);  	} diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index fb9f3eb6bf48..8bc7a2d6d6c7 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method)  #else  static unsigned int spectre_v2_install_workaround(unsigned int method)  { -	pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n", -		smp_processor_id()); +	pr_info_once("Spectre V2: 
workarounds disabled by configuration\n");  	return SPECTRE_VULNERABLE;  } @@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method)  			return SPECTRE_VULNERABLE;  		spectre_bhb_method = method; -	} -	pr_info("CPU%u: Spectre BHB: using %s workaround\n", -		smp_processor_id(), spectre_bhb_method_name(method)); +		pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n", +			smp_processor_id(), spectre_bhb_method_name(method)); +	}  	return SPECTRE_MITIGATED;  } diff --git a/arch/arm/probes/decode.h b/arch/arm/probes/decode.h index 973173598992..facc889d05ee 100644 --- a/arch/arm/probes/decode.h +++ b/arch/arm/probes/decode.h @@ -14,6 +14,7 @@  #include <linux/types.h>  #include <linux/stddef.h>  #include <asm/probes.h> +#include <asm/ptrace.h>  #include <asm/kprobes.h>  void __init arm_probes_decode_init(void); @@ -35,31 +36,6 @@ void __init find_str_pc_offset(void);  #endif -/* - * Update ITSTATE after normal execution of an IT block instruction. - * - * The 8 IT state bits are split into two parts in CPSR: - *	ITSTATE<1:0> are in CPSR<26:25> - *	ITSTATE<7:2> are in CPSR<15:10> - */ -static inline unsigned long it_advance(unsigned long cpsr) -	{ -	if ((cpsr & 0x06000400) == 0) { -		/* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ -		cpsr &= ~PSR_IT_MASK; -	} else { -		/* We need to shift left ITSTATE<4:0> */ -		const unsigned long mask = 0x06001c00;  /* Mask ITSTATE<4:0> */ -		unsigned long it = cpsr & mask; -		it <<= 1; -		it |= it >> (27 - 10);  /* Carry ITSTATE<2> to correct place */ -		it &= mask; -		cpsr &= ~mask; -		cpsr |= it; -	} -	return cpsr; -} -  static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)  {  	long cpsr = regs->ARM_cpsr; diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi index 66023d553524..d084c33d5ca8 100644 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4906.dtsi @@ -9,6 +9,14 @@  		/delete-node/ cpu@3;  	}; +	timer { +		compatible = "arm,armv8-timer"; +		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, +			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, +			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>, +			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>; +	}; +  	pmu {  		compatible = "arm,cortex-a53-pmu";  		interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi index a4be040a00c0..967d2cd3c3ce 100644 --- a/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi +++ b/arch/arm64/boot/dts/broadcom/bcm4908/bcm4908.dtsi @@ -29,6 +29,8 @@  			device_type = "cpu";  			compatible = "brcm,brahma-b53";  			reg = <0x0>; +			enable-method = "spin-table"; +			cpu-release-addr = <0x0 0xfff8>;  			next-level-cache = <&l2>;  		}; diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi index 92465f777603..d5cdd77e5a95 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi @@ -224,9 +224,12 @@  			little-endian;  		}; -		efuse@1e80000 { +		sfp: efuse@1e80000 {  			compatible = "fsl,ls1028a-sfp";  			reg = <0x0 0x1e80000 0x0 0x10000>; +			clocks = <&clockgen QORIQ_CLK_PLATFORM_PLL +					    QORIQ_CLK_PLL_DIV(4)>; +			clock-names = "sfp";  			#address-cells = <1>;  			#size-cells = <1>; diff --git 
a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi index 913d845eb51a..1977103a5ef4 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi @@ -376,7 +376,8 @@ camera: &i2c7 {  		<&cru ACLK_VIO>,  		<&cru ACLK_GIC_PRE>,  		<&cru PCLK_DDR>, -		<&cru ACLK_HDCP>; +		<&cru ACLK_HDCP>, +		<&cru ACLK_VDU>;  	assigned-clock-rates =  		<600000000>, <1600000000>,  		<1000000000>, @@ -388,6 +389,7 @@ camera: &i2c7 {  		<400000000>,  		<200000000>,  		<200000000>, +		<400000000>,  		<400000000>;  }; diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index fbd0346624e6..9d5b0e8c9cca 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi @@ -1462,7 +1462,8 @@  			<&cru HCLK_PERILP1>, <&cru PCLK_PERILP1>,  			<&cru ACLK_VIO>, <&cru ACLK_HDCP>,  			<&cru ACLK_GIC_PRE>, -			<&cru PCLK_DDR>; +			<&cru PCLK_DDR>, +			<&cru ACLK_VDU>;  		assigned-clock-rates =  			 <594000000>,  <800000000>,  			<1000000000>, @@ -1473,7 +1474,8 @@  			 <100000000>,   <50000000>,  			 <400000000>, <400000000>,  			 <200000000>, -			 <200000000>; +			 <200000000>, +			 <400000000>;  	};  	grf: syscon@ff770000 { diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts index 1534e11a9ad1..fa953b736642 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-a.dts @@ -687,6 +687,7 @@  };  &usb_host0_xhci { +	dr_mode = "host";  	status = "okay";  }; diff --git a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts index 7bdcecc0dfe4..02d5f5a8ca03 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts +++ b/arch/arm64/boot/dts/rockchip/rk3566-quartz64-b.dts @@ -133,7 +133,7 @@  	assigned-clocks = <&cru SCLK_GMAC1_RX_TX>, <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>;  	assigned-clock-parents = <&cru SCLK_GMAC1_RGMII_SPEED>, <&cru SCLK_GMAC1>, <&gmac1_clkin>;  	clock_in_out = "input"; -	phy-mode = "rgmii-id"; +	phy-mode = "rgmii";  	phy-supply = <&vcc_3v3>;  	pinctrl-names = "default";  	pinctrl-0 = <&gmac1m1_miim diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h index 95af12e82a32..cdbd9653aa14 100644 --- a/arch/um/include/asm/page.h +++ b/arch/um/include/asm/page.h @@ -102,8 +102,8 @@ extern unsigned long uml_physmem;   * casting is the right thing, but 32-bit UML can't have 64-bit virtual   * addresses   */ -#define __pa(virt) to_phys((void *) (unsigned long) (virt)) -#define __va(phys) to_virt((unsigned long) (phys)) +#define __pa(virt) uml_to_phys((void *) (unsigned long) (virt)) +#define __va(phys) uml_to_virt((unsigned long) (phys))  #define phys_to_pfn(p) ((p) >> PAGE_SHIFT)  #define pfn_to_phys(pfn) PFN_PHYS(pfn) diff --git a/arch/um/include/shared/mem.h b/arch/um/include/shared/mem.h index 4862c91d4213..98aacd544108 100644 --- a/arch/um/include/shared/mem.h +++ b/arch/um/include/shared/mem.h @@ -9,12 +9,12 @@  extern int phys_mapping(unsigned long phys, unsigned long long *offset_out);  extern unsigned long uml_physmem; -static inline unsigned long to_phys(void *virt) +static inline unsigned long uml_to_phys(void *virt)  {  	return(((unsigned long) virt) - uml_physmem);  } -static inline void *to_virt(unsigned long phys) +static inline void *uml_to_virt(unsigned long phys)  {  	return((void *) uml_physmem + 
phys);  } diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c index 87d3129e7362..c316c993a949 100644 --- a/arch/um/os-Linux/skas/process.c +++ b/arch/um/os-Linux/skas/process.c @@ -251,7 +251,7 @@ static int userspace_tramp(void *stack)  	signal(SIGTERM, SIG_DFL);  	signal(SIGWINCH, SIG_IGN); -	fd = phys_mapping(to_phys(__syscall_stub_start), &offset); +	fd = phys_mapping(uml_to_phys(__syscall_stub_start), &offset);  	addr = mmap64((void *) STUB_CODE, UM_KERN_PAGE_SIZE,  		      PROT_EXEC, MAP_FIXED | MAP_PRIVATE, fd, offset);  	if (addr == MAP_FAILED) { @@ -261,7 +261,7 @@ static int userspace_tramp(void *stack)  	}  	if (stack != NULL) { -		fd = phys_mapping(to_phys(stack), &offset); +		fd = phys_mapping(uml_to_phys(stack), &offset);  		addr = mmap((void *) STUB_DATA,  			    UM_KERN_PAGE_SIZE, PROT_READ | PROT_WRITE,  			    MAP_FIXED | MAP_SHARED, fd, offset); @@ -534,7 +534,7 @@ int copy_context_skas0(unsigned long new_stack, int pid)  	struct stub_data *data = (struct stub_data *) current_stack;  	struct stub_data *child_data = (struct stub_data *) new_stack;  	unsigned long long new_offset; -	int new_fd = phys_mapping(to_phys((void *)new_stack), &new_offset); +	int new_fd = phys_mapping(uml_to_phys((void *)new_stack), &new_offset);  	/*  	 * prepare offset and fd of child's stack as argument for parent's diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index bb05ed4f46bd..10a3bfc1eb23 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -11,6 +11,7 @@  #include <asm/cpufeatures.h>  #include <asm/msr-index.h>  #include <asm/unwind_hints.h> +#include <asm/percpu.h>  #define RETPOLINE_THUNK_SIZE	32 @@ -280,7 +281,7 @@ static inline void indirect_branch_prediction_barrier(void)  /* The Intel SPEC CTRL MSR base value cache */  extern u64 x86_spec_ctrl_base; -extern u64 x86_spec_ctrl_current; +DECLARE_PER_CPU(u64, x86_spec_ctrl_current);  extern void write_spec_ctrl_current(u64 val, bool force);  extern u64 spec_ctrl_current(void); diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c index 43177c20ce4f..eaea733b368a 100644 --- a/drivers/acpi/acpi_video.c +++ b/drivers/acpi/acpi_video.c @@ -73,7 +73,7 @@ module_param(device_id_scheme, bool, 0444);  static int only_lcd = -1;  module_param(only_lcd, int, 0444); -static bool has_backlight; +static bool may_report_brightness_keys;  static int register_count;  static DEFINE_MUTEX(register_count_mutex);  static DEFINE_MUTEX(video_list_lock); @@ -1224,7 +1224,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,  	acpi_video_device_find_cap(data);  	if (data->cap._BCM && data->cap._BCL) -		has_backlight = true; +		may_report_brightness_keys = true;  	mutex_lock(&video->device_list_lock);  	list_add_tail(&data->entry, &video->video_device_list); @@ -1693,6 +1693,9 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)  		break;  	} +	if (keycode) +		may_report_brightness_keys = true; +  	acpi_notifier_call_chain(device, event, 0);  	if (keycode && (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS)) { @@ -2253,7 +2256,7 @@ void acpi_video_unregister(void)  	if (register_count) {  		acpi_bus_unregister_driver(&acpi_video_bus);  		register_count = 0; -		has_backlight = false; +		may_report_brightness_keys = false;  	}  	mutex_unlock(®ister_count_mutex);  } @@ -2275,7 +2278,7 @@ void acpi_video_unregister_backlight(void)  bool acpi_video_handles_brightness_key_presses(void)  { -	return 
has_backlight && +	return may_report_brightness_keys &&  	       (report_key_events & REPORT_BRIGHTNESS_KEY_EVENTS);  }  EXPORT_SYMBOL(acpi_video_handles_brightness_key_presses); diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index 0e3ed5eb367b..0cb20324da16 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -493,13 +493,8 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)  		goto skip_probe;  	ret = amba_read_periphid(dev); -	if (ret) { -		if (ret != -EPROBE_DEFER) { -			amba_device_put(dev); -			goto err_out; -		} +	if (ret)  		goto err_release; -	}  skip_probe:  	ret = device_add(&dev->dev); @@ -546,6 +541,7 @@ static int amba_deferred_retry(void)  			continue;  		list_del_init(&ddev->node); +		amba_device_put(ddev->dev);  		kfree(ddev);  	} diff --git a/drivers/firmware/efi/reboot.c b/drivers/firmware/efi/reboot.c index 73089a24f04b..ceae84c19d22 100644 --- a/drivers/firmware/efi/reboot.c +++ b/drivers/firmware/efi/reboot.c @@ -6,7 +6,7 @@  #include <linux/efi.h>  #include <linux/reboot.h> -static void (*orig_pm_power_off)(void); +static struct sys_off_handler *efi_sys_off_handler;  int efi_reboot_quirk_mode = -1; @@ -51,15 +51,11 @@ bool __weak efi_poweroff_required(void)  	return false;  } -static void efi_power_off(void) +static int efi_power_off(struct sys_off_data *data)  {  	efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); -	/* -	 * The above call should not return, if it does fall back to -	 * the original power off method (typically ACPI poweroff). -	 */ -	if (orig_pm_power_off) -		orig_pm_power_off(); + +	return NOTIFY_DONE;  }  static int __init efi_shutdown_init(void) @@ -68,8 +64,13 @@ static int __init efi_shutdown_init(void)  		return -ENODEV;  	if (efi_poweroff_required()) { -		orig_pm_power_off = pm_power_off; -		pm_power_off = efi_power_off; +		/* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ +		efi_sys_off_handler = +			register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, +						 SYS_OFF_PRIO_FIRMWARE + 1, +						 efi_power_off, NULL); +		if (IS_ERR(efi_sys_off_handler)) +			return PTR_ERR(efi_sys_off_handler);  	}  	return 0; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e88c497fa010..f65656df3619 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -256,7 +256,6 @@ config DRM_AMDGPU  	select HWMON  	select BACKLIGHT_CLASS_DEVICE  	select INTERVAL_TREE -	select DRM_BUDDY  	help  	  Choose this option if you have a recent AMD Radeon graphics card. 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 6546552e596c..acfa207cf970 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -30,15 +30,12 @@  #include <drm/ttm/ttm_resource.h>  #include <drm/ttm/ttm_range_manager.h> -#include "amdgpu_vram_mgr.h" -  /* state back for walking over vram_mgr and gtt_mgr allocations */  struct amdgpu_res_cursor {  	uint64_t		start;  	uint64_t		size;  	uint64_t		remaining; -	void			*node; -	uint32_t		mem_type; +	struct drm_mm_node	*node;  };  /** @@ -55,63 +52,27 @@ static inline void amdgpu_res_first(struct ttm_resource *res,  				    uint64_t start, uint64_t size,  				    struct amdgpu_res_cursor *cur)  { -	struct drm_buddy_block *block; -	struct list_head *head, *next;  	struct drm_mm_node *node; -	if (!res) -		goto fallback; - -	BUG_ON(start + size > res->num_pages << PAGE_SHIFT); - -	cur->mem_type = res->mem_type; - -	switch (cur->mem_type) { -	case TTM_PL_VRAM: -		head = &to_amdgpu_vram_mgr_resource(res)->blocks; - -		block = list_first_entry_or_null(head, -						 struct drm_buddy_block, -						 link); -		if (!block) -			goto fallback; - -		while (start >= amdgpu_vram_mgr_block_size(block)) { -			start -= amdgpu_vram_mgr_block_size(block); - -			next = block->link.next; -			if (next != head) -				block = list_entry(next, struct drm_buddy_block, link); -		} - -		cur->start = amdgpu_vram_mgr_block_start(block) + start; -		cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size); -		cur->remaining = size; -		cur->node = block; -		break; -	case TTM_PL_TT: -		node = to_ttm_range_mgr_node(res)->mm_nodes; -		while (start >= node->size << PAGE_SHIFT) -			start -= node++->size << PAGE_SHIFT; - -		cur->start = (node->start << PAGE_SHIFT) + start; -		cur->size = min((node->size << PAGE_SHIFT) - start, size); +	if (!res || res->mem_type == TTM_PL_SYSTEM) { +		cur->start = start; +		cur->size = size;  		cur->remaining = size; -		cur->node = node; -		break; -	default: -		goto fallback; +		cur->node = NULL; +		WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); +		return;  	} -	return; +	BUG_ON(start + size > res->num_pages << PAGE_SHIFT); -fallback: -	cur->start = start; -	cur->size = size; +	node = to_ttm_range_mgr_node(res)->mm_nodes; +	while (start >= node->size << PAGE_SHIFT) +		start -= node++->size << PAGE_SHIFT; + +	cur->start = (node->start << PAGE_SHIFT) + start; +	cur->size = min((node->size << PAGE_SHIFT) - start, size);  	cur->remaining = size; -	cur->node = NULL; -	WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); -	return; +	cur->node = node;  }  /** @@ -124,9 +85,7 @@ fallback:   */  static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)  { -	struct drm_buddy_block *block; -	struct drm_mm_node *node; -	struct list_head *next; +	struct drm_mm_node *node = cur->node;  	BUG_ON(size > cur->remaining); @@ -140,27 +99,9 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)  		return;  	} -	switch (cur->mem_type) { -	case TTM_PL_VRAM: -		block = cur->node; - -		next = block->link.next; -		block = list_entry(next, struct drm_buddy_block, link); - -		cur->node = block; -		cur->start = amdgpu_vram_mgr_block_start(block); -		cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining); -		break; -	case TTM_PL_TT: -		node = cur->node; - -		cur->node = ++node; -		cur->start = node->start << PAGE_SHIFT; -		cur->size = min(node->size << PAGE_SHIFT, 
cur->remaining); -		break; -	default: -		return; -	} +	cur->node = ++node; +	cur->start = node->start << PAGE_SHIFT; +	cur->size = min(node->size << PAGE_SHIFT, cur->remaining);  }  #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6a70818039dd..9120ae80ef52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -26,7 +26,6 @@  #include <linux/dma-direction.h>  #include <drm/gpu_scheduler.h> -#include "amdgpu_vram_mgr.h"  #include "amdgpu.h"  #define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0) @@ -39,6 +38,15 @@  #define AMDGPU_POISON	0xd0bed0be +struct amdgpu_vram_mgr { +	struct ttm_resource_manager manager; +	struct drm_mm mm; +	spinlock_t lock; +	struct list_head reservations_pending; +	struct list_head reserved_pages; +	atomic64_t vis_usage; +}; +  struct amdgpu_gtt_mgr {  	struct ttm_resource_manager manager;  	struct drm_mm mm; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 49e4092f447f..0a7611648573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -32,10 +32,8 @@  #include "atom.h"  struct amdgpu_vram_reservation { -	u64 start; -	u64 size; -	struct list_head allocated; -	struct list_head blocks; +	struct list_head node; +	struct drm_mm_node mm_node;  };  static inline struct amdgpu_vram_mgr * @@ -188,18 +186,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {  };  /** - * amdgpu_vram_mgr_vis_size - Calculate visible block size + * amdgpu_vram_mgr_vis_size - Calculate visible node size   *   * @adev: amdgpu_device pointer - * @block: DRM BUDDY block structure + * @node: MM node structure   * - * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM + * Calculate how many bytes of the MM node are inside visible VRAM   */  static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, -				    struct drm_buddy_block *block) +				    struct drm_mm_node *node)  { -	u64 start = amdgpu_vram_mgr_block_start(block); -	u64 end = start + amdgpu_vram_mgr_block_size(block); +	uint64_t start = node->start << PAGE_SHIFT; +	uint64_t end = (node->size + node->start) << PAGE_SHIFT;  	if (start >= adev->gmc.visible_vram_size)  		return 0; @@ -220,9 +218,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)  {  	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);  	struct ttm_resource *res = bo->tbo.resource; -	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); -	struct drm_buddy_block *block; -	u64 usage = 0; +	unsigned pages = res->num_pages; +	struct drm_mm_node *mm; +	u64 usage;  	if (amdgpu_gmc_vram_full_visible(&adev->gmc))  		return amdgpu_bo_size(bo); @@ -230,8 +228,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)  	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)  		return 0; -	list_for_each_entry(block, &vres->blocks, link) -		usage += amdgpu_vram_mgr_vis_size(adev, block); +	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0]; +	for (usage = 0; pages; pages -= mm->size, mm++) +		usage += amdgpu_vram_mgr_vis_size(adev, mm);  	return usage;  } @@ -241,30 +240,23 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)  {  	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);  	struct amdgpu_device *adev = to_amdgpu_device(mgr); -	struct drm_buddy *mm = &mgr->mm; +	struct drm_mm *mm = &mgr->mm;  	struct amdgpu_vram_reservation *rsv, *temp; -	struct 
drm_buddy_block *block;  	uint64_t vis_usage; -	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { -		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, -					   rsv->size, mm->chunk_size, &rsv->allocated, -					   DRM_BUDDY_RANGE_ALLOCATION)) -			continue; - -		block = amdgpu_vram_mgr_first_block(&rsv->allocated); -		if (!block) +	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) { +		if (drm_mm_reserve_node(mm, &rsv->mm_node))  			continue;  		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n", -			rsv->start, rsv->size); +			rsv->mm_node.start, rsv->mm_node.size); -		vis_usage = amdgpu_vram_mgr_vis_size(adev, block); +		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);  		atomic64_add(vis_usage, &mgr->vis_usage);  		spin_lock(&man->bdev->lru_lock); -		man->usage += rsv->size; +		man->usage += rsv->mm_node.size << PAGE_SHIFT;  		spin_unlock(&man->bdev->lru_lock); -		list_move(&rsv->blocks, &mgr->reserved_pages); +		list_move(&rsv->node, &mgr->reserved_pages);  	}  } @@ -286,16 +278,14 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,  	if (!rsv)  		return -ENOMEM; -	INIT_LIST_HEAD(&rsv->allocated); -	INIT_LIST_HEAD(&rsv->blocks); +	INIT_LIST_HEAD(&rsv->node); +	rsv->mm_node.start = start >> PAGE_SHIFT; +	rsv->mm_node.size = size >> PAGE_SHIFT; -	rsv->start = start; -	rsv->size = size; - -	mutex_lock(&mgr->lock); -	list_add_tail(&rsv->blocks, &mgr->reservations_pending); +	spin_lock(&mgr->lock); +	list_add_tail(&rsv->node, &mgr->reservations_pending);  	amdgpu_vram_mgr_do_reserve(&mgr->manager); -	mutex_unlock(&mgr->lock); +	spin_unlock(&mgr->lock);  	return 0;  } @@ -317,19 +307,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,  	struct amdgpu_vram_reservation *rsv;  	int ret; -	mutex_lock(&mgr->lock); +	spin_lock(&mgr->lock); -	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) { -		if (rsv->start <= start && -		    (start < (rsv->start + rsv->size))) { +	list_for_each_entry(rsv, &mgr->reservations_pending, node) { +		if ((rsv->mm_node.start <= start) && +		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {  			ret = -EBUSY;  			goto out;  		}  	} -	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) { -		if (rsv->start <= start && -		    (start < (rsv->start + rsv->size))) { +	list_for_each_entry(rsv, &mgr->reserved_pages, node) { +		if ((rsv->mm_node.start <= start) && +		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {  			ret = 0;  			goto out;  		} @@ -337,11 +327,33 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,  	ret = -ENOENT;  out: -	mutex_unlock(&mgr->lock); +	spin_unlock(&mgr->lock);  	return ret;  }  /** + * amdgpu_vram_mgr_virt_start - update virtual start address + * + * @mem: ttm_resource to update + * @node: just allocated node + * + * Calculate a virtual BO start address to easily check if everything is CPU + * accessible. 
+ */ +static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem, +				       struct drm_mm_node *node) +{ +	unsigned long start; + +	start = node->start + node->size; +	if (start > mem->num_pages) +		start -= mem->num_pages; +	else +		start = 0; +	mem->start = max(mem->start, start); +} + +/**   * amdgpu_vram_mgr_new - allocate new ranges   *   * @man: TTM memory type manager @@ -356,44 +368,46 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,  			       const struct ttm_place *place,  			       struct ttm_resource **res)  { -	u64 vis_usage = 0, max_bytes, cur_size, min_block_size; +	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;  	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);  	struct amdgpu_device *adev = to_amdgpu_device(mgr); -	struct amdgpu_vram_mgr_resource *vres; -	u64 size, remaining_size, lpfn, fpfn; -	struct drm_buddy *mm = &mgr->mm; -	struct drm_buddy_block *block; -	unsigned long pages_per_block; +	uint64_t vis_usage = 0, mem_bytes, max_bytes; +	struct ttm_range_mgr_node *node; +	struct drm_mm *mm = &mgr->mm; +	enum drm_mm_insert_mode mode; +	unsigned i;  	int r; -	lpfn = place->lpfn << PAGE_SHIFT; +	lpfn = place->lpfn;  	if (!lpfn) -		lpfn = man->size; - -	fpfn = place->fpfn << PAGE_SHIFT; +		lpfn = man->size >> PAGE_SHIFT;  	max_bytes = adev->gmc.mc_vram_size;  	if (tbo->type != ttm_bo_type_kernel)  		max_bytes -= AMDGPU_VM_RESERVED_VRAM; +	mem_bytes = tbo->base.size;  	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { -		pages_per_block = ~0ul; +		pages_per_node = ~0ul; +		num_nodes = 1;  	} else {  #ifdef CONFIG_TRANSPARENT_HUGEPAGE -		pages_per_block = HPAGE_PMD_NR; +		pages_per_node = HPAGE_PMD_NR;  #else  		/* default to 2MB */ -		pages_per_block = 2UL << (20UL - PAGE_SHIFT); +		pages_per_node = 2UL << (20UL - PAGE_SHIFT);  #endif -		pages_per_block = max_t(uint32_t, pages_per_block, -					tbo->page_alignment); +		pages_per_node = max_t(uint32_t, pages_per_node, +				       tbo->page_alignment); +		num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);  	} -	vres = kzalloc(sizeof(*vres), GFP_KERNEL); -	if (!vres) +	node = kvmalloc(struct_size(node, mm_nodes, num_nodes), +			GFP_KERNEL | __GFP_ZERO); +	if (!node)  		return -ENOMEM; -	ttm_resource_init(tbo, place, &vres->base); +	ttm_resource_init(tbo, place, &node->base);  	/* bail out quickly if there's likely not enough VRAM for this BO */  	if (ttm_resource_manager_usage(man) > max_bytes) { @@ -401,130 +415,66 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,  		goto error_fini;  	} -	INIT_LIST_HEAD(&vres->blocks); - +	mode = DRM_MM_INSERT_BEST;  	if (place->flags & TTM_PL_FLAG_TOPDOWN) -		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; - -	if (fpfn || lpfn != man->size) -		/* Allocate blocks in desired range */ -		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; - -	remaining_size = vres->base.num_pages << PAGE_SHIFT; - -	mutex_lock(&mgr->lock); -	while (remaining_size) { -		if (tbo->page_alignment) -			min_block_size = tbo->page_alignment << PAGE_SHIFT; -		else -			min_block_size = mgr->default_page_size; - -		BUG_ON(min_block_size < mm->chunk_size); - -		/* Limit maximum size to 2GiB due to SG table limitations */ -		size = min(remaining_size, 2ULL << 30); - -		if (size >= pages_per_block << PAGE_SHIFT) -			min_block_size = pages_per_block << PAGE_SHIFT; - -		cur_size = size; - -		if (fpfn + size != place->lpfn << PAGE_SHIFT) { -			/* -			 * Except for actual range allocation, modify the size and -			 * min_block_size conforming to continuous flag enablement 
-			 */ -			if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { -				size = roundup_pow_of_two(size); -				min_block_size = size; -			/* -			 * Modify the size value if size is not -			 * aligned with min_block_size -			 */ -			} else if (!IS_ALIGNED(size, min_block_size)) { -				size = round_up(size, min_block_size); +		mode = DRM_MM_INSERT_HIGH; + +	pages_left = node->base.num_pages; + +	/* Limit maximum size to 2GB due to SG table limitations */ +	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT)); + +	i = 0; +	spin_lock(&mgr->lock); +	while (pages_left) { +		uint32_t alignment = tbo->page_alignment; + +		if (pages >= pages_per_node) +			alignment = pages_per_node; + +		r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages, +						alignment, 0, place->fpfn, +						lpfn, mode); +		if (unlikely(r)) { +			if (pages > pages_per_node) { +				if (is_power_of_2(pages)) +					pages = pages / 2; +				else +					pages = rounddown_pow_of_two(pages); +				continue;  			} +			goto error_free;  		} -		r = drm_buddy_alloc_blocks(mm, fpfn, -					   lpfn, -					   size, -					   min_block_size, -					   &vres->blocks, -					   vres->flags); -		if (unlikely(r)) -			goto error_free_blocks; - -		if (size > remaining_size) -			remaining_size = 0; -		else -			remaining_size -= size; -	} -	mutex_unlock(&mgr->lock); - -	if (cur_size != size) { -		struct drm_buddy_block *block; -		struct list_head *trim_list; -		u64 original_size; -		LIST_HEAD(temp); - -		trim_list = &vres->blocks; -		original_size = vres->base.num_pages << PAGE_SHIFT; - -		/* -		 * If size value is rounded up to min_block_size, trim the last -		 * block to the required size -		 */ -		if (!list_is_singular(&vres->blocks)) { -			block = list_last_entry(&vres->blocks, typeof(*block), link); -			list_move_tail(&block->link, &temp); -			trim_list = &temp; -			/* -			 * Compute the original_size value by subtracting the -			 * last block size with (aligned size - original size) -			 */ -			original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size); -		} +		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]); +		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]); +		pages_left -= pages; +		++i; -		mutex_lock(&mgr->lock); -		drm_buddy_block_trim(mm, -				     original_size, -				     trim_list); -		mutex_unlock(&mgr->lock); - -		if (!list_empty(&temp)) -			list_splice_tail(trim_list, &vres->blocks); -	} - -	list_for_each_entry(block, &vres->blocks, link) -		vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - -	block = amdgpu_vram_mgr_first_block(&vres->blocks); -	if (!block) { -		r = -EINVAL; -		goto error_fini; +		if (pages > pages_left) +			pages = pages_left;  	} +	spin_unlock(&mgr->lock); -	vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT; - -	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks)) -		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS; +	if (i == 1) +		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;  	if (adev->gmc.xgmi.connected_to_cpu) -		vres->base.bus.caching = ttm_cached; +		node->base.bus.caching = ttm_cached;  	else -		vres->base.bus.caching = ttm_write_combined; +		node->base.bus.caching = ttm_write_combined;  	atomic64_add(vis_usage, &mgr->vis_usage); -	*res = &vres->base; +	*res = &node->base;  	return 0; -error_free_blocks: -	drm_buddy_free_list(mm, &vres->blocks); -	mutex_unlock(&mgr->lock); +error_free: +	while (i--) +		drm_mm_remove_node(&node->mm_nodes[i]); +	spin_unlock(&mgr->lock);  error_fini: -	ttm_resource_fini(man, &vres->base); -	kfree(vres); +	
ttm_resource_fini(man, &node->base); +	kvfree(node);  	return r;  } @@ -540,26 +490,27 @@ error_fini:  static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,  				struct ttm_resource *res)  { -	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res); +	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);  	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);  	struct amdgpu_device *adev = to_amdgpu_device(mgr); -	struct drm_buddy *mm = &mgr->mm; -	struct drm_buddy_block *block;  	uint64_t vis_usage = 0; +	unsigned i, pages; -	mutex_lock(&mgr->lock); -	list_for_each_entry(block, &vres->blocks, link) -		vis_usage += amdgpu_vram_mgr_vis_size(adev, block); +	spin_lock(&mgr->lock); +	for (i = 0, pages = res->num_pages; pages; +	     pages -= node->mm_nodes[i].size, ++i) { +		struct drm_mm_node *mm = &node->mm_nodes[i]; +		drm_mm_remove_node(mm); +		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm); +	}  	amdgpu_vram_mgr_do_reserve(man); - -	drm_buddy_free_list(mm, &vres->blocks); -	mutex_unlock(&mgr->lock); +	spin_unlock(&mgr->lock);  	atomic64_sub(vis_usage, &mgr->vis_usage);  	ttm_resource_fini(man, res); -	kfree(vres); +	kvfree(node);  }  /** @@ -591,7 +542,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,  	if (!*sgt)  		return -ENOMEM; -	/* Determine the number of DRM_BUDDY blocks to export */ +	/* Determine the number of DRM_MM nodes to export */  	amdgpu_res_first(res, offset, length, &cursor);  	while (cursor.remaining) {  		num_entries++; @@ -607,10 +558,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,  		sg->length = 0;  	/* -	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes -	 * @note: Use iterator api to get first the DRM_BUDDY block +	 * Walk down DRM_MM nodes to populate scatterlist nodes +	 * @note: Use iterator api to get first the DRM_MM node  	 * and the number of bytes from it. 
Access the following -	 * DRM_BUDDY block(s) if more buffer needs to exported +	 * DRM_MM node(s) if more buffer needs to exported  	 */  	amdgpu_res_first(res, offset, length, &cursor);  	for_each_sgtable_sg((*sgt), sg, i) { @@ -697,22 +648,13 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,  				  struct drm_printer *printer)  {  	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man); -	struct drm_buddy *mm = &mgr->mm; -	struct drm_buddy_block *block;  	drm_printf(printer, "  vis usage:%llu\n",  		   amdgpu_vram_mgr_vis_usage(mgr)); -	mutex_lock(&mgr->lock); -	drm_printf(printer, "default_page_size: %lluKiB\n", -		   mgr->default_page_size >> 10); - -	drm_buddy_print(mm, printer); - -	drm_printf(printer, "reserved:\n"); -	list_for_each_entry(block, &mgr->reserved_pages, link) -		drm_buddy_block_print(mm, block, printer); -	mutex_unlock(&mgr->lock); +	spin_lock(&mgr->lock); +	drm_mm_print(&mgr->mm, printer); +	spin_unlock(&mgr->lock);  }  static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { @@ -732,21 +674,16 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)  {  	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;  	struct ttm_resource_manager *man = &mgr->manager; -	int err;  	ttm_resource_manager_init(man, &adev->mman.bdev,  				  adev->gmc.real_vram_size);  	man->func = &amdgpu_vram_mgr_func; -	err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); -	if (err) -		return err; - -	mutex_init(&mgr->lock); +	drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT); +	spin_lock_init(&mgr->lock);  	INIT_LIST_HEAD(&mgr->reservations_pending);  	INIT_LIST_HEAD(&mgr->reserved_pages); -	mgr->default_page_size = PAGE_SIZE;  	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);  	ttm_resource_manager_set_used(man, true); @@ -774,16 +711,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)  	if (ret)  		return; -	mutex_lock(&mgr->lock); -	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) +	spin_lock(&mgr->lock); +	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)  		kfree(rsv); -	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { -		drm_buddy_free_list(&mgr->mm, &rsv->blocks); +	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) { +		drm_mm_remove_node(&rsv->mm_node);  		kfree(rsv);  	} -	drm_buddy_fini(&mgr->mm); -	mutex_unlock(&mgr->lock); +	drm_mm_takedown(&mgr->mm); +	spin_unlock(&mgr->lock);  	ttm_resource_manager_cleanup(man);  	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h deleted file mode 100644 index 9a2db87186c7..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ /dev/null @@ -1,89 +0,0 @@ -/* SPDX-License-Identifier: MIT - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef __AMDGPU_VRAM_MGR_H__ -#define __AMDGPU_VRAM_MGR_H__ - -#include <drm/drm_buddy.h> - -struct amdgpu_vram_mgr { -	struct ttm_resource_manager manager; -	struct drm_buddy mm; -	/* protects access to buffer objects */ -	struct mutex lock; -	struct list_head reservations_pending; -	struct list_head reserved_pages; -	atomic64_t vis_usage; -	u64 default_page_size; -}; - -struct amdgpu_vram_mgr_resource { -	struct ttm_resource base; -	struct list_head blocks; -	unsigned long flags; -}; - -static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block) -{ -	return drm_buddy_block_offset(block); -} - -static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) -{ -	return PAGE_SIZE << drm_buddy_block_order(block); -} - -static inline struct drm_buddy_block * -amdgpu_vram_mgr_first_block(struct list_head *list) -{ -	return list_first_entry_or_null(list, struct drm_buddy_block, link); -} - -static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head) -{ -	struct drm_buddy_block *block; -	u64 start, size; - -	block = amdgpu_vram_mgr_first_block(head); -	if (!block) -		return false; - -	while (head != block->link.next) { -		start = amdgpu_vram_mgr_block_start(block); -		size = amdgpu_vram_mgr_block_size(block); - -		block = list_entry(block->link.next, struct drm_buddy_block, link); -		if (start + size != amdgpu_vram_mgr_block_start(block)) -			return false; -	} - -	return true; -} - -static inline struct amdgpu_vram_mgr_resource * -to_amdgpu_vram_mgr_resource(struct ttm_resource *res) -{ -	return container_of(res, struct amdgpu_vram_mgr_resource, base); -} - -#endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index bf4200457772..a08769c5e94b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -184,6 +184,8 @@ static void kfd_device_info_init(struct kfd_dev *kfd,  			/* Navi2x+, Navi1x+ */  			if (gc_version == IP_VERSION(10, 3, 6))  				kfd->device_info.no_atomic_fw_version = 14; +			else if (gc_version == IP_VERSION(10, 3, 7)) +				kfd->device_info.no_atomic_fw_version = 3;  			else if (gc_version >= IP_VERSION(10, 3, 0))  				kfd->device_info.no_atomic_fw_version = 92;  			else if (gc_version >= IP_VERSION(10, 1, 1)) diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index b4029c0d5d8c..0ba0598eba20 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -6,7 +6,7 @@ config DRM_AMD_DC  	bool "AMD DC - Enable new display engine"  	default y  	select SND_HDA_COMPONENT if SND_HDA_CORE -	select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) +	select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)  	help  	  Choose this option if you want to use the new display engine  	  support for AMDGPU. 
This adds required support for Vega and diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1c2984bbda51..93ac33a8de9a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -72,6 +72,7 @@  #include <linux/pci.h>  #include <linux/firmware.h>  #include <linux/component.h> +#include <linux/dmi.h>  #include <drm/display/drm_dp_mst_helper.h>  #include <drm/display/drm_hdmi_helper.h> @@ -462,6 +463,26 @@ static void dm_pflip_high_irq(void *interrupt_params)  		     vrr_active, (int) !e);  } +static void dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) +{ +	struct drm_crtc *crtc = &acrtc->base; +	struct drm_device *dev = crtc->dev; +	unsigned long flags; + +	drm_crtc_handle_vblank(crtc); + +	spin_lock_irqsave(&dev->event_lock, flags); + +	/* Send completion event for cursor-only commits */ +	if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { +		drm_crtc_send_vblank_event(crtc, acrtc->event); +		drm_crtc_vblank_put(crtc); +		acrtc->event = NULL; +	} + +	spin_unlock_irqrestore(&dev->event_lock, flags); +} +  static void dm_vupdate_high_irq(void *interrupt_params)  {  	struct common_irq_params *irq_params = interrupt_params; @@ -500,7 +521,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)  		 * if a pageflip happened inside front-porch.  		 */  		if (vrr_active) { -			drm_crtc_handle_vblank(&acrtc->base); +			dm_crtc_handle_vblank(acrtc);  			/* BTR processing for pre-DCE12 ASICs */  			if (acrtc->dm_irq_params.stream && @@ -552,7 +573,7 @@ static void dm_crtc_high_irq(void *interrupt_params)  	 * to dm_vupdate_high_irq after end of front-porch.  	 */  	if (!vrr_active) -		drm_crtc_handle_vblank(&acrtc->base); +		dm_crtc_handle_vblank(acrtc);  	/**  	 * Following stuff must happen at start of vblank, for crc @@ -1382,6 +1403,41 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)  	return false;  } +static const struct dmi_system_id hpd_disconnect_quirk_table[] = { +	{ +		.matches = { +			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), +			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), +		}, +	}, +	{ +		.matches = { +			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), +			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), +		}, +	}, +	{ +		.matches = { +			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), +			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), +		}, +	}, +	{} +}; + +static void retrieve_dmi_info(struct amdgpu_display_manager *dm) +{ +	const struct dmi_system_id *dmi_id; + +	dm->aux_hpd_discon_quirk = false; + +	dmi_id = dmi_first_match(hpd_disconnect_quirk_table); +	if (dmi_id) { +		dm->aux_hpd_discon_quirk = true; +		DRM_INFO("aux_hpd_discon_quirk attached\n"); +	} +} +  static int amdgpu_dm_init(struct amdgpu_device *adev)  {  	struct dc_init_data init_data; @@ -1508,6 +1564,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)  	}  	INIT_LIST_HEAD(&adev->dm.da_list); + +	retrieve_dmi_info(&adev->dm); +  	/* Display Core create. 
*/  	adev->dm.dc = dc_create(&init_data); @@ -5407,7 +5466,7 @@ fill_blending_from_plane_state(const struct drm_plane_state *plane_state,  			}  		} -		if (per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) +		if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE)  			*pre_multiplied_alpha = false;  	} @@ -9135,6 +9194,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  	struct amdgpu_bo *abo;  	uint32_t target_vblank, last_flip_vblank;  	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); +	bool cursor_update = false;  	bool pflip_present = false;  	struct {  		struct dc_surface_update surface_updates[MAX_SURFACES]; @@ -9170,8 +9230,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);  		/* Cursor plane is handled after stream updates */ -		if (plane->type == DRM_PLANE_TYPE_CURSOR) +		if (plane->type == DRM_PLANE_TYPE_CURSOR) { +			if ((fb && crtc == pcrtc) || +			    (old_plane_state->fb && old_plane_state->crtc == pcrtc)) +				cursor_update = true; +  			continue; +		}  		if (!fb || !crtc || pcrtc != crtc)  			continue; @@ -9334,6 +9399,17 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,  				bundle->stream_update.vrr_infopacket =  					&acrtc_state->stream->vrr_infopacket;  		} +	} else if (cursor_update && acrtc_state->active_planes > 0 && +		   !acrtc_state->force_dpms_off && +		   acrtc_attach->base.state->event) { +		drm_crtc_vblank_get(pcrtc); + +		spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + +		acrtc_attach->event = acrtc_attach->base.state->event; +		acrtc_attach->base.state->event = NULL; + +		spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);  	}  	/* Update the planes if changed or disable if we don't have any. */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index aa34c0068f41..e80ef93f6550 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -540,6 +540,14 @@ struct amdgpu_display_manager {  	 * last successfully applied backlight values.  	 */  	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; + +	/** +	 * @aux_hpd_discon_quirk: +	 * +	 * quirk for hpd discon while aux is on-going. 
+	 * occurred on certain intel platform +	 */ +	bool aux_hpd_discon_quirk;  };  enum dsc_clock_force_state { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 9221b6690a4a..2b9b095e5f03 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -56,6 +56,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,  	ssize_t result = 0;  	struct aux_payload payload;  	enum aux_return_code_type operation_result; +	struct amdgpu_device *adev; +	struct ddc_service *ddc;  	if (WARN_ON(msg->size > 16))  		return -E2BIG; @@ -74,6 +76,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,  	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,  				      &operation_result); +	/* +	 * w/a on certain intel platform where hpd is unexpected to pull low during +	 * 1st sideband message transaction by return AUX_RET_ERROR_HPD_DISCON +	 * aux transaction is succuess in such case, therefore bypass the error +	 */ +	ddc = TO_DM_AUX(aux)->ddc_service; +	adev = ddc->ctx->driver_context; +	if (adev->dm.aux_hpd_discon_quirk) { +		if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE && +			operation_result == AUX_RET_ERROR_HPD_DISCON) { +			result = 0; +			operation_result = AUX_RET_SUCCESS; +		} +	} +  	if (payload.write && result >= 0)  		result = msg->size; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 6774dd8bb53e..3fe3fbac1e63 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1117,12 +1117,13 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)  	 * on certain displays, such as the Sharp 4k. 36bpp is needed  	 * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and  	 * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc -	 * precision on at least DCN display engines. However, at least -	 * Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, -	 * so use only 30 bpp on DCE_VERSION_11_0. Testing with DCE 11.2 and 8.3 -	 * did not show such problems, so this seems to be the exception. +	 * precision on DCN display engines, but apparently not for DCE, as +	 * far as testing on DCE-11.2 and DCE-8 showed. Various DCE parts have +	 * problems: Carrizo with DCE_VERSION_11_0 does not like 36 bpp lb depth, +	 * neither do DCE-8 at 4k resolution, or DCE-11.2 (broken identify pixel +	 * passthrough). Therefore only use 36 bpp on DCN where it is actually needed.  	 
*/ -	if (plane_state->ctx->dce_version > DCE_VERSION_11_0) +	if (plane_state->ctx->dce_version > DCE_VERSION_MAX)  		pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;  	else  		pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 5f8809f6990d..2fbd2926a531 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1228,6 +1228,8 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,  	uint32_t crystal_clock_freq = 2500;  	uint32_t tach_period; +	if (speed == 0) +		return -EINVAL;  	/*  	 * To prevent from possible overheat, some ASICs may have requirement  	 * for minimum fan speed: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 4c25d9b2f138..d30ebcaec8b9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -620,10 +620,15 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,  			 struct ttm_resource *res)  {  	struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); +	u64 page_alignment;  	if (!i915_ttm_gtt_binds_lmem(res))  		return i915_ttm_tt_get_st(bo->ttm); +	page_alignment = bo->page_alignment << PAGE_SHIFT; +	if (!page_alignment) +		page_alignment = obj->mm.region->min_page_size; +  	/*  	 * If CPU mapping differs, we need to add the ttm_tt pages to  	 * the resulting st. Might make sense for GGTT. @@ -634,7 +639,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,  			struct i915_refct_sgt *rsgt;  			rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, -								 res); +								 res, +								 page_alignment);  			if (IS_ERR(rsgt))  				return rsgt; @@ -643,7 +649,8 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,  		return i915_refct_sgt_get(obj->ttm.cached_io_rsgt);  	} -	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res); +	return intel_region_ttm_resource_to_rsgt(obj->mm.region, res, +						 page_alignment);  }  static int i915_ttm_truncate(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 319936f91ac5..e6e01c2a74a6 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -9,6 +9,7 @@  #include <linux/jiffies.h>  #include "gt/intel_engine.h" +#include "gt/intel_rps.h"  #include "i915_gem_ioctls.h"  #include "i915_gem_object.h" @@ -31,6 +32,37 @@ i915_gem_object_wait_fence(struct dma_fence *fence,  				      timeout);  } +static void +i915_gem_object_boost(struct dma_resv *resv, unsigned int flags) +{ +	struct dma_resv_iter cursor; +	struct dma_fence *fence; + +	/* +	 * Prescan all fences for potential boosting before we begin waiting. +	 * +	 * When we wait, we wait on outstanding fences serially. If the +	 * dma-resv contains a sequence such as 1:1, 1:2 instead of a reduced +	 * form 1:2, then as we look at each wait in turn we see that each +	 * request is currently executing and not worthy of boosting. But if +	 * we only happen to look at the final fence in the sequence (because +	 * of request coalescing or splitting between read/write arrays by +	 * the iterator), then we would boost. As such our decision to boost +	 * or not is delicately balanced on the order we wait on fences. 
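/*
 * Illustrative sketch, not part of the patch above: the comment being added
 * here describes scanning every outstanding fence for a potential boost
 * before any of them is waited on, so the boost decision no longer depends
 * on the order in which the serial waits happen to observe the fences.
 * A minimal stand-alone rendering of that "prescan, then wait" shape with
 * invented request types:
 */
#include <stdbool.h>
#include <stddef.h>

struct fake_request {
	bool started;	/* already executing */
	bool boosted;	/* frequency boost requested */
	bool signaled;	/* completed */
};

static void wait_all_boosted(struct fake_request *rq, size_t count)
{
	size_t i;

	/* Pass 1: decide boosting while nothing has been waited on yet. */
	for (i = 0; i < count; i++)
		if (!rq[i].signaled && !rq[i].started)
			rq[i].boosted = true;

	/* Pass 2: the ordinary serial waits (stubbed out here). */
	for (i = 0; i < count; i++)
		rq[i].signaled = true;
}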
+	 * +	 * So instead of looking for boosts sequentially, look for all boosts +	 * upfront and then wait on the outstanding fences. +	 */ + +	dma_resv_iter_begin(&cursor, resv, +			    dma_resv_usage_rw(flags & I915_WAIT_ALL)); +	dma_resv_for_each_fence_unlocked(&cursor, fence) +		if (dma_fence_is_i915(fence) && +		    !i915_request_started(to_request(fence))) +			intel_rps_boost(to_request(fence)); +	dma_resv_iter_end(&cursor); +} +  static long  i915_gem_object_wait_reservation(struct dma_resv *resv,  				 unsigned int flags, @@ -40,6 +72,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv,  	struct dma_fence *fence;  	long ret = timeout ?: 1; +	i915_gem_object_boost(resv, flags); +  	dma_resv_iter_begin(&cursor, resv,  			    dma_resv_usage_rw(flags & I915_WAIT_ALL));  	dma_resv_for_each_fence_unlocked(&cursor, fence) { diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 51a0fe60c050..531af6ad7007 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -1209,6 +1209,20 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)  	mutex_lock(>->tlb_invalidate_lock);  	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); +	spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */ + +	for_each_engine(engine, gt, id) { +		struct reg_and_bit rb; + +		rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); +		if (!i915_mmio_reg_offset(rb.reg)) +			continue; + +		intel_uncore_write_fw(uncore, rb.reg, rb.bit); +	} + +	spin_unlock_irq(&uncore->lock); +  	for_each_engine(engine, gt, id) {  		/*  		 * HW architecture suggest typical invalidation time at 40us, @@ -1223,7 +1237,6 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt)  		if (!i915_mmio_reg_offset(rb.reg))  			continue; -		intel_uncore_write_fw(uncore, rb.reg, rb.bit);  		if (__intel_wait_for_register_fw(uncore,  						 rb.reg, rb.bit, 0,  						 timeout_us, timeout_ms, diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index a5338c3fde7a..c68d36fb5bbd 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -300,9 +300,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)  	return err;  } -static int gen6_reset_engines(struct intel_gt *gt, -			      intel_engine_mask_t engine_mask, -			      unsigned int retry) +static int __gen6_reset_engines(struct intel_gt *gt, +				intel_engine_mask_t engine_mask, +				unsigned int retry)  {  	struct intel_engine_cs *engine;  	u32 hw_mask; @@ -321,6 +321,20 @@ static int gen6_reset_engines(struct intel_gt *gt,  	return gen6_hw_domain_reset(gt, hw_mask);  } +static int gen6_reset_engines(struct intel_gt *gt, +			      intel_engine_mask_t engine_mask, +			      unsigned int retry) +{ +	unsigned long flags; +	int ret; + +	spin_lock_irqsave(>->uncore->lock, flags); +	ret = __gen6_reset_engines(gt, engine_mask, retry); +	spin_unlock_irqrestore(>->uncore->lock, flags); + +	return ret; +} +  static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)  {  	int vecs_id; @@ -487,9 +501,9 @@ static void gen11_unlock_sfc(struct intel_engine_cs *engine)  	rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);  } -static int gen11_reset_engines(struct intel_gt *gt, -			       intel_engine_mask_t engine_mask, -			       unsigned int retry) +static int __gen11_reset_engines(struct intel_gt *gt, +				 intel_engine_mask_t engine_mask, +				 unsigned int retry)  {  	struct intel_engine_cs 
*engine;  	intel_engine_mask_t tmp; @@ -583,8 +597,11 @@ static int gen8_reset_engines(struct intel_gt *gt,  	struct intel_engine_cs *engine;  	const bool reset_non_ready = retry >= 1;  	intel_engine_mask_t tmp; +	unsigned long flags;  	int ret; +	spin_lock_irqsave(>->uncore->lock, flags); +  	for_each_engine_masked(engine, gt, engine_mask, tmp) {  		ret = gen8_engine_reset_prepare(engine);  		if (ret && !reset_non_ready) @@ -612,17 +629,19 @@ static int gen8_reset_engines(struct intel_gt *gt,  	 * This is best effort, so ignore any error from the initial reset.  	 */  	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES) -		gen11_reset_engines(gt, gt->info.engine_mask, 0); +		__gen11_reset_engines(gt, gt->info.engine_mask, 0);  	if (GRAPHICS_VER(gt->i915) >= 11) -		ret = gen11_reset_engines(gt, engine_mask, retry); +		ret = __gen11_reset_engines(gt, engine_mask, retry);  	else -		ret = gen6_reset_engines(gt, engine_mask, retry); +		ret = __gen6_reset_engines(gt, engine_mask, retry);  skip_reset:  	for_each_engine_masked(engine, gt, engine_mask, tmp)  		gen8_engine_reset_cancel(engine); +	spin_unlock_irqrestore(>->uncore->lock, flags); +  	return ret;  } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index 8b2c11dbe354..1109088fe8f6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -176,8 +176,8 @@ static int live_lrc_layout(void *arg)  			continue;  		hw = shmem_pin_map(engine->default_state); -		if (IS_ERR(hw)) { -			err = PTR_ERR(hw); +		if (!hw) { +			err = -ENOMEM;  			break;  		}  		hw += LRC_STATE_OFFSET / sizeof(*hw); @@ -365,8 +365,8 @@ static int live_lrc_fixed(void *arg)  			continue;  		hw = shmem_pin_map(engine->default_state); -		if (IS_ERR(hw)) { -			err = PTR_ERR(hw); +		if (!hw) { +			err = -ENOMEM;  			break;  		}  		hw += LRC_STATE_OFFSET / sizeof(*hw); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index b9eb75a2b400..1c35a41620ae 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -3117,9 +3117,9 @@ void intel_gvt_update_reg_whitelist(struct intel_vgpu *vgpu)  			continue;  		vaddr = shmem_pin_map(engine->default_state); -		if (IS_ERR(vaddr)) { -			gvt_err("failed to map %s->default state, err:%zd\n", -				engine->name, PTR_ERR(vaddr)); +		if (!vaddr) { +			gvt_err("failed to map %s->default state\n", +				engine->name);  			return;  		} diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index 159571b9bd24..f63b50b71e10 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -68,6 +68,7 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)   * drm_mm_node   * @node: The drm_mm_node.   * @region_start: An offset to add to the dma addresses of the sg list. + * @page_alignment: Required page alignment for each sg entry. Power of two.   *   * Create a struct sg_table, initializing it from a struct drm_mm_node,   * taking a maximum segment length into account, splitting into segments @@ -77,15 +78,18 @@ void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size)   * error code cast to an error pointer on failure.   */  struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, -					      u64 region_start) +					      u64 region_start, +					      u64 page_alignment)  { -	const u64 max_segment = SZ_1G; /* Do we have a limit on this? 
*/ +	const u64 max_segment = round_down(UINT_MAX, page_alignment);  	u64 segment_pages = max_segment >> PAGE_SHIFT;  	u64 block_size, offset, prev_end;  	struct i915_refct_sgt *rsgt;  	struct sg_table *st;  	struct scatterlist *sg; +	GEM_BUG_ON(!max_segment); +  	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);  	if (!rsgt)  		return ERR_PTR(-ENOMEM); @@ -112,6 +116,8 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,  				sg = __sg_next(sg);  			sg_dma_address(sg) = region_start + offset; +			GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg), +					       page_alignment));  			sg_dma_len(sg) = 0;  			sg->length = 0;  			st->nents++; @@ -138,6 +144,7 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,   * i915_buddy_block list   * @res: The struct i915_ttm_buddy_resource.   * @region_start: An offset to add to the dma addresses of the sg list. + * @page_alignment: Required page alignment for each sg entry. Power of two.   *   * Create a struct sg_table, initializing it from struct i915_buddy_block list,   * taking a maximum segment length into account, splitting into segments @@ -147,11 +154,12 @@ struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node,   * error code cast to an error pointer on failure.   */  struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, -						     u64 region_start) +						     u64 region_start, +						     u64 page_alignment)  {  	struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);  	const u64 size = res->num_pages << PAGE_SHIFT; -	const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE); +	const u64 max_segment = round_down(UINT_MAX, page_alignment);  	struct drm_buddy *mm = bman_res->mm;  	struct list_head *blocks = &bman_res->blocks;  	struct drm_buddy_block *block; @@ -161,6 +169,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,  	resource_size_t prev_end;  	GEM_BUG_ON(list_empty(blocks)); +	GEM_BUG_ON(!max_segment);  	rsgt = kmalloc(sizeof(*rsgt), GFP_KERNEL);  	if (!rsgt) @@ -191,6 +200,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res,  					sg = __sg_next(sg);  				sg_dma_address(sg) = region_start + offset; +				GEM_BUG_ON(!IS_ALIGNED(sg_dma_address(sg), +						       page_alignment));  				sg_dma_len(sg) = 0;  				sg->length = 0;  				st->nents++; diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 12c6a1684081..b13e4cdea923 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -213,9 +213,11 @@ static inline void __i915_refct_sgt_init(struct i915_refct_sgt *rsgt,  void i915_refct_sgt_init(struct i915_refct_sgt *rsgt, size_t size);  struct i915_refct_sgt *i915_rsgt_from_mm_node(const struct drm_mm_node *node, -					      u64 region_start); +					      u64 region_start, +					      u64 page_alignment);  struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, -						     u64 region_start); +						     u64 region_start, +						     u64 page_alignment);  #endif diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 62ff77445b01..6873808a7015 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -152,6 +152,7 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)   * Convert an opaque TTM resource manager resource to a refcounted sg_table.   * @mem: The memory region.  
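/*
 * Illustrative sketch, not part of the patch above: i915_rsgt_from_mm_node()
 * and i915_rsgt_from_buddy_resource() now cap each sg entry at
 * round_down(UINT_MAX, page_alignment) instead of a fixed limit.  A
 * stand-alone rendering of how one contiguous range gets cut into such
 * segments (power-of-two alignment assumed, names invented):
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t round_down_p2(uint64_t v, uint64_t align)
{
	return v & ~(align - 1);	/* align must be a power of two */
}

static void emit_segments(uint64_t start, uint64_t size, uint64_t page_align)
{
	const uint64_t max_segment = round_down_p2(UINT32_MAX, page_align);
	uint64_t off = 0;

	while (off < size) {
		uint64_t len = size - off;

		if (len > max_segment)
			len = max_segment;

		/* Every full segment length is a multiple of the alignment,
		 * so an aligned start address stays aligned throughout. */
		printf("segment addr=%#llx len=%#llx\n",
		       (unsigned long long)(start + off),
		       (unsigned long long)len);
		off += len;
	}
}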
 * @res: The resource manager resource obtained from the TTM resource manager. + * @page_alignment: Required page alignment for each sg entry. Power of two.   *   * The gem backends typically use sg-tables for operations on the underlying   * io_memory. So provide a way for the backends to translate the @@ -161,16 +162,19 @@ int intel_region_ttm_fini(struct intel_memory_region *mem)   */  struct i915_refct_sgt *  intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, -				  struct ttm_resource *res) +				  struct ttm_resource *res, +				  u64 page_alignment)  {  	if (mem->is_range_manager) {  		struct ttm_range_mgr_node *range_node =  			to_ttm_range_mgr_node(res);  		return i915_rsgt_from_mm_node(&range_node->mm_nodes[0], -					      mem->region.start); +					      mem->region.start, +					      page_alignment);  	} else { -		return i915_rsgt_from_buddy_resource(res, mem->region.start); +		return i915_rsgt_from_buddy_resource(res, mem->region.start, +						     page_alignment);  	}  } diff --git a/drivers/gpu/drm/i915/intel_region_ttm.h b/drivers/gpu/drm/i915/intel_region_ttm.h index cf9d86dcf409..98fba5155619 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.h +++ b/drivers/gpu/drm/i915/intel_region_ttm.h @@ -24,7 +24,8 @@ int intel_region_ttm_fini(struct intel_memory_region *mem);  struct i915_refct_sgt *  intel_region_ttm_resource_to_rsgt(struct intel_memory_region *mem, -				  struct ttm_resource *res); +				  struct ttm_resource *res, +				  u64 page_alignment);  void intel_region_ttm_resource_free(struct intel_memory_region *mem,  				    struct ttm_resource *res); diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 8633bec18fa7..ab9f17fc85bc 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -742,7 +742,7 @@ static int pot_hole(struct i915_address_space *vm,  		u64 addr;  		for (addr = round_up(hole_start + min_alignment, step) - min_alignment; -		     addr <= round_down(hole_end - (2 * min_alignment), step) - min_alignment; +		     hole_end > addr && hole_end - addr >= 2 * min_alignment;  		     addr += step) {  			err = i915_vma_pin(vma, 0, 0, addr | flags);  			if (err) { diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 73eb53edb8de..3b18e5905c86 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -451,7 +451,6 @@ out_put:  static int igt_mock_max_segment(void *arg)  { -	const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);  	struct intel_memory_region *mem = arg;  	struct drm_i915_private *i915 = mem->i915;  	struct i915_ttm_buddy_resource *res; @@ -460,7 +459,10 @@ static int igt_mock_max_segment(void *arg)  	struct drm_buddy *mm;  	struct list_head *blocks;  	struct scatterlist *sg; +	I915_RND_STATE(prng);  	LIST_HEAD(objects); +	unsigned int max_segment; +	unsigned int ps;  	u64 size;  	int err = 0; @@ -472,7 +474,13 @@ static int igt_mock_max_segment(void *arg)  	 */  	size = SZ_8G; -	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0, 0); +	ps = PAGE_SIZE; +	if (i915_prandom_u64_state(&prng) & 1) +		ps = SZ_64K; /* For something like DG2 */ + +	max_segment = round_down(UINT_MAX, ps); + +	mem = mock_region_create(i915, 0, size, ps, 0, 0);  	if (IS_ERR(mem))  		return PTR_ERR(mem); @@ -498,12 +506,21 @@ static int igt_mock_max_segment(void *arg)  	}  	for (sg = 
obj->mm.pages->sgl; sg; sg = sg_next(sg)) { +		dma_addr_t daddr = sg_dma_address(sg); +  		if (sg->length > max_segment) {  			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",  			       __func__, sg->length, max_segment);  			err = -EINVAL;  			goto out_close;  		} + +		if (!IS_ALIGNED(daddr, ps)) { +			pr_err("%s: Created an unaligned scatterlist entry, addr=%pa, ps=%u\n", +			       __func__,  &daddr, ps); +			err = -EINVAL; +			goto out_close; +		}  	}  out_close: diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c index 670557ce1024..bac21fe84ca5 100644 --- a/drivers/gpu/drm/i915/selftests/mock_region.c +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -33,7 +33,8 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)  		return PTR_ERR(obj->mm.res);  	obj->mm.rsgt = intel_region_ttm_resource_to_rsgt(obj->mm.region, -							 obj->mm.res); +							 obj->mm.res, +							 obj->mm.region->min_page_size);  	if (IS_ERR(obj->mm.rsgt)) {  		err = PTR_ERR(obj->mm.rsgt);  		goto err_free_resource; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 831833911a52..8647125d60ae 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -379,7 +379,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev)  	}  } -static int aq_suspend_common(struct device *dev, bool deep) +static int aq_suspend_common(struct device *dev)  {  	struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev)); @@ -392,17 +392,15 @@ static int aq_suspend_common(struct device *dev, bool deep)  	if (netif_running(nic->ndev))  		aq_nic_stop(nic); -	if (deep) { -		aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); -		aq_nic_set_power(nic); -	} +	aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); +	aq_nic_set_power(nic);  	rtnl_unlock();  	return 0;  } -static int atl_resume_common(struct device *dev, bool deep) +static int atl_resume_common(struct device *dev)  {  	struct pci_dev *pdev = to_pci_dev(dev);  	struct aq_nic_s *nic; @@ -415,11 +413,6 @@ static int atl_resume_common(struct device *dev, bool deep)  	pci_set_power_state(pdev, PCI_D0);  	pci_restore_state(pdev); -	if (deep) { -		/* Reinitialize Nic/Vecs objects */ -		aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); -	} -  	if (netif_running(nic->ndev)) {  		ret = aq_nic_init(nic);  		if (ret) @@ -444,22 +437,22 @@ err_exit:  static int aq_pm_freeze(struct device *dev)  { -	return aq_suspend_common(dev, true); +	return aq_suspend_common(dev);  }  static int aq_pm_suspend_poweroff(struct device *dev)  { -	return aq_suspend_common(dev, true); +	return aq_suspend_common(dev);  }  static int aq_pm_thaw(struct device *dev)  { -	return atl_resume_common(dev, true); +	return atl_resume_common(dev);  }  static int aq_pm_resume_restore(struct device *dev)  { -	return atl_resume_common(dev, true); +	return atl_resume_common(dev);  }  static const struct dev_pm_ops aq_pm_ops = { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 56b46b8206a7..cf9b00576ed3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -7790,7 +7790,7 @@ hwrm_dbg_qcaps_exit:  static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); -static int bnxt_hwrm_func_qcaps(struct bnxt *bp) +int bnxt_hwrm_func_qcaps(struct bnxt *bp)  {  	int rc; @@ -10065,7 +10065,8 @@ static int 
bnxt_hwrm_if_change(struct bnxt *bp, bool up)  	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)  		resc_reinit = true; -	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE) +	if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE || +	    test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))  		fw_reset = true;  	else  		bnxt_remap_fw_health_regs(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a1dca8c58f54..075c6206325c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2314,6 +2314,7 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);  int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);  int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);  int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all); +int bnxt_hwrm_func_qcaps(struct bnxt *bp);  int bnxt_hwrm_fw_set_time(struct bnxt *);  int bnxt_open_nic(struct bnxt *, bool, bool);  int bnxt_half_open_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 3528ce9849e6..6b3d4f4c2a75 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -979,9 +979,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,  	if (rc)  		return rc; -	rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH); -	if (rc) -		return rc; +	if (BNXT_CHIP_P5(bp)) { +		rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH); +		if (rc) +			return rc; +	}  	return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);  } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 562f8f68a47d..7f3c0875b6f5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -76,14 +76,23 @@ static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts,  			    u64 *ns)  {  	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; +	u32 high_before, high_now, low;  	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))  		return -EIO; +	high_before = readl(bp->bar0 + ptp->refclk_mapped_regs[1]);  	ptp_read_system_prets(sts); -	*ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); +	low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]);  	ptp_read_system_postts(sts); -	*ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32; +	high_now = readl(bp->bar0 + ptp->refclk_mapped_regs[1]); +	if (high_now != high_before) { +		ptp_read_system_prets(sts); +		low = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); +		ptp_read_system_postts(sts); +	} +	*ns = ((u64)high_now << 32) | low; +  	return 0;  } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index ddf2f3963abe..a1a2c7a64fd5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -823,8 +823,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)  		goto err_out2;  	rc = pci_enable_sriov(bp->pdev, *num_vfs); -	if (rc) +	if (rc) { +		bnxt_ulp_sriov_cfg(bp, 0);  		goto err_out2; +	}  	return 0; @@ -832,6 +834,9 @@ err_out2:  	/* Free the resources reserved for various VF's */  	bnxt_hwrm_func_vf_resource_free(bp, *num_vfs); +	/* Restore the max resources */ +	bnxt_hwrm_func_qcaps(bp); +  err_out1:  	bnxt_free_vf_resources(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c 
b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index f02fe906dedb..f53387ed0167 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -28,7 +28,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,  				   struct xdp_buff *xdp)  {  	struct skb_shared_info *sinfo; -	struct bnxt_sw_tx_bd *tx_buf, *first_buf; +	struct bnxt_sw_tx_bd *tx_buf;  	struct tx_bd *txbd;  	int num_frags = 0;  	u32 flags; @@ -43,13 +43,14 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,  	/* fill up the first buffer */  	prod = txr->tx_prod;  	tx_buf = &txr->tx_buf_ring[prod]; -	first_buf = tx_buf;  	tx_buf->nr_frags = num_frags;  	if (xdp)  		tx_buf->page = virt_to_head_page(xdp->data);  	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; -	flags = ((len) << TX_BD_LEN_SHIFT) | ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT); +	flags = (len << TX_BD_LEN_SHIFT) | +		((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) | +		bnxt_lhint_arr[len >> 9];  	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);  	txbd->tx_bd_opaque = prod;  	txbd->tx_bd_haddr = cpu_to_le64(mapping); @@ -82,7 +83,6 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,  		flags = frag_len << TX_BD_LEN_SHIFT;  		txbd->tx_bd_len_flags_type = cpu_to_le32(flags); -		txbd->tx_bd_opaque = prod;  		txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);  		len = frag_len; @@ -96,7 +96,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,  	prod = NEXT_TX(prod);  	txr->tx_prod = prod; -	return first_buf; +	return tx_buf;  }  static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr, diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 4af5561cbfc5..7c760aa65540 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1392,7 +1392,7 @@ static void chtls_pass_accept_request(struct sock *sk,  	th_ecn = tcph->ece && tcph->cwr;  	if (th_ecn) {  		ect = !INET_ECN_is_not_ect(ip_dsfield); -		ecn_ok = sock_net(sk)->ipv4.sysctl_tcp_ecn; +		ecn_ok = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn);  		if ((!ect && ecn_ok) || tcp_ca_needs_ecn(sk))  			inet_rsk(oreq)->ecn_ok = 1;  	} diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 5231818943c6..c03663785a8d 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1764,6 +1764,19 @@ cleanup_clk:  	return rc;  } +static bool ftgmac100_has_child_node(struct device_node *np, const char *name) +{ +	struct device_node *child_np = of_get_child_by_name(np, name); +	bool ret = false; + +	if (child_np) { +		ret = true; +		of_node_put(child_np); +	} + +	return ret; +} +  static int ftgmac100_probe(struct platform_device *pdev)  {  	struct resource *res; @@ -1883,7 +1896,7 @@ static int ftgmac100_probe(struct platform_device *pdev)  		/* Display what we found */  		phy_attached_info(phy); -	} else if (np && !of_get_child_by_name(np, "mdio")) { +	} else if (np && !ftgmac100_has_child_node(np, "mdio")) {  		/* Support legacy ASPEED devicetree descriptions that decribe a  		 * MAC with an embedded MDIO controller but have no "mdio"  		 * child node. 
Automatically scan the MDIO bus for available diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h index 61dd2f18dee8..b41bc3dc1745 100644 --- a/drivers/net/ethernet/intel/ice/ice_devids.h +++ b/drivers/net/ethernet/intel/ice/ice_devids.h @@ -5,6 +5,7 @@  #define _ICE_DEVIDS_H_  /* Device IDs */ +#define ICE_DEV_ID_E822_SI_DFLT         0x1888  /* Intel(R) Ethernet Connection E823-L for backplane */  #define ICE_DEV_ID_E823L_BACKPLANE	0x124C  /* Intel(R) Ethernet Connection E823-L for SFP */ diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 3991d62473bf..3337314a7b35 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -814,6 +814,8 @@ void ice_devlink_destroy_vf_port(struct ice_vf *vf)  	devlink_port_unregister(devlink_port);  } +#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024) +  /**   * ice_devlink_nvm_snapshot - Capture a snapshot of the NVM flash contents   * @devlink: the devlink instance @@ -840,8 +842,9 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,  	struct ice_pf *pf = devlink_priv(devlink);  	struct device *dev = ice_pf_to_dev(pf);  	struct ice_hw *hw = &pf->hw; -	void *nvm_data; -	u32 nvm_size; +	u8 *nvm_data, *tmp, i; +	u32 nvm_size, left; +	s8 num_blks;  	int status;  	nvm_size = hw->flash.flash_size; @@ -849,26 +852,44 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,  	if (!nvm_data)  		return -ENOMEM; -	status = ice_acquire_nvm(hw, ICE_RES_READ); -	if (status) { -		dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", -			status, hw->adminq.sq_last_status); -		NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); -		vfree(nvm_data); -		return status; -	} -	status = ice_read_flat_nvm(hw, 0, &nvm_size, nvm_data, false); -	if (status) { -		dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", -			nvm_size, status, hw->adminq.sq_last_status); -		NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); +	num_blks = DIV_ROUND_UP(nvm_size, ICE_DEVLINK_READ_BLK_SIZE); +	tmp = nvm_data; +	left = nvm_size; + +	/* Some systems take longer to read the NVM than others which causes the +	 * FW to reclaim the NVM lock before the entire NVM has been read. Fix +	 * this by breaking the reads of the NVM into smaller chunks that will +	 * probably not take as long. 
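/*
 * Illustrative sketch, not part of the patch above: the new snapshot path in
 * ice_devlink_nvm_snapshot() reads the flash in 1 MiB blocks, taking and
 * releasing the NVM lock around every block so each hold stays short enough
 * that the firmware does not reclaim the lock mid-read.  A stand-alone
 * rendering of that loop shape with stubbed lock/read primitives (names
 * invented):
 */
#include <stdint.h>
#include <string.h>

#define BLK_SIZE (1024 * 1024)

static int dev_lock(void *dev)    { (void)dev; return 0; }
static void dev_unlock(void *dev) { (void)dev; }
static int dev_read(void *dev, uint32_t off, uint32_t len, uint8_t *dst)
{
	(void)dev; (void)off;
	memset(dst, 0, len);	/* pretend 'len' bytes were read at 'off' */
	return 0;
}

static int read_flash_chunked(void *dev, uint8_t *buf, uint32_t total)
{
	uint32_t off = 0;

	while (off < total) {
		uint32_t len = total - off;
		int err;

		if (len > BLK_SIZE)
			len = BLK_SIZE;

		err = dev_lock(dev);
		if (err)
			return err;
		err = dev_read(dev, off, len, buf + off);
		dev_unlock(dev);	/* drop the lock after every block */
		if (err)
			return err;

		off += len;
	}
	return 0;
}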
This has some overhead since we are +	 * increasing the number of AQ commands, but it should always work +	 */ +	for (i = 0; i < num_blks; i++) { +		u32 read_sz = min_t(u32, ICE_DEVLINK_READ_BLK_SIZE, left); + +		status = ice_acquire_nvm(hw, ICE_RES_READ); +		if (status) { +			dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n", +				status, hw->adminq.sq_last_status); +			NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore"); +			vfree(nvm_data); +			return -EIO; +		} + +		status = ice_read_flat_nvm(hw, i * ICE_DEVLINK_READ_BLK_SIZE, +					   &read_sz, tmp, false); +		if (status) { +			dev_dbg(dev, "ice_read_flat_nvm failed after reading %u bytes, err %d aq_err %d\n", +				read_sz, status, hw->adminq.sq_last_status); +			NL_SET_ERR_MSG_MOD(extack, "Failed to read NVM contents"); +			ice_release_nvm(hw); +			vfree(nvm_data); +			return -EIO; +		}  		ice_release_nvm(hw); -		vfree(nvm_data); -		return status; -	} -	ice_release_nvm(hw); +		tmp += read_sz; +		left -= read_sz; +	}  	*data = nvm_data; diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c index 665a344fb9c0..3dc5662d62a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_fw_update.c +++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c @@ -736,7 +736,87 @@ static int ice_finalize_update(struct pldmfw *context)  	return 0;  } -static const struct pldmfw_ops ice_fwu_ops = { +struct ice_pldm_pci_record_id { +	u32 vendor; +	u32 device; +	u32 subsystem_vendor; +	u32 subsystem_device; +}; + +/** + * ice_op_pci_match_record - Check if a PCI device matches the record + * @context: PLDM fw update structure + * @record: list of records extracted from the PLDM image + * + * Determine if the PCI device associated with this device matches the record + * data provided. + * + * Searches the descriptor TLVs and extracts the relevant descriptor data into + * a pldm_pci_record_id. This is then compared against the PCI device ID + * information. + * + * Returns: true if the device matches the record, false otherwise. + */ +static bool +ice_op_pci_match_record(struct pldmfw *context, struct pldmfw_record *record) +{ +	struct pci_dev *pdev = to_pci_dev(context->dev); +	struct ice_pldm_pci_record_id id = { +		.vendor = PCI_ANY_ID, +		.device = PCI_ANY_ID, +		.subsystem_vendor = PCI_ANY_ID, +		.subsystem_device = PCI_ANY_ID, +	}; +	struct pldmfw_desc_tlv *desc; + +	list_for_each_entry(desc, &record->descs, entry) { +		u16 value; +		int *ptr; + +		switch (desc->type) { +		case PLDM_DESC_ID_PCI_VENDOR_ID: +			ptr = &id.vendor; +			break; +		case PLDM_DESC_ID_PCI_DEVICE_ID: +			ptr = &id.device; +			break; +		case PLDM_DESC_ID_PCI_SUBVENDOR_ID: +			ptr = &id.subsystem_vendor; +			break; +		case PLDM_DESC_ID_PCI_SUBDEV_ID: +			ptr = &id.subsystem_device; +			break; +		default: +			/* Skip unrelated TLVs */ +			continue; +		} + +		value = get_unaligned_le16(desc->data); +		/* A value of zero for one of the descriptors is sometimes +		 * used when the record should ignore this field when matching +		 * device. For example if the record applies to any subsystem +		 * device or vendor. 
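/*
 * Illustrative sketch, not part of the patch above: ice_op_pci_match_record()
 * treats a descriptor field left at PCI_ANY_ID as a wildcard and additionally
 * accepts the generic E822 default device ID.  A stand-alone rendering of that
 * wildcard match rule with invented types:
 */
#include <stdbool.h>
#include <stdint.h>

#define ANY_ID 0xffffffffu		/* stand-in for PCI_ANY_ID */

struct pci_id {
	uint32_t vendor, device, subsys_vendor, subsys_device;
};

static bool field_matches(uint32_t rec, uint32_t dev)
{
	return rec == ANY_ID || rec == dev;	/* ANY_ID matches everything */
}

static bool record_matches(const struct pci_id *rec, const struct pci_id *dev,
			   uint32_t generic_dev_id)
{
	return field_matches(rec->vendor, dev->vendor) &&
	       (field_matches(rec->device, dev->device) ||
		rec->device == generic_dev_id) &&
	       field_matches(rec->subsys_vendor, dev->subsys_vendor) &&
	       field_matches(rec->subsys_device, dev->subsys_device);
}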
+		 */ +		if (value) +			*ptr = value; +		else +			*ptr = PCI_ANY_ID; +	} + +	/* the E822 device can have a generic device ID so check for that */ +	if ((id.vendor == PCI_ANY_ID || id.vendor == pdev->vendor) && +	    (id.device == PCI_ANY_ID || id.device == pdev->device || +	    id.device == ICE_DEV_ID_E822_SI_DFLT) && +	    (id.subsystem_vendor == PCI_ANY_ID || +	    id.subsystem_vendor == pdev->subsystem_vendor) && +	    (id.subsystem_device == PCI_ANY_ID || +	    id.subsystem_device == pdev->subsystem_device)) +		return true; + +	return false; +} + +static const struct pldmfw_ops ice_fwu_ops_e810 = {  	.match_record = &pldmfw_op_pci_match_record,  	.send_package_data = &ice_send_package_data,  	.send_component_table = &ice_send_component_table, @@ -744,6 +824,14 @@ static const struct pldmfw_ops ice_fwu_ops = {  	.finalize_update = &ice_finalize_update,  }; +static const struct pldmfw_ops ice_fwu_ops_e822 = { +	.match_record = &ice_op_pci_match_record, +	.send_package_data = &ice_send_package_data, +	.send_component_table = &ice_send_component_table, +	.flash_component = &ice_flash_component, +	.finalize_update = &ice_finalize_update, +}; +  /**   * ice_get_pending_updates - Check if the component has a pending update   * @pf: the PF driver structure @@ -921,7 +1009,11 @@ int ice_devlink_flash_update(struct devlink *devlink,  	memset(&priv, 0, sizeof(priv)); -	priv.context.ops = &ice_fwu_ops; +	/* the E822 device needs a slightly different ops */ +	if (hw->mac_type == ICE_MAC_GENERIC) +		priv.context.ops = &ice_fwu_ops_e822; +	else +		priv.context.ops = &ice_fwu_ops_e810;  	priv.context.dev = dev;  	priv.extack = extack;  	priv.pf = pf; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c1ac2f746714..ff2eac2f8c64 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5413,6 +5413,7 @@ static const struct pci_device_id ice_pci_tbl[] = {  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },  	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 }, +	{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },  	/* required last entry */  	{ 0, }  }; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c index 3754d8aec76d..3c8116f16b4d 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_router.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c @@ -588,6 +588,7 @@ err_router_lib_init:  void prestera_router_fini(struct prestera_switch *sw)  { +	unregister_fib_notifier(&init_net, &sw->router->fib_nb);  	unregister_inetaddr_notifier(&sw->router->inetaddr_nb);  	unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);  	rhashtable_destroy(&sw->router->kern_fib_cache_ht); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 25f51f80a9b4..ba171c7f0a67 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -76,6 +76,7 @@ struct mlx5_tc_ct_priv {  	struct mlx5_ct_fs *fs;  	struct mlx5_ct_fs_ops *fs_ops;  	spinlock_t ht_lock; /* protects ft entries */ +	struct workqueue_struct *wq;  	struct mlx5_tc_ct_debugfs debugfs;  }; @@ -941,14 +942,11 @@ static void mlx5_tc_ct_entry_del_work(struct work_struct *work)  static void  __mlx5_tc_ct_entry_put(struct mlx5_ct_entry *entry)  { -	struct mlx5e_priv *priv; -  	if 
(!refcount_dec_and_test(&entry->refcnt))  		return; -	priv = netdev_priv(entry->ct_priv->netdev);  	INIT_WORK(&entry->work, mlx5_tc_ct_entry_del_work); -	queue_work(priv->wq, &entry->work); +	queue_work(entry->ct_priv->wq, &entry->work);  }  static struct mlx5_ct_counter * @@ -1759,19 +1757,16 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)  static void  mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)  { -	struct mlx5e_priv *priv; -  	if (!refcount_dec_and_test(&ft->refcount))  		return; +	flush_workqueue(ct_priv->wq);  	nf_flow_table_offload_del_cb(ft->nf_ft,  				     mlx5_tc_ct_block_flow_offload, ft);  	rhashtable_remove_fast(&ct_priv->zone_ht, &ft->node, zone_params);  	rhashtable_free_and_destroy(&ft->ct_entries_ht,  				    mlx5_tc_ct_flush_ft_entry,  				    ct_priv); -	priv = netdev_priv(ct_priv->netdev); -	flush_workqueue(priv->wq);  	mlx5_tc_ct_free_pre_ct_tables(ft);  	mapping_remove(ct_priv->zone_mapping, ft->zone_restore_id);  	kfree(ft); @@ -2176,6 +2171,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,  	if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))  		goto err_ct_tuples_nat_ht; +	ct_priv->wq = alloc_ordered_workqueue("mlx5e_ct_priv_wq", 0); +	if (!ct_priv->wq) { +		err = -ENOMEM; +		goto err_wq; +	} +  	err = mlx5_tc_ct_fs_init(ct_priv);  	if (err)  		goto err_init_fs; @@ -2184,6 +2185,8 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,  	return ct_priv;  err_init_fs: +	destroy_workqueue(ct_priv->wq); +err_wq:  	rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);  err_ct_tuples_nat_ht:  	rhashtable_destroy(&ct_priv->ct_tuples_ht); @@ -2213,6 +2216,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)  	if (!ct_priv)  		return; +	destroy_workqueue(ct_priv->wq);  	mlx5_ct_tc_remove_dbgfs(ct_priv);  	chains = ct_priv->chains; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 0bb0633b7542..27483aa7be8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx,  	struct mlx5e_ktls_offload_context_rx **ctx =  		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); -	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) > -		     TLS_OFFLOAD_CONTEXT_SIZE_RX); +	BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX);  	*ctx = priv_rx;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 4b6f0d1ea59a..f239fb2e832f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -68,8 +68,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx,  	struct mlx5e_ktls_offload_context_tx **ctx =  		__tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); -	BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) > -		     TLS_OFFLOAD_CONTEXT_SIZE_TX); +	BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX);  	*ctx = priv_tx;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 57fa0489eeb8..1e87bb2b7541 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -688,7 +688,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)  	
u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {};  	struct mlx5_core_dev *mdev = priv->mdev; -	if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) +	if (!mlx5e_stats_grp_vnic_env_num_stats(priv))  		return;  	MLX5_SET(query_vnic_env_in, in, opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3a39a50146dd..9ca2c8763237 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3793,7 +3793,7 @@ static bool is_lag_dev(struct mlx5e_priv *priv,  static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)  { -	if (mlx5e_eswitch_uplink_rep(out_dev) && +	if (same_hw_reps(priv, out_dev) &&  	    MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&  	    MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))  		return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 50d14cec4894..9a7250be229f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -341,6 +341,26 @@ static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)  	}  } +static void mlx5e_tx_flush(struct mlx5e_txqsq *sq) +{ +	struct mlx5e_tx_wqe_info *wi; +	struct mlx5e_tx_wqe *wqe; +	u16 pi; + +	/* Must not be called when a MPWQE session is active but empty. */ +	mlx5e_tx_mpwqe_ensure_complete(sq); + +	pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); +	wi = &sq->db.wqe_info[pi]; + +	*wi = (struct mlx5e_tx_wqe_info) { +		.num_wqebbs = 1, +	}; + +	wqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); +	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &wqe->ctrl); +} +  static inline void  mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,  		     const struct mlx5e_tx_attr *attr, @@ -459,6 +479,7 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,  err_drop:  	stats->dropped++;  	dev_kfree_skb_any(skb); +	mlx5e_tx_flush(sq);  }  static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr) @@ -560,6 +581,13 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,  	struct mlx5_wqe_ctrl_seg *cseg;  	struct mlx5e_xmit_data txd; +	txd.data = skb->data; +	txd.len = skb->len; + +	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE); +	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr))) +		goto err_unmap; +  	if (!mlx5e_tx_mpwqe_session_is_active(sq)) {  		mlx5e_tx_mpwqe_session_start(sq, eseg);  	} else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) { @@ -569,18 +597,9 @@ mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,  	sq->stats->xmit_more += xmit_more; -	txd.data = skb->data; -	txd.len = skb->len; - -	txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE); -	if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr))) -		goto err_unmap;  	mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE); -  	mlx5e_skb_fifo_push(&sq->db.skb_fifo, skb); -  	mlx5e_tx_mpwqe_add_dseg(sq, &txd); -  	mlx5e_tx_skb_update_hwts_flags(skb);  	if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe, sq->max_sq_mpw_wqebbs))) { @@ -602,6 +621,7 @@ err_unmap:  	mlx5e_dma_unmap_wqe_err(sq, 1);  	sq->stats->dropped++;  	dev_kfree_skb_any(skb); +	mlx5e_tx_flush(sq);  }  void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq) @@ -1006,5 +1026,6 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,  err_drop:  	
stats->dropped++;  	dev_kfree_skb_any(skb); +	mlx5e_tx_flush(sq);  }  #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index 9d17206d1625..fabe49a35a5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -11,6 +11,7 @@  #include "mlx5_core.h"  #include "eswitch.h"  #include "fs_core.h" +#include "fs_ft_pool.h"  #include "esw/qos.h"  enum { @@ -95,8 +96,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)  	if (!flow_group_in)  		return -ENOMEM; -	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); -	ft_attr.max_fte = table_size; +	ft_attr.max_fte = POOL_NEXT_SIZE;  	ft_attr.prio = LEGACY_FDB_PRIO;  	fdb = mlx5_create_flow_table(root_ns, &ft_attr);  	if (IS_ERR(fdb)) { @@ -105,6 +105,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)  		goto out;  	}  	esw->fdb_table.legacy.fdb = fdb; +	table_size = fdb->max_fte;  	/* Addresses group : Full match unicast/multicast addresses */  	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c index 15e41dc84d53..b8feaf0f5c4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/debugfs.c @@ -44,7 +44,7 @@ static int port_sel_mode_show(struct seq_file *file, void *priv)  	ldev = dev->priv.lag;  	mutex_lock(&ldev->lock);  	if (__mlx5_lag_is_active(ldev)) -		mode = mlx5_get_str_port_sel_mode(ldev); +		mode = mlx5_get_str_port_sel_mode(ldev->mode, ldev->mode_flags);  	else  		ret = -EINVAL;  	mutex_unlock(&ldev->lock); @@ -72,6 +72,7 @@ static int state_show(struct seq_file *file, void *priv)  static int flags_show(struct seq_file *file, void *priv)  {  	struct mlx5_core_dev *dev = file->private; +	bool fdb_sel_mode_native;  	struct mlx5_lag *ldev;  	bool shared_fdb;  	bool lag_active; @@ -79,14 +80,21 @@ static int flags_show(struct seq_file *file, void *priv)  	ldev = dev->priv.lag;  	mutex_lock(&ldev->lock);  	lag_active = __mlx5_lag_is_active(ldev); -	if (lag_active) -		shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); +	if (!lag_active) +		goto unlock; + +	shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags); +	fdb_sel_mode_native = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, +				       &ldev->mode_flags); +unlock:  	mutex_unlock(&ldev->lock);  	if (!lag_active)  		return -EINVAL;  	seq_printf(file, "%s:%s\n", "shared_fdb", shared_fdb ? "on" : "off"); +	seq_printf(file, "%s:%s\n", "fdb_selection_mode", +		   fdb_sel_mode_native ? 
"native" : "affinity");  	return 0;  } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 2a8fc547eb37..5d41e19378e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -68,14 +68,15 @@ static int get_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)  static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode,  			       unsigned long flags)  { -	bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags); +	bool fdb_sel_mode = test_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, +				     &flags);  	int port_sel_mode = get_port_sel_mode(mode, flags);  	u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};  	void *lag_ctx;  	lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);  	MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); -	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); +	MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode);  	if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) {  		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]);  		MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); @@ -471,8 +472,13 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,  	bool roce_lag = mode == MLX5_LAG_MODE_ROCE;  	*flags = 0; -	if (shared_fdb) +	if (shared_fdb) {  		set_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, flags); +		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags); +	} + +	if (mode == MLX5_LAG_MODE_MPESW) +		set_bit(MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE, flags);  	if (roce_lag)  		return mlx5_lag_set_port_sel_mode_roce(ldev, flags); @@ -481,9 +487,9 @@ static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,  	return 0;  } -char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev) +char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)  { -	int port_sel_mode = get_port_sel_mode(ldev->mode, ldev->mode_flags); +	int port_sel_mode = get_port_sel_mode(mode, flags);  	switch (port_sel_mode) {  	case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: return "queue_affinity"; @@ -507,7 +513,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,  	if (tracker)  		mlx5_lag_print_mapping(dev0, ldev, tracker, flags);  	mlx5_core_info(dev0, "shared_fdb:%d mode:%s\n", -		       shared_fdb, mlx5_get_str_port_sel_mode(ldev)); +		       shared_fdb, mlx5_get_str_port_sel_mode(mode, flags));  	err = mlx5_cmd_create_lag(dev0, ldev->v2p_map, mode, flags);  	if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index c81b173156d2..ce2ce8ccbd70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -24,6 +24,7 @@ enum {  enum {  	MLX5_LAG_MODE_FLAG_HASH_BASED,  	MLX5_LAG_MODE_FLAG_SHARED_FDB, +	MLX5_LAG_MODE_FLAG_FDB_SEL_MODE_NATIVE,  };  enum mlx5_lag_mode { @@ -114,7 +115,7 @@ bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);  void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev);  int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev); -char *mlx5_get_str_port_sel_mode(struct mlx5_lag *ldev); +char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);  void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,  			   u8 *ports, int *num_enabled); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c index 
ee4b25a50315..f643202b29c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c @@ -41,7 +41,6 @@ void mlx5_lag_del_mpesw_rule(struct mlx5_core_dev *dev)  int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)  {  	struct mlx5_lag *ldev = dev->priv.lag; -	bool shared_fdb;  	int err = 0;  	if (!ldev) @@ -55,8 +54,8 @@ int mlx5_lag_add_mpesw_rule(struct mlx5_core_dev *dev)  		err = -EINVAL;  		goto out;  	} -	shared_fdb = mlx5_shared_fdb_supported(ldev); -	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, shared_fdb); + +	err = mlx5_activate_lag(ldev, NULL, MLX5_LAG_MODE_MPESW, false);  	if (err)  		mlx5_core_warn(dev, "Failed to create LAG in MPESW mode (%d)\n", err); diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c index 083fddd263ec..8e3894cf5f7c 100644 --- a/drivers/net/ethernet/mscc/ocelot_fdma.c +++ b/drivers/net/ethernet/mscc/ocelot_fdma.c @@ -94,19 +94,18 @@ static void ocelot_fdma_activate_chan(struct ocelot *ocelot, dma_addr_t dma,  	ocelot_fdma_writel(ocelot, MSCC_FDMA_CH_ACTIVATE, BIT(chan));  } +static u32 ocelot_fdma_read_ch_safe(struct ocelot *ocelot) +{ +	return ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE); +} +  static int ocelot_fdma_wait_chan_safe(struct ocelot *ocelot, int chan)  { -	unsigned long timeout;  	u32 safe; -	timeout = jiffies + usecs_to_jiffies(OCELOT_FDMA_CH_SAFE_TIMEOUT_US); -	do { -		safe = ocelot_fdma_readl(ocelot, MSCC_FDMA_CH_SAFE); -		if (safe & BIT(chan)) -			return 0; -	} while (time_after(jiffies, timeout)); - -	return -ETIMEDOUT; +	return readx_poll_timeout_atomic(ocelot_fdma_read_ch_safe, ocelot, safe, +					 safe & BIT(chan), 0, +					 OCELOT_FDMA_CH_SAFE_TIMEOUT_US);  }  static void ocelot_fdma_dcb_set_data(struct ocelot_fdma_dcb *dcb, diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 6bf3ec448e7e..97dcf8db7ed2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -447,7 +447,8 @@ void nfp_tun_unlink_and_update_nn_entries(struct nfp_app *app,  static void  nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app, -		    void *flow, struct neighbour *neigh, bool is_ipv6) +		    void *flow, struct neighbour *neigh, bool is_ipv6, +		    bool override)  {  	bool neigh_invalid = !(neigh->nud_state & NUD_VALID) || neigh->dead;  	size_t neigh_size = is_ipv6 ? sizeof(struct nfp_tun_neigh_v6) : @@ -546,6 +547,13 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,  		if (nn_entry->flow)  			list_del(&nn_entry->list_head);  		kfree(nn_entry); +	} else if (nn_entry && !neigh_invalid && override) { +		mtype = is_ipv6 ? 
NFP_FLOWER_CMSG_TYPE_TUN_NEIGH_V6 : +				NFP_FLOWER_CMSG_TYPE_TUN_NEIGH; +		nfp_tun_link_predt_entries(app, nn_entry); +		nfp_flower_xmit_tun_conf(app, mtype, neigh_size, +					 nn_entry->payload, +					 GFP_ATOMIC);  	}  	spin_unlock_bh(&priv->predt_lock); @@ -610,7 +618,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,  			dst_release(dst);  		} -		nfp_tun_write_neigh(n->dev, app, &flow6, n, true); +		nfp_tun_write_neigh(n->dev, app, &flow6, n, true, false);  #else  		return NOTIFY_DONE;  #endif /* CONFIG_IPV6 */ @@ -633,7 +641,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,  			ip_rt_put(rt);  		} -		nfp_tun_write_neigh(n->dev, app, &flow4, n, false); +		nfp_tun_write_neigh(n->dev, app, &flow4, n, false, false);  	}  #else  	return NOTIFY_DONE; @@ -676,7 +684,7 @@ void nfp_tunnel_request_route_v4(struct nfp_app *app, struct sk_buff *skb)  	ip_rt_put(rt);  	if (!n)  		goto fail_rcu_unlock; -	nfp_tun_write_neigh(n->dev, app, &flow, n, false); +	nfp_tun_write_neigh(n->dev, app, &flow, n, false, true);  	neigh_release(n);  	rcu_read_unlock();  	return; @@ -718,7 +726,7 @@ void nfp_tunnel_request_route_v6(struct nfp_app *app, struct sk_buff *skb)  	if (!n)  		goto fail_rcu_unlock; -	nfp_tun_write_neigh(n->dev, app, &flow, n, true); +	nfp_tun_write_neigh(n->dev, app, &flow, n, true, true);  	neigh_release(n);  	rcu_read_unlock();  	return; diff --git a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c index e509d6dcba5c..805071d64a20 100644 --- a/drivers/net/ethernet/netronome/nfp/nfdk/dp.c +++ b/drivers/net/ethernet/netronome/nfp/nfdk/dp.c @@ -125,17 +125,18 @@ nfp_nfdk_tx_csum(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,  static int  nfp_nfdk_tx_maybe_close_block(struct nfp_net_tx_ring *tx_ring, -			      unsigned int nr_frags, struct sk_buff *skb) +			      struct sk_buff *skb)  {  	unsigned int n_descs, wr_p, nop_slots;  	const skb_frag_t *frag, *fend;  	struct nfp_nfdk_tx_desc *txd; +	unsigned int nr_frags;  	unsigned int wr_idx;  	int err;  recount_descs:  	n_descs = nfp_nfdk_headlen_to_segs(skb_headlen(skb)); - +	nr_frags = skb_shinfo(skb)->nr_frags;  	frag = skb_shinfo(skb)->frags;  	fend = frag + nr_frags;  	for (; frag < fend; frag++) @@ -281,10 +282,13 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)  	if (unlikely((int)metadata < 0))  		goto err_flush; -	nr_frags = skb_shinfo(skb)->nr_frags; -	if (nfp_nfdk_tx_maybe_close_block(tx_ring, nr_frags, skb)) +	if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))  		goto err_flush; +	/* nr_frags will change after skb_linearize, so we read nr_frags after +	 * the nfp_nfdk_tx_maybe_close_block() call +	 */ +	nr_frags = skb_shinfo(skb)->nr_frags;  	/* DMA map all */  	wr_idx = D_IDX(tx_ring, tx_ring->wr_p);  	txd = &tx_ring->ktxds[wr_idx]; @@ -310,7 +314,16 @@ netdev_tx_t nfp_nfdk_tx(struct sk_buff *skb, struct net_device *netdev)  	/* FIELD_PREP() implicitly truncates to chunk */  	dma_len -= 1; -	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) | + +	/* We will do our best to pass as much data as we can in the descriptor, +	 * and we need to make sure the first descriptor includes the whole head +	 * since there is a limitation on the firmware side. Sometimes the value of +	 * dma_len bitwise ANDed with NFDK_DESC_TX_DMA_LEN_HEAD will be less than +	 * headlen. +	 */ +	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, +			       dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ? 
+			       NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |  		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);  	txd->dma_len_type = cpu_to_le16(dlen_type); @@ -925,7 +938,9 @@ nfp_nfdk_tx_xdp_buf(struct nfp_net_dp *dp, struct nfp_net_rx_ring *rx_ring,  	/* FIELD_PREP() implicitly truncates to chunk */  	dma_len -= 1; -	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) | +	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, +			       dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ? +			       NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |  		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);  	txd->dma_len_type = cpu_to_le16(dlen_type); @@ -1303,7 +1318,7 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,  				   skb_push(skb, 4));  	} -	if (nfp_nfdk_tx_maybe_close_block(tx_ring, 0, skb)) +	if (nfp_nfdk_tx_maybe_close_block(tx_ring, skb))  		goto err_free;  	/* DMA map all */ @@ -1328,7 +1343,9 @@ nfp_nfdk_ctrl_tx_one(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,  	txbuf++;  	dma_len -= 1; -	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, dma_len) | +	dlen_type = FIELD_PREP(NFDK_DESC_TX_DMA_LEN_HEAD, +			       dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ? +			       NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |  		    FIELD_PREP(NFDK_DESC_TX_TYPE_HEAD, type);  	txd->dma_len_type = cpu_to_le16(dlen_type); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 186cb28c03bd..8b62ce21aff3 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1932,7 +1932,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)  	efx_update_sw_stats(efx, stats);  out: +	/* releasing a DMA coherent buffer with BH disabled can panic */ +	spin_unlock_bh(&efx->stats_lock);  	efx_nic_free_buffer(efx, &stats_buf); +	spin_lock_bh(&efx->stats_lock);  	return rc;  } diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c index 7f5aa4a8c451..92550c7e85ce 100644 --- a/drivers/net/ethernet/sfc/ef10_sriov.c +++ b/drivers/net/ethernet/sfc/ef10_sriov.c @@ -408,8 +408,9 @@ fail1:  static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)  {  	struct pci_dev *dev = efx->pci_dev; +	struct efx_ef10_nic_data *nic_data = efx->nic_data;  	unsigned int vfs_assigned = pci_vfs_assigned(dev); -	int rc = 0; +	int i, rc = 0;  	if (vfs_assigned && !force) {  		netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " @@ -417,10 +418,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)  		return -EBUSY;  	} -	if (!vfs_assigned) +	if (!vfs_assigned) { +		for (i = 0; i < efx->vf_count; i++) +			nic_data->vf[i].pci_dev = NULL;  		pci_disable_sriov(dev); -	else +	} else {  		rc = -EBUSY; +	}  	efx_ef10_sriov_free_vf_vswitching(efx);  	efx->vf_count = 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index bc91fd867dcd..358fc26f8d1f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c @@ -361,6 +361,7 @@ bypass_clk_reset_gpio:  	data->fix_mac_speed = tegra_eqos_fix_speed;  	data->init = tegra_eqos_init;  	data->bsp_priv = eqos; +	data->sph_disable = 1;  	err = tegra_eqos_init(pdev, eqos);  	if (err < 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c index 9a6d819b84ae..378b4dd826bb 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c @@ -273,7 +273,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)  			mac->tx_delay = tx_delay_ps * 1000;  		} else {  			dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps); -			return -EINVAL; +			ret = -EINVAL; +			goto err_remove_config_dt;  		}  	} @@ -283,7 +284,8 @@ static int ingenic_mac_probe(struct platform_device *pdev)  			mac->rx_delay = rx_delay_ps * 1000;  		} else {  			dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps); -			return -EINVAL; +			ret = -EINVAL; +			goto err_remove_config_dt;  		}  	} diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 77e5dffb558f..8594ee839628 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -545,43 +545,24 @@ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)  static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)  { -	printk(KERN_INFO "%s: Link is up using ", hp->dev->name); -	if (hp->tcvr_type == external) -		printk("external "); -	else -		printk("internal "); -	printk("transceiver at ");  	hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); -	if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) { -		if (hp->sw_lpa & LPA_100FULL) -			printk("100Mb/s, Full Duplex.\n"); -		else -			printk("100Mb/s, Half Duplex.\n"); -	} else { -		if (hp->sw_lpa & LPA_10FULL) -			printk("10Mb/s, Full Duplex.\n"); -		else -			printk("10Mb/s, Half Duplex.\n"); -	} + +	netdev_info(hp->dev, +		    "Link is up using %s transceiver at %dMb/s, %s Duplex.\n", +		    hp->tcvr_type == external ? "external" : "internal", +		    hp->sw_lpa & (LPA_100HALF | LPA_100FULL) ? 100 : 10, +		    hp->sw_lpa & (LPA_100FULL | LPA_10FULL) ? "Full" : "Half");  }  static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)  { -	printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name); -	if (hp->tcvr_type == external) -		printk("external "); -	else -		printk("internal "); -	printk("transceiver at ");  	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); -	if (hp->sw_bmcr & BMCR_SPEED100) -		printk("100Mb/s, "); -	else -		printk("10Mb/s, "); -	if (hp->sw_bmcr & BMCR_FULLDPLX) -		printk("Full Duplex.\n"); -	else -		printk("Half Duplex.\n"); + +	netdev_info(hp->dev, +		    "Link has been forced up using %s transceiver at %dMb/s, %s Duplex.\n", +		    hp->tcvr_type == external ? "external" : "internal", +		    hp->sw_bmcr & BMCR_SPEED100 ? 100 : 10, +		    hp->sw_bmcr & BMCR_FULLDPLX ? 
"Full" : "Half");  }  static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index fb92d4c1547d..f4a6b590a1e3 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2467,7 +2467,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)  				port->port_id, ret);  			goto dl_port_unreg;  		} -		devlink_port_type_eth_set(dl_port, port->ndev);  	}  	devlink_register(common->devlink);  	return ret; @@ -2511,6 +2510,7 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)  static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)  {  	struct device *dev = common->dev; +	struct devlink_port *dl_port;  	struct am65_cpsw_port *port;  	int ret = 0, i; @@ -2527,6 +2527,10 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)  		return ret;  	} +	ret = am65_cpsw_nuss_register_devlink(common); +	if (ret) +		return ret; +  	for (i = 0; i < common->port_num; i++) {  		port = &common->ports[i]; @@ -2539,25 +2543,24 @@ static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)  				i, ret);  			goto err_cleanup_ndev;  		} + +		dl_port = &port->devlink_port; +		devlink_port_type_eth_set(dl_port, port->ndev);  	}  	ret = am65_cpsw_register_notifiers(common);  	if (ret)  		goto err_cleanup_ndev; -	ret = am65_cpsw_nuss_register_devlink(common); -	if (ret) -		goto clean_unregister_notifiers; -  	/* can't auto unregister ndev using devm_add_action() due to  	 * devres release sequence in DD core for DMA  	 */  	return 0; -clean_unregister_notifiers: -	am65_cpsw_unregister_notifiers(common); +  err_cleanup_ndev:  	am65_cpsw_nuss_cleanup_ndev(common); +	am65_cpsw_unregister_devlink(common);  	return ret;  } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 7389d6ef8569..b082819509e1 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -2156,7 +2156,7 @@ static inline void rtl_rx_vlan_tag(struct rx_desc *desc, struct sk_buff *skb)  }  static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, -			 struct sk_buff *skb, u32 len, u32 transport_offset) +			 struct sk_buff *skb, u32 len)  {  	u32 mss = skb_shinfo(skb)->gso_size;  	u32 opts1, opts2 = 0; @@ -2167,6 +2167,8 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,  	opts1 = len | TX_FS | TX_LS;  	if (mss) { +		u32 transport_offset = (u32)skb_transport_offset(skb); +  		if (transport_offset > GTTCPHO_MAX) {  			netif_warn(tp, tx_err, tp->netdev,  				   "Invalid transport offset 0x%x for TSO\n", @@ -2197,6 +2199,7 @@ static int r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc,  		opts1 |= transport_offset << GTTCPHO_SHIFT;  		opts2 |= min(mss, MSS_MAX) << MSS_SHIFT;  	} else if (skb->ip_summed == CHECKSUM_PARTIAL) { +		u32 transport_offset = (u32)skb_transport_offset(skb);  		u8 ip_protocol;  		if (transport_offset > TCPHO_MAX) { @@ -2260,7 +2263,6 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)  		struct tx_desc *tx_desc;  		struct sk_buff *skb;  		unsigned int len; -		u32 offset;  		skb = __skb_dequeue(&skb_head);  		if (!skb) @@ -2276,9 +2278,7 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)  		tx_data = tx_agg_align(tx_data);  		tx_desc = (struct tx_desc *)tx_data; -		offset = (u32)skb_transport_offset(skb); - -		if (r8152_tx_csum(tp, tx_desc, skb, skb->len, offset)) { +		if (r8152_tx_csum(tp, 
tx_desc, skb, skb->len)) {  			r8152_csum_workaround(tp, skb, &skb_head);  			continue;  		} @@ -2759,9 +2759,9 @@ rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,  {  	u32 mss = skb_shinfo(skb)->gso_size;  	int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX; -	int offset = skb_transport_offset(skb); -	if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) +	if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && +	    skb_transport_offset(skb) > max_offset)  		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);  	else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)  		features &= ~NETIF_F_GSO_MASK; diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 84d1c7054013..7b1dc19c565e 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -3822,7 +3822,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk  	switch (ev->evt_type) {  	case WMI_BSS_COLOR_COLLISION_DETECTION: -		ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap); +		ieeee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap, +						       GFP_KERNEL);  		ath11k_dbg(ab, ATH11K_DBG_WMI,  			   "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",  			   ev->vdev_id, ev->evt_type, ev->obss_color_bitmap); diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 2f746eb64507..6f83af849f2e 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -4912,6 +4912,8 @@ static int hwsim_virtio_probe(struct virtio_device *vdev)  	if (err)  		return err; +	virtio_device_ready(vdev); +  	err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]);  	if (err)  		goto out_remove; diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index c02be4ac159e..7db627fc26be 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1233,9 +1233,6 @@ struct rtw_chip_info {  	const struct wiphy_wowlan_support *wowlan_stub;  	const u8 max_sched_scan_ssids; -	/* for 8821c set channel */ -	u32 ch_param[3]; -  	/* coex paras */  	u32 coex_para_ver;  	u8 bt_desired_ver; @@ -1937,6 +1934,9 @@ struct rtw_hal {  	enum rtw_sar_bands sar_band;  	struct rtw_sar sar; + +	/* for 8821c set channel */ +	u32 ch_param[3];  };  struct rtw_path_div { diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index ffee39ea5df6..488a7ddd507c 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -125,6 +125,7 @@ static void rtw8821c_phy_bf_init(struct rtw_dev *rtwdev)  static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)  { +	struct rtw_hal *hal = &rtwdev->hal;  	u8 crystal_cap, val;  	/* power on BB/RF domain */ @@ -159,9 +160,9 @@ static void rtw8821c_phy_set_param(struct rtw_dev *rtwdev)  	/* post init after header files config */  	rtw_write32_set(rtwdev, REG_RXPSEL, BIT_RX_PSEL_RST); -	rtwdev->chip->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD); -	rtwdev->chip->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD); -	rtwdev->chip->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD); +	hal->ch_param[0] = rtw_read32_mask(rtwdev, REG_TXSF2, MASKDWORD); +	hal->ch_param[1] = rtw_read32_mask(rtwdev, REG_TXSF6, MASKDWORD); +	hal->ch_param[2] = rtw_read32_mask(rtwdev, REG_TXFILTER, MASKDWORD); 
 	rtw_phy_init(rtwdev);  	rtwdev->dm_info.cck_pd_default = rtw_read8(rtwdev, REG_CSRATIO) & 0x1f; @@ -351,6 +352,7 @@ static void rtw8821c_set_channel_rxdfir(struct rtw_dev *rtwdev, u8 bw)  static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,  				    u8 primary_ch_idx)  { +	struct rtw_hal *hal = &rtwdev->hal;  	u32 val32;  	if (channel <= 14) { @@ -367,11 +369,11 @@ static void rtw8821c_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,  			rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, 0x00003667);  		} else {  			rtw_write32_mask(rtwdev, REG_TXSF2, MASKDWORD, -					 rtwdev->chip->ch_param[0]); +					 hal->ch_param[0]);  			rtw_write32_mask(rtwdev, REG_TXSF6, MASKLWORD, -					 rtwdev->chip->ch_param[1] & MASKLWORD); +					 hal->ch_param[1] & MASKLWORD);  			rtw_write32_mask(rtwdev, REG_TXFILTER, MASKDWORD, -					 rtwdev->chip->ch_param[2]); +					 hal->ch_param[2]);  		}  	} else if (channel > 35) {  		rtw_write32_mask(rtwdev, REG_ENTXCCK, BIT(18), 0x1); diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index dbac4c03d21a..a0335407be42 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue)  	queue->rx_copy.completed = &completed_skbs;  	while (xenvif_rx_ring_slots_available(queue) && +	       !skb_queue_empty(&queue->rx_queue) &&  	       work_done < RX_BATCH_SIZE) {  		xenvif_rx_skb(queue);  		work_done++; diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index f11d18beac18..700eb19e8450 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -91,6 +91,8 @@  #define AMD_CPU_ID_PCO			AMD_CPU_ID_RV  #define AMD_CPU_ID_CZN			AMD_CPU_ID_RN  #define AMD_CPU_ID_YC			0x14B5 +#define AMD_CPU_ID_CB			0x14D8 +#define AMD_CPU_ID_PS			0x14E8  #define PMC_MSG_DELAY_MIN_US		50  #define RESPONSE_REGISTER_LOOP_MAX	20000 @@ -318,6 +320,8 @@ static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,  		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);  		break;  	case AMD_CPU_ID_YC: +	case AMD_CPU_ID_CB: +	case AMD_CPU_ID_PS:  		val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);  		break;  	default: @@ -491,7 +495,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)  			    &amd_pmc_idlemask_fops);  	/* Enable STB only when the module_param is set */  	if (enable_stb) { -		if (dev->cpu_id == AMD_CPU_ID_YC) +		if (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB || +		    dev->cpu_id == AMD_CPU_ID_PS)  			debugfs_create_file("stb_read", 0644, dev->dbgfs_dir, dev,  					    &amd_pmc_stb_debugfs_fops_v2);  		else @@ -615,6 +620,8 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)  		return MSG_OS_HINT_PCO;  	case AMD_CPU_ID_RN:  	case AMD_CPU_ID_YC: +	case AMD_CPU_ID_CB: +	case AMD_CPU_ID_PS:  		return MSG_OS_HINT_RN;  	}  	return -EINVAL; @@ -735,6 +742,8 @@ static struct acpi_s2idle_dev_ops amd_pmc_s2idle_dev_ops = {  #endif  static const struct pci_device_id pmc_pci_ids[] = { +	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) }, +	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CB) },  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_YC) },  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_CZN) },  	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RN) }, @@ -877,7 +886,7 @@ static int amd_pmc_probe(struct platform_device *pdev)  	mutex_init(&dev->lock); -	if (enable_stb && dev->cpu_id == AMD_CPU_ID_YC) { +	if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || 
dev->cpu_id == AMD_CPU_ID_CB)) {  		err = amd_pmc_s2d_init(dev);  		if (err)  			return err; @@ -915,6 +924,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = {  	{"AMDI0005", 0},  	{"AMDI0006", 0},  	{"AMDI0007", 0}, +	{"AMDI0008", 0},  	{"AMD0004", 0},  	{"AMD0005", 0},  	{ } diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 57a07db659cb..478dd300b9c9 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -522,6 +522,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {  	{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },  	{ KE_KEY, 0x32, { KEY_MUTE } },  	{ KE_KEY, 0x35, { KEY_SCREENLOCK } }, +	{ KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */  	{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },  	{ KE_KEY, 0x41, { KEY_NEXTSONG } },  	{ KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */ @@ -574,6 +575,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {  	{ KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */  	{ KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */  	{ KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */ +	{ KE_KEY, 0xB3, { KEY_PROG4 } }, /* AURA */  	{ KE_KEY, 0xB5, { KEY_CALC } },  	{ KE_KEY, 0xC4, { KEY_KBDILLUMUP } },  	{ KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } }, diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c index 497ad2f64a51..5e7e6659a849 100644 --- a/drivers/platform/x86/gigabyte-wmi.c +++ b/drivers/platform/x86/gigabyte-wmi.c @@ -150,6 +150,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = {  	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M AORUS PRO-P"),  	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550M DS3H"),  	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660 GAMING X DDR4"), +	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B660I AORUS PRO DDR4"),  	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z390 I AORUS PRO WIFI-CF"),  	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z490 AORUS ELITE AC"),  	DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 AORUS ELITE"), diff --git a/drivers/platform/x86/intel/atomisp2/led.c b/drivers/platform/x86/intel/atomisp2/led.c index 5935dfca166f..10077a61d8c5 100644 --- a/drivers/platform/x86/intel/atomisp2/led.c +++ b/drivers/platform/x86/intel/atomisp2/led.c @@ -50,7 +50,8 @@ static const struct dmi_system_id atomisp2_led_systems[] __initconst = {  	{  		.matches = {  			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), -			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), +			/* Non exact match to also match T100TAF */ +			DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),  		},  		.driver_data = &asus_t100ta_lookup,  	}, diff --git a/drivers/platform/x86/intel/ifs/Kconfig b/drivers/platform/x86/intel/ifs/Kconfig index 7ce896434b8f..c341a27cc1a3 100644 --- a/drivers/platform/x86/intel/ifs/Kconfig +++ b/drivers/platform/x86/intel/ifs/Kconfig @@ -1,6 +1,9 @@  config INTEL_IFS  	tristate "Intel In Field Scan"  	depends on X86 && CPU_SUP_INTEL && 64BIT && SMP +	# Discussion on the list has shown that the sysfs API needs a bit +	# more work, mark this as broken for now +	depends on BROKEN  	select INTEL_IFS_DEVICE  	help  	  Enable support for the In Field Scan capability in select diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index f446be72e539..480375977435 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -27,8 +27,8 @@  #include <linux/pinctrl/machine.h>  #include 
<linux/platform_data/lp855x.h>  #include <linux/platform_device.h> -#include <linux/pm.h>  #include <linux/power/bq24190_charger.h> +#include <linux/reboot.h>  #include <linux/rmi.h>  #include <linux/serdev.h>  #include <linux/spi/spi.h> @@ -889,6 +889,7 @@ static const struct pinctrl_map lenovo_yoga_tab2_830_1050_codec_pinctrl_map =  			  "INT33FC:02", "pmu_clk2_grp", "pmu_clk");  static struct pinctrl *lenovo_yoga_tab2_830_1050_codec_pinctrl; +static struct sys_off_handler *lenovo_yoga_tab2_830_1050_sys_off_handler;  static int __init lenovo_yoga_tab2_830_1050_init_codec(void)  { @@ -933,9 +934,11 @@ err_put_device:   * followed by a normal 3 second press to recover. Avoid this by doing an EFI   * poweroff instead.   */ -static void lenovo_yoga_tab2_830_1050_power_off(void) +static int lenovo_yoga_tab2_830_1050_power_off(struct sys_off_data *data)  {  	efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); + +	return NOTIFY_DONE;  }  static int __init lenovo_yoga_tab2_830_1050_init(void) @@ -950,13 +953,19 @@ static int __init lenovo_yoga_tab2_830_1050_init(void)  	if (ret)  		return ret; -	pm_power_off = lenovo_yoga_tab2_830_1050_power_off; +	/* SYS_OFF_PRIO_FIRMWARE + 1 so that it runs before acpi_power_off */ +	lenovo_yoga_tab2_830_1050_sys_off_handler = +		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, SYS_OFF_PRIO_FIRMWARE + 1, +					 lenovo_yoga_tab2_830_1050_power_off, NULL); +	if (IS_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler)) +		return PTR_ERR(lenovo_yoga_tab2_830_1050_sys_off_handler); +  	return 0;  }  static void lenovo_yoga_tab2_830_1050_exit(void)  { -	pm_power_off = NULL; /* Just turn poweroff into halt on module unload */ +	unregister_sys_off_handler(lenovo_yoga_tab2_830_1050_sys_off_handler);  	if (lenovo_yoga_tab2_830_1050_codec_pinctrl) {  		pinctrl_put(lenovo_yoga_tab2_830_1050_codec_pinctrl); diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index cba6a4486c24..efdcbe6c4c26 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -33,6 +33,7 @@  #define AMD_SPI_RX_COUNT_REG	0x4B  #define AMD_SPI_STATUS_REG	0x4C +#define AMD_SPI_FIFO_SIZE	70  #define AMD_SPI_MEM_SIZE	200  /* M_CMD OP codes for SPI */ @@ -270,6 +271,11 @@ static int amd_spi_master_transfer(struct spi_master *master,  	return 0;  } +static size_t amd_spi_max_transfer_size(struct spi_device *spi) +{ +	return AMD_SPI_FIFO_SIZE; +} +  static int amd_spi_probe(struct platform_device *pdev)  {  	struct device *dev = &pdev->dev; @@ -302,6 +308,8 @@ static int amd_spi_probe(struct platform_device *pdev)  	master->flags = SPI_MASTER_HALF_DUPLEX;  	master->setup = amd_spi_master_setup;  	master->transfer_one_message = amd_spi_master_transfer; +	master->max_transfer_size = amd_spi_max_transfer_size; +	master->max_message_size = amd_spi_max_transfer_size;  	/* Register the controller with SPI framework */  	err = devm_spi_register_master(dev, master); diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c index 496f3e1e9079..3e891bf22470 100644 --- a/drivers/spi/spi-aspeed-smc.c +++ b/drivers/spi/spi-aspeed-smc.c @@ -558,6 +558,14 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)  	u32 ctl_val;  	int ret = 0; +	dev_dbg(aspi->dev, +		"CE%d %s dirmap [ 0x%.8llx - 0x%.8llx ] OP %#x mode:%d.%d.%d.%d naddr:%#x ndummies:%#x\n", +		chip->cs, op->data.dir == SPI_MEM_DATA_IN ? 
"read" : "write", +		desc->info.offset, desc->info.offset + desc->info.length, +		op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth, +		op->dummy.buswidth, op->data.buswidth, +		op->addr.nbytes, op->dummy.nbytes); +  	chip->clk_freq = desc->mem->spi->max_speed_hz;  	/* Only for reads */ @@ -574,9 +582,11 @@ static int aspeed_spi_dirmap_create(struct spi_mem_dirmap_desc *desc)  	ctl_val = readl(chip->ctl) & ~CTRL_IO_CMD_MASK;  	ctl_val |= aspeed_spi_get_io_mode(op) |  		op->cmd.opcode << CTRL_COMMAND_SHIFT | -		CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth) |  		CTRL_IO_MODE_READ; +	if (op->dummy.nbytes) +		ctl_val |= CTRL_IO_DUMMY_SET(op->dummy.nbytes / op->dummy.buswidth); +  	/* Tune 4BYTE address mode */  	if (op->addr.nbytes) {  		u32 addr_mode = readl(aspi->regs + CE_CTRL_REG); diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 2b9fc8449a62..72b1a5a2298c 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1578,8 +1578,7 @@ static int cqspi_probe(struct platform_device *pdev)  	ret = cqspi_of_get_pdata(cqspi);  	if (ret) {  		dev_err(dev, "Cannot get mandatory OF data.\n"); -		ret = -ENODEV; -		goto probe_master_put; +		return -ENODEV;  	}  	/* Obtain QSPI clock. */ @@ -1587,7 +1586,7 @@ static int cqspi_probe(struct platform_device *pdev)  	if (IS_ERR(cqspi->clk)) {  		dev_err(dev, "Cannot claim QSPI clock.\n");  		ret = PTR_ERR(cqspi->clk); -		goto probe_master_put; +		return ret;  	}  	/* Obtain and remap controller address. */ @@ -1596,7 +1595,7 @@ static int cqspi_probe(struct platform_device *pdev)  	if (IS_ERR(cqspi->iobase)) {  		dev_err(dev, "Cannot remap controller address.\n");  		ret = PTR_ERR(cqspi->iobase); -		goto probe_master_put; +		return ret;  	}  	/* Obtain and remap AHB address. */ @@ -1605,7 +1604,7 @@ static int cqspi_probe(struct platform_device *pdev)  	if (IS_ERR(cqspi->ahb_base)) {  		dev_err(dev, "Cannot remap AHB address.\n");  		ret = PTR_ERR(cqspi->ahb_base); -		goto probe_master_put; +		return ret;  	}  	cqspi->mmap_phys_base = (dma_addr_t)res_ahb->start;  	cqspi->ahb_size = resource_size(res_ahb); @@ -1614,15 +1613,13 @@ static int cqspi_probe(struct platform_device *pdev)  	/* Obtain IRQ line. 
*/  	irq = platform_get_irq(pdev, 0); -	if (irq < 0) { -		ret = -ENXIO; -		goto probe_master_put; -	} +	if (irq < 0) +		return -ENXIO;  	pm_runtime_enable(dev);  	ret = pm_runtime_resume_and_get(dev);  	if (ret < 0) -		goto probe_master_put; +		return ret;  	ret = clk_prepare_enable(cqspi->clk);  	if (ret) { @@ -1716,8 +1713,6 @@ probe_reset_failed:  probe_clk_failed:  	pm_runtime_put_sync(dev);  	pm_runtime_disable(dev); -probe_master_put: -	spi_master_put(master);  	return ret;  } diff --git a/drivers/tee/optee/optee_smc.h b/drivers/tee/optee/optee_smc.h index c60896cf71cb..73b5e7760d10 100644 --- a/drivers/tee/optee/optee_smc.h +++ b/drivers/tee/optee/optee_smc.h @@ -189,7 +189,7 @@ struct optee_smc_call_get_os_revision_result {   * Have config return register usage:   * a0	OPTEE_SMC_RETURN_OK   * a1	Physical address of start of SHM - * a2	Size of of SHM + * a2	Size of SHM   * a3	Cache settings of memory, as defined by the   *	OPTEE_SMC_SHM_* values above   * a4-7	Preserved diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 385cb0aee610..a1c1fa1a9c28 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -884,8 +884,8 @@ static int optee_smc_do_call_with_arg(struct tee_context *ctx,  		rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);  		rpc_arg = tee_shm_get_va(shm, offs + rpc_arg_offs); -		if (IS_ERR(arg)) -			return PTR_ERR(arg); +		if (IS_ERR(rpc_arg)) +			return PTR_ERR(rpc_arg);  	}  	if  (rpc_arg && tee_shm_is_dynamic(shm)) { diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index af0f7c603fa4..98da206cd761 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -1073,7 +1073,7 @@ EXPORT_SYMBOL_GPL(tee_device_unregister);  /**   * tee_get_drvdata() - Return driver_data pointer   * @teedev:	Device containing the driver_data pointer - * @returns the driver_data pointer supplied to tee_register(). + * @returns the driver_data pointer supplied to tee_device_alloc().   
*/  void *tee_get_drvdata(struct tee_device *teedev)  { diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 4b56c39f766d..84b143eef395 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -396,13 +396,15 @@ static void __unmap_grant_pages_done(int result,  	unsigned int offset = data->unmap_ops - map->unmap_ops;  	for (i = 0; i < data->count; i++) { -		WARN_ON(map->unmap_ops[offset+i].status); +		WARN_ON(map->unmap_ops[offset + i].status != GNTST_okay && +			map->unmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);  		pr_debug("unmap handle=%d st=%d\n",  			map->unmap_ops[offset+i].handle,  			map->unmap_ops[offset+i].status);  		map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE;  		if (use_ptemod) { -			WARN_ON(map->kunmap_ops[offset+i].status); +			WARN_ON(map->kunmap_ops[offset + i].status != GNTST_okay && +				map->kunmap_ops[offset + i].handle != INVALID_GRANT_HANDLE);  			pr_debug("kunmap handle=%u st=%d\n",  				 map->kunmap_ops[offset+i].handle,  				 map->kunmap_ops[offset+i].status); diff --git a/fs/afs/file.c b/fs/afs/file.c index 42118a4f3383..d1cfb235c4b9 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -375,7 +375,7 @@ static int afs_begin_cache_operation(struct netfs_io_request *rreq)  }  static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len, -				 struct folio *folio, void **_fsdata) +				 struct folio **foliop, void **_fsdata)  {  	struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 6dee88815491..d6e5916138e4 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -63,7 +63,7 @@  	 (CONGESTION_ON_THRESH(congestion_kb) >> 2))  static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, -					struct folio *folio, void **_fsdata); +					struct folio **foliop, void **_fsdata);  static inline struct ceph_snap_context *page_snap_context(struct page *page)  { @@ -1288,18 +1288,19 @@ ceph_find_incompatible(struct page *page)  }  static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len, -					struct folio *folio, void **_fsdata) +					struct folio **foliop, void **_fsdata)  {  	struct inode *inode = file_inode(file);  	struct ceph_inode_info *ci = ceph_inode(inode);  	struct ceph_snap_context *snapc; -	snapc = ceph_find_incompatible(folio_page(folio, 0)); +	snapc = ceph_find_incompatible(folio_page(*foliop, 0));  	if (snapc) {  		int r; -		folio_unlock(folio); -		folio_put(folio); +		folio_unlock(*foliop); +		folio_put(*foliop); +		*foliop = NULL;  		if (IS_ERR(snapc))  			return PTR_ERR(snapc); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fa29c9aae24b..386bb523c69e 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1918,7 +1918,6 @@ void cifs_put_smb_ses(struct cifs_ses *ses)  	list_del_init(&ses->smb_ses_list);  	spin_unlock(&cifs_tcp_ses_lock); -	spin_lock(&ses->chan_lock);  	chan_count = ses->chan_count;  	/* close any extra channels */ @@ -1934,7 +1933,6 @@ void cifs_put_smb_ses(struct cifs_ses *ses)  			ses->chans[i].server = NULL;  		}  	} -	spin_unlock(&ses->chan_lock);  	sesInfoFree(ses);  	cifs_put_tcp_session(server, 0); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index b85718f32b53..02c8b2906196 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -474,6 +474,14 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,  out:  	if (rc && chan->server) { +		/* +		 * we should avoid race with these delayed works before we +		 * remove this channel +		 */ +		
cancel_delayed_work_sync(&chan->server->echo); +		cancel_delayed_work_sync(&chan->server->resolve); +		cancel_delayed_work_sync(&chan->server->reconnect); +  		spin_lock(&ses->chan_lock);  		/* we rely on all bits beyond chan_count to be clear */  		cifs_chan_clear_need_reconnect(ses, chan->server); @@ -484,10 +492,9 @@ out:  		 */  		WARN_ON(ses->chan_count < 1);  		spin_unlock(&ses->chan_lock); -	} -	if (rc && chan->server)  		cifs_put_tcp_session(chan->server, 0); +	}  	return rc;  } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 12b4dddaedb0..c705de32e225 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -571,10 +571,6 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,  	*total_len += ctxt_len;  	pneg_ctxt += ctxt_len; -	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); -	*total_len += sizeof(struct smb2_posix_neg_context); -	pneg_ctxt += sizeof(struct smb2_posix_neg_context); -  	/*  	 * secondary channels don't have the hostname field populated  	 * use the hostname field in the primary channel instead @@ -586,9 +582,14 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,  					      hostname);  		*total_len += ctxt_len;  		pneg_ctxt += ctxt_len; -		neg_context_count = 4; -	} else /* second channels do not have a hostname */  		neg_context_count = 3; +	} else +		neg_context_count = 2; + +	build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt); +	*total_len += sizeof(struct smb2_posix_neg_context); +	pneg_ctxt += sizeof(struct smb2_posix_neg_context); +	neg_context_count++;  	if (server->compress_algorithm) {  		build_compression_ctxt((struct smb2_compression_capabilities_context *) diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 0a22a2faf552..e1c4617de771 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -176,7 +176,7 @@ nlm_delete_file(struct nlm_file *file)  	}  } -static int nlm_unlock_files(struct nlm_file *file) +static int nlm_unlock_files(struct nlm_file *file, fl_owner_t owner)  {  	struct file_lock lock; @@ -184,6 +184,7 @@ static int nlm_unlock_files(struct nlm_file *file)  	lock.fl_type  = F_UNLCK;  	lock.fl_start = 0;  	lock.fl_end   = OFFSET_MAX; +	lock.fl_owner = owner;  	if (file->f_file[O_RDONLY] &&  	    vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))  		goto out_err; @@ -225,7 +226,7 @@ again:  		if (match(lockhost, host)) {  			spin_unlock(&flctx->flc_lock); -			if (nlm_unlock_files(file)) +			if (nlm_unlock_files(file, fl->fl_owner))  				return 1;  			goto again;  		} @@ -282,11 +283,10 @@ nlm_file_inuse(struct nlm_file *file)  static void nlm_close_files(struct nlm_file *file)  { -	struct file *f; - -	for (f = file->f_file[0]; f <= file->f_file[1]; f++) -		if (f) -			nlmsvc_ops->fclose(f); +	if (file->f_file[O_RDONLY]) +		nlmsvc_ops->fclose(file->f_file[O_RDONLY]); +	if (file->f_file[O_WRONLY]) +		nlmsvc_ops->fclose(file->f_file[O_WRONLY]);  }  /* diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 42f892c5712e..0ce535852151 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -319,8 +319,9 @@ zero_out:   * conflicting writes once the folio is grabbed and locked.  It is passed a   * pointer to the fsdata cookie that gets returned to the VM to be passed to   * write_end.  It is permitted to sleep.  It should return 0 if the request - * should go ahead; unlock the folio and return -EAGAIN to cause the folio to - * be regot; or return an error. + * should go ahead or it may return an error.  
It may also unlock and put the + * folio, provided it sets ``*foliop`` to NULL, in which case a return of 0 + * will cause the folio to be re-got and the process to be retried.   *   * The calling netfs must initialise a netfs context contiguous to the vfs   * inode before calling this. @@ -348,13 +349,13 @@ retry:  	if (ctx->ops->check_write_begin) {  		/* Allow the netfs (eg. ceph) to flush conflicts. */ -		ret = ctx->ops->check_write_begin(file, pos, len, folio, _fsdata); +		ret = ctx->ops->check_write_begin(file, pos, len, &folio, _fsdata);  		if (ret < 0) {  			trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin); -			if (ret == -EAGAIN) -				goto retry;  			goto error;  		} +		if (!folio) +			goto retry;  	}  	if (folio_test_uptodate(folio)) @@ -416,8 +417,10 @@ have_folio_no_wait:  error_put:  	netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);  error: -	folio_unlock(folio); -	folio_put(folio); +	if (folio) { +		folio_unlock(folio); +		folio_put(folio); +	}  	_leave(" = %d", ret);  	return ret;  } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 61b2aae81abb..2acea7792bb2 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -470,6 +470,15 @@ nfsd4_decode_fattr4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen,  			return nfserr_bad_xdr;  		}  	} +	if (bmval[1] & FATTR4_WORD1_TIME_CREATE) { +		struct timespec64 ts; + +		/* No Linux filesystem supports setting this attribute. */ +		bmval[1] &= ~FATTR4_WORD1_TIME_CREATE; +		status = nfsd4_decode_nfstime4(argp, &ts); +		if (status) +			return status; +	}  	if (bmval[1] & FATTR4_WORD1_TIME_MODIFY_SET) {  		u32 set_it; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 847b482155ae..9a8b09afc173 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -465,7 +465,8 @@ static inline bool nfsd_attrs_supported(u32 minorversion, const u32 *bmval)  	(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL)  #define NFSD_WRITEABLE_ATTRS_WORD1 \  	(FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \ -	| FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET) +	| FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_CREATE \ +	| FATTR4_WORD1_TIME_MODIFY_SET)  #ifdef CONFIG_NFSD_V4_SECURITY_LABEL  #define MAYBE_FATTR4_WORD2_SECURITY_LABEL \  	FATTR4_WORD2_SECURITY_LABEL diff --git a/fs/remap_range.c b/fs/remap_range.c index 5e0d97e02f96..881a306ee247 100644 --- a/fs/remap_range.c +++ b/fs/remap_range.c @@ -547,7 +547,7 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)  		else if (deduped < 0)  			info->status = deduped;  		else -			info->bytes_deduped = deduped; +			info->bytes_deduped = len;  next_fdput:  		fdput(dst_fd); diff --git a/include/linux/kexec.h b/include/linux/kexec.h index ce6536f1d269..475683cd67f1 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -452,6 +452,12 @@ static inline int kexec_crash_loaded(void) { return 0; }  #define kexec_in_progress false  #endif /* CONFIG_KEXEC_CORE */ +#ifdef CONFIG_KEXEC_SIG +void set_kexec_sig_enforced(void); +#else +static inline void set_kexec_sig_enforced(void) {} +#endif +  #endif /* !defined(__ASSEBMLY__) */  #endif /* LINUX_KEXEC_H */ diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 1773e5df8e65..1b18dfa52e48 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -214,7 +214,7 @@ struct netfs_request_ops {  	void (*issue_read)(struct netfs_io_subrequest *subreq);  	bool (*is_still_valid)(struct netfs_io_request *rreq);  	int (*check_write_begin)(struct file *file, loff_t pos, unsigned 
len, -				 struct folio *folio, void **_fsdata); +				 struct folio **foliop, void **_fsdata);  	void (*done)(struct netfs_io_request *rreq);  }; diff --git a/include/linux/reset.h b/include/linux/reset.h index 8a21b5756c3e..514ddf003efc 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -731,7 +731,7 @@ static inline int __must_check  devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs,  					       struct reset_control_bulk_data *rstcs)  { -	return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, true); +	return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true);  }  /** diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 6d02e12e4702..80f41446b1f0 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -8462,11 +8462,12 @@ int cfg80211_bss_color_notify(struct net_device *dev, gfp_t gfp,   * cfg80211_obss_color_collision_notify - notify about bss color collision   * @dev: network device   * @color_bitmap: representations of the colors that the local BSS is aware of + * @gfp: allocation flags   */  static inline int cfg80211_obss_color_collision_notify(struct net_device *dev, -						       u64 color_bitmap) +						       u64 color_bitmap, gfp_t gfp)  { -	return cfg80211_bss_color_notify(dev, GFP_KERNEL, +	return cfg80211_bss_color_notify(dev, gfp,  					 NL80211_CMD_OBSS_COLOR_COLLISION,  					 0, color_bitmap);  } diff --git a/include/net/mac80211.h b/include/net/mac80211.h index ebadb2103968..47642b020706 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -6960,10 +6960,11 @@ ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,   * @vif: &struct ieee80211_vif pointer from the add_interface callback.   * @color_bitmap: a 64 bit bitmap representing the colors that the local BSS is   *	aware of. 
+ * @gfp: allocation flags   */  void  ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif, -				       u64 color_bitmap); +				       u64 color_bitmap, gfp_t gfp);  /**   * ieee80211_is_tx_data - check if frame is a data frame diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5c4e5a96a984..64cf655c818c 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -657,18 +657,22 @@ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)  	tmpl->len = sizeof(struct nft_set_ext);  } -static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, -					  unsigned int len) +static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id, +					 unsigned int len)  {  	tmpl->len	 = ALIGN(tmpl->len, nft_set_ext_types[id].align); -	BUG_ON(tmpl->len > U8_MAX); +	if (tmpl->len > U8_MAX) +		return -EINVAL; +  	tmpl->offset[id] = tmpl->len;  	tmpl->len	+= nft_set_ext_types[id].len + len; + +	return 0;  } -static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id) +static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)  { -	nft_set_ext_add_length(tmpl, id, 0); +	return nft_set_ext_add_length(tmpl, id, 0);  }  static inline void nft_set_ext_init(struct nft_set_ext *ext, diff --git a/include/net/raw.h b/include/net/raw.h index 8ad8df594853..c51a635671a7 100644 --- a/include/net/raw.h +++ b/include/net/raw.h @@ -75,7 +75,7 @@ static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if,  				       int dif, int sdif)  {  #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) -	return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept, +	return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept),  				 bound_dev_if, dif, sdif);  #else  	return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/net/sock.h b/include/net/sock.h index 72ca97ccb460..9fa54762e077 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1529,7 +1529,7 @@ void __sk_mem_reclaim(struct sock *sk, int amount);  /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */  static inline long sk_prot_mem_limits(const struct sock *sk, int index)  { -	long val = sk->sk_prot->sysctl_mem[index]; +	long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]);  #if PAGE_SIZE > SK_MEM_QUANTUM  	val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT; diff --git a/include/net/tls.h b/include/net/tls.h index 8017f1703447..8bd938f98bdd 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -704,7 +704,7 @@ int tls_sw_fallback_init(struct sock *sk,  			 struct tls_crypto_info *crypto_info);  #ifdef CONFIG_TLS_DEVICE -void tls_device_init(void); +int tls_device_init(void);  void tls_device_cleanup(void);  void tls_device_sk_destruct(struct sock *sk);  int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); @@ -724,7 +724,7 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)  	return tls_get_ctx(sk)->rx_conf == TLS_HW;  }  #else -static inline void tls_device_init(void) {} +static inline int tls_device_init(void) { return 0; }  static inline void tls_device_cleanup(void) {}  static inline int diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h index 12c315782766..777ee6cbe933 100644 --- a/include/trace/events/sock.h +++ b/include/trace/events/sock.h @@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit,  	TP_STRUCT__entry(  		__array(char, name, 32) -		__field(long *, sysctl_mem) +		__array(long, sysctl_mem, 3)  	
	__field(long, allocated)  		__field(int, sysctl_rmem)  		__field(int, rmem_alloc) @@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit,  	TP_fast_assign(  		strncpy(__entry->name, prot->name, 32); -		__entry->sysctl_mem = prot->sysctl_mem; +		__entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]); +		__entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]); +		__entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]);  		__entry->allocated = allocated;  		__entry->sysctl_rmem = sk_get_rmem0(sk, prot);  		__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index f4009dbdf62d..ef78e0e1a754 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -5222,22 +5222,25 @@ union bpf_attr {   *	Return   *		Nothing. Always succeeds.   * - * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset) + * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)   *	Description   *		Read *len* bytes from *src* into *dst*, starting from *offset*   *		into *src*. + *		*flags* is currently unused.   *	Return   *		0 on success, -E2BIG if *offset* + *len* exceeds the length - *		of *src*'s data, -EINVAL if *src* is an invalid dynptr. + *		of *src*'s data, -EINVAL if *src* is an invalid dynptr or if + *		*flags* is not 0.   * - * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len) + * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)   *	Description   *		Write *len* bytes from *src* into *dst*, starting from *offset*   *		into *dst*. + *		*flags* is currently unused.   *	Return   *		0 on success, -E2BIG if *offset* + *len* exceeds the length   *		of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* - *		is a read-only dynptr. + *		is a read-only dynptr or if *flags* is not 0.   
*   * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)   *	Description diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 5f6f3f829b36..e7961508a47d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -68,11 +68,13 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns  {  	u8 *ptr = NULL; -	if (k >= SKF_NET_OFF) +	if (k >= SKF_NET_OFF) {  		ptr = skb_network_header(skb) + k - SKF_NET_OFF; -	else if (k >= SKF_LL_OFF) +	} else if (k >= SKF_LL_OFF) { +		if (unlikely(!skb_mac_header_was_set(skb))) +			return NULL;  		ptr = skb_mac_header(skb) + k - SKF_LL_OFF; - +	}  	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))  		return ptr; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 225806a02efb..bb1254f07667 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1497,11 +1497,12 @@ const struct bpf_func_proto bpf_dynptr_from_mem_proto = {  	.arg4_type	= ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,  }; -BPF_CALL_4(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src, u32, offset) +BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, struct bpf_dynptr_kern *, src, +	   u32, offset, u64, flags)  {  	int err; -	if (!src->data) +	if (!src->data || flags)  		return -EINVAL;  	err = bpf_dynptr_check_off_len(src, offset, len); @@ -1521,13 +1522,15 @@ const struct bpf_func_proto bpf_dynptr_read_proto = {  	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,  	.arg3_type	= ARG_PTR_TO_DYNPTR,  	.arg4_type	= ARG_ANYTHING, +	.arg5_type	= ARG_ANYTHING,  }; -BPF_CALL_4(bpf_dynptr_write, struct bpf_dynptr_kern *, dst, u32, offset, void *, src, u32, len) +BPF_CALL_5(bpf_dynptr_write, struct bpf_dynptr_kern *, dst, u32, offset, void *, src, +	   u32, len, u64, flags)  {  	int err; -	if (!dst->data || bpf_dynptr_is_rdonly(dst)) +	if (!dst->data || flags || bpf_dynptr_is_rdonly(dst))  		return -EINVAL;  	err = bpf_dynptr_check_off_len(dst, offset, len); @@ -1547,6 +1550,7 @@ const struct bpf_func_proto bpf_dynptr_write_proto = {  	.arg2_type	= ARG_ANYTHING,  	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,  	.arg4_type	= ARG_CONST_SIZE_OR_ZERO, +	.arg5_type	= ARG_ANYTHING,  };  BPF_CALL_3(bpf_dynptr_data, struct bpf_dynptr_kern *, ptr, u32, offset, u32, len) diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 145321a5e798..f9261c07b048 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -29,6 +29,15 @@  #include <linux/vmalloc.h>  #include "kexec_internal.h" +#ifdef CONFIG_KEXEC_SIG +static bool sig_enforce = IS_ENABLED(CONFIG_KEXEC_SIG_FORCE); + +void set_kexec_sig_enforced(void) +{ +	sig_enforce = true; +} +#endif +  static int kexec_calculate_store_digests(struct kimage *image);  /* @@ -159,7 +168,7 @@ kimage_validate_signature(struct kimage *image)  					   image->kernel_buf_len);  	if (ret) { -		if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) { +		if (sig_enforce) {  			pr_notice("Enforced kernel signature verification failed (%d).\n", ret);  			return ret;  		} diff --git a/kernel/sysctl.c b/kernel/sysctl.c index e52b6e372c60..35d034219513 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -446,14 +446,14 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp,  		if (*negp) {  			if (*lvalp > (unsigned long) INT_MAX + 1)  				return -EINVAL; -			*valp = -*lvalp; +			WRITE_ONCE(*valp, -*lvalp);  		} else {  			if (*lvalp > (unsigned long) INT_MAX)  				return -EINVAL; -			*valp = *lvalp; +			WRITE_ONCE(*valp, *lvalp);  		}  	} else { -		int val = *valp; +		int val = READ_ONCE(*valp);  		
if (val < 0) {  			*negp = true;  			*lvalp = -(unsigned long)val; @@ -472,9 +472,9 @@ static int do_proc_douintvec_conv(unsigned long *lvalp,  	if (write) {  		if (*lvalp > UINT_MAX)  			return -EINVAL; -		*valp = *lvalp; +		WRITE_ONCE(*valp, *lvalp);  	} else { -		unsigned int val = *valp; +		unsigned int val = READ_ONCE(*valp);  		*lvalp = (unsigned long)val;  	}  	return 0; @@ -857,7 +857,7 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp,  		if ((param->min && *param->min > tmp) ||  		    (param->max && *param->max < tmp))  			return -EINVAL; -		*valp = tmp; +		WRITE_ONCE(*valp, tmp);  	}  	return 0; @@ -923,7 +923,7 @@ static int do_proc_douintvec_minmax_conv(unsigned long *lvalp,  		    (param->max && *param->max < tmp))  			return -ERANGE; -		*valp = tmp; +		WRITE_ONCE(*valp, tmp);  	}  	return 0; @@ -1007,13 +1007,13 @@ int proc_dou8vec_minmax(struct ctl_table *table, int write,  	tmp.maxlen = sizeof(val);  	tmp.data = &val; -	val = *data; +	val = READ_ONCE(*data);  	res = do_proc_douintvec(&tmp, write, buffer, lenp, ppos,  				do_proc_douintvec_minmax_conv, ¶m);  	if (res)  		return res;  	if (write) -		*data = val; +		WRITE_ONCE(*data, val);  	return 0;  }  EXPORT_SYMBOL_GPL(proc_dou8vec_minmax); @@ -1090,9 +1090,9 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,  				err = -EINVAL;  				break;  			} -			*i = val; +			WRITE_ONCE(*i, val);  		} else { -			val = convdiv * (*i) / convmul; +			val = convdiv * READ_ONCE(*i) / convmul;  			if (!first)  				proc_put_char(&buffer, &left, '\t');  			proc_put_long(&buffer, &left, val, false); @@ -1173,9 +1173,12 @@ static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp,  	if (write) {  		if (*lvalp > INT_MAX / HZ)  			return 1; -		*valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); +		if (*negp) +			WRITE_ONCE(*valp, -*lvalp * HZ); +		else +			WRITE_ONCE(*valp, *lvalp * HZ);  	} else { -		int val = *valp; +		int val = READ_ONCE(*valp);  		unsigned long lval;  		if (val < 0) {  			*negp = true; @@ -1221,9 +1224,9 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,  		if (jif > INT_MAX)  			return 1; -		*valp = (int)jif; +		WRITE_ONCE(*valp, (int)jif);  	} else { -		int val = *valp; +		int val = READ_ONCE(*valp);  		unsigned long lval;  		if (val < 0) {  			*negp = true; @@ -1291,8 +1294,8 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write,   * @ppos: the current position in the file   *   * Reads/writes up to table->maxlen/sizeof(unsigned int) integer - * values from/to the user buffer, treated as an ASCII string.  - * The values read are assumed to be in 1/1000 seconds, and  + * values from/to the user buffer, treated as an ASCII string. + * The values read are assumed to be in 1/1000 seconds, and   * are converted into jiffies.   *   * Returns 0 on success. 
@@ -2091,6 +2094,17 @@ static struct ctl_table vm_table[] = {  		.extra1		= SYSCTL_ZERO,  		.extra2		= SYSCTL_TWO_HUNDRED,  	}, +#ifdef CONFIG_NUMA +	{ +		.procname	= "numa_stat", +		.data		= &sysctl_vm_numa_stat, +		.maxlen		= sizeof(int), +		.mode		= 0644, +		.proc_handler	= sysctl_vm_numa_stat_handler, +		.extra1		= SYSCTL_ZERO, +		.extra2		= SYSCTL_ONE, +	}, +#endif  #ifdef CONFIG_HUGETLB_PAGE  	{  		.procname	= "nr_hugepages", @@ -2107,15 +2121,6 @@ static struct ctl_table vm_table[] = {  		.mode           = 0644,  		.proc_handler   = &hugetlb_mempolicy_sysctl_handler,  	}, -	{ -		.procname		= "numa_stat", -		.data			= &sysctl_vm_numa_stat, -		.maxlen			= sizeof(int), -		.mode			= 0644, -		.proc_handler	= sysctl_vm_numa_stat_handler, -		.extra1			= SYSCTL_ZERO, -		.extra2			= SYSCTL_ONE, -	},  #endif  	 {  		.procname	= "hugetlb_shm_group", diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index a9f7eb047768..fd15230a703b 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -84,6 +84,9 @@ config UBSAN_SHIFT  config UBSAN_DIV_ZERO  	bool "Perform checking for integer divide-by-zero"  	depends on $(cc-option,-fsanitize=integer-divide-by-zero) +	# https://github.com/ClangBuiltLinux/linux/issues/1657 +	# https://github.com/llvm/llvm-project/issues/56289 +	depends on !CC_IS_CLANG  	help  	  This option enables -fsanitize=integer-divide-by-zero which checks  	  for integer division by zero. This is effectively redundant with the diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c index 53b1955b027f..214532173536 100644 --- a/net/8021q/vlan_netlink.c +++ b/net/8021q/vlan_netlink.c @@ -182,10 +182,14 @@ static int vlan_newlink(struct net *src_net, struct net_device *dev,  	else if (dev->mtu > max_mtu)  		return -EINVAL; +	/* Note: If this initial vlan_changelink() fails, we need +	 * to call vlan_dev_free_egress_priority() to free memory. +	 */  	err = vlan_changelink(dev, tb, data, extack); -	if (err) -		return err; -	err = register_vlan_dev(dev, extack); + +	if (!err) +		err = register_vlan_dev(dev, extack); +  	if (err)  		vlan_dev_free_egress_priority(dev);  	return err; diff --git a/net/core/dev.c b/net/core/dev.c index 8e6f22961206..30a1603a7225 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4863,7 +4863,10 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,  }  /* When doing generic XDP we have to bypass the qdisc layer and the - * network taps in order to match in-driver-XDP behavior. + * network taps in order to match in-driver-XDP behavior. This also means + * that XDP packets are able to starve other packets going through a qdisc, + * and DDOS attacks will be more effective. In-driver-XDP use dedicated TX + * queues, so they do not have this starvation issue.   
*/  void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)  { @@ -4875,7 +4878,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)  	txq = netdev_core_pick_tx(dev, skb, NULL);  	cpu = smp_processor_id();  	HARD_TX_LOCK(dev, txq, cpu); -	if (!netif_xmit_stopped(txq)) { +	if (!netif_xmit_frozen_or_drv_stopped(txq)) {  		rc = netdev_start_xmit(skb, dev, txq, 0);  		if (dev_xmit_complete(rc))  			free_skb = false; @@ -4883,6 +4886,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)  	HARD_TX_UNLOCK(dev, txq);  	if (free_skb) {  		trace_xdp_exception(dev, xdp_prog, XDP_TX); +		dev_core_stats_tx_dropped_inc(dev);  		kfree_skb(skb);  	}  } diff --git a/net/core/filter.c b/net/core/filter.c index 5d16d66727fc..2a6a0b0ce43e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -6158,7 +6158,6 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len  	if (err)  		return err; -	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));  	skb_set_transport_header(skb, sizeof(struct ipv6hdr));  	return seg6_lookup_nexthop(skb, NULL, 0); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 93da9f783bec..ac67f6b4ec70 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1246,7 +1246,7 @@ static int inet_sk_reselect_saddr(struct sock *sk)  	if (new_saddr == old_saddr)  		return 0; -	if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) { +	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) {  		pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n",  			__func__, &old_saddr, &new_saddr);  	} @@ -1301,7 +1301,7 @@ int inet_sk_rebuild_header(struct sock *sk)  		 * Other protocols have to map its equivalent state to TCP_SYN_SENT.  		 * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. 
-acme  		 */ -		if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr || +		if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) ||  		    sk->sk_state != TCP_SYN_SENT ||  		    (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||  		    (err = inet_sk_reselect_saddr(sk)) != 0) diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 62d5f99760aa..6cd3b6c559f0 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -239,7 +239,7 @@ static int cipso_v4_cache_check(const unsigned char *key,  	struct cipso_v4_map_cache_entry *prev_entry = NULL;  	u32 hash; -	if (!cipso_v4_cache_enabled) +	if (!READ_ONCE(cipso_v4_cache_enabled))  		return -ENOENT;  	hash = cipso_v4_map_cache_hash(key, key_len); @@ -296,13 +296,14 @@ static int cipso_v4_cache_check(const unsigned char *key,  int cipso_v4_cache_add(const unsigned char *cipso_ptr,  		       const struct netlbl_lsm_secattr *secattr)  { +	int bkt_size = READ_ONCE(cipso_v4_cache_bucketsize);  	int ret_val = -EPERM;  	u32 bkt;  	struct cipso_v4_map_cache_entry *entry = NULL;  	struct cipso_v4_map_cache_entry *old_entry = NULL;  	u32 cipso_ptr_len; -	if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) +	if (!READ_ONCE(cipso_v4_cache_enabled) || bkt_size <= 0)  		return 0;  	cipso_ptr_len = cipso_ptr[1]; @@ -322,7 +323,7 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,  	bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);  	spin_lock_bh(&cipso_v4_cache[bkt].lock); -	if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { +	if (cipso_v4_cache[bkt].size < bkt_size) {  		list_add(&entry->list, &cipso_v4_cache[bkt].list);  		cipso_v4_cache[bkt].size += 1;  	} else { @@ -1199,7 +1200,8 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,  		/* This will send packets using the "optimized" format when  		 * possible as specified in  section 3.4.2.6 of the  		 * CIPSO draft. */ -		if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) +		if (READ_ONCE(cipso_v4_rbm_optfmt) && ret_val > 0 && +		    ret_val <= 10)  			tag_len = 14;  		else  			tag_len = 4 + ret_val; @@ -1603,7 +1605,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)  			 * all the CIPSO validations here but it doesn't  			 * really specify _exactly_ what we need to validate  			 * ... so, just make it a sysctl tunable. 
*/ -			if (cipso_v4_rbm_strictvalid) { +			if (READ_ONCE(cipso_v4_rbm_strictvalid)) {  				if (cipso_v4_map_lvl_valid(doi_def,  							   tag[3]) < 0) {  					err_offset = opt_iter + 3; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index a57ba23571c9..d9fdcbae16ee 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1230,7 +1230,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh,  	nh->fib_nh_dev = in_dev->dev;  	dev_hold_track(nh->fib_nh_dev, &nh->fib_nh_dev_tracker, GFP_ATOMIC); -	nh->fib_nh_scope = RT_SCOPE_HOST; +	nh->fib_nh_scope = RT_SCOPE_LINK;  	if (!netif_carrier_ok(nh->fib_nh_dev))  		nh->fib_nh_flags |= RTNH_F_LINKDOWN;  	err = 0; @@ -1811,7 +1811,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,  			goto nla_put_failure;  		if (nexthop_is_blackhole(fi->nh))  			rtm->rtm_type = RTN_BLACKHOLE; -		if (!fi->fib_net->ipv4.sysctl_nexthop_compat_mode) +		if (!READ_ONCE(fi->fib_net->ipv4.sysctl_nexthop_compat_mode))  			goto offload;  	} diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 2734c3af7e24..46e8a5125853 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -498,7 +498,7 @@ static void tnode_free(struct key_vector *tn)  		tn = container_of(head, struct tnode, rcu)->kv;  	} -	if (tnode_free_size >= sysctl_fib_sync_mem) { +	if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) {  		tnode_free_size = 0;  		synchronize_rcu();  	} diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index efea0e796f06..57c4f0d87a7a 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -253,11 +253,12 @@ bool icmp_global_allow(void)  	spin_lock(&icmp_global.lock);  	delta = min_t(u32, now - icmp_global.stamp, HZ);  	if (delta >= HZ / 50) { -		incr = sysctl_icmp_msgs_per_sec * delta / HZ ; +		incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ;  		if (incr)  			WRITE_ONCE(icmp_global.stamp, now);  	} -	credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); +	credit = min_t(u32, icmp_global.credit + incr, +		       READ_ONCE(sysctl_icmp_msgs_burst));  	if (credit) {  		/* We want to use a credit of one in average, but need to randomize  		 * it for security reasons. @@ -281,7 +282,7 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code)  		return true;  	/* Limit if icmp type is enabled in ratemask. */ -	if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask)) +	if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask)))  		return true;  	return false; @@ -319,7 +320,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,  	vif = l3mdev_master_ifindex(dst->dev);  	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1); -	rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit); +	rc = inet_peer_xrlim_allow(peer, +				   READ_ONCE(net->ipv4.sysctl_icmp_ratelimit));  	if (peer)  		inet_putpeer(peer);  out: @@ -692,7 +694,7 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,  		rcu_read_lock();  		if (rt_is_input_route(rt) && -		    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr) +		    READ_ONCE(net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr))  			dev = dev_get_by_index_rcu(net, inet_iif(skb_in));  		if (dev) @@ -932,7 +934,7 @@ static enum skb_drop_reason icmp_unreach(struct sk_buff *skb)  	 *	get the other vendor to fix their kit.  	 
*/ -	if (!net->ipv4.sysctl_icmp_ignore_bogus_error_responses && +	if (!READ_ONCE(net->ipv4.sysctl_icmp_ignore_bogus_error_responses) &&  	    inet_addr_type_dev_table(net, skb->dev, iph->daddr) == RTN_BROADCAST) {  		net_warn_ratelimited("%pI4 sent an invalid ICMP type %u, code %u error to a broadcast: %pI4 on %s\n",  				     &ip_hdr(skb)->saddr, @@ -992,7 +994,7 @@ static enum skb_drop_reason icmp_echo(struct sk_buff *skb)  	net = dev_net(skb_dst(skb)->dev);  	/* should there be an ICMP stat for ignored echos? */ -	if (net->ipv4.sysctl_icmp_echo_ignore_all) +	if (READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_all))  		return SKB_NOT_DROPPED_YET;  	icmp_param.data.icmph	   = *icmp_hdr(skb); @@ -1027,7 +1029,7 @@ bool icmp_build_probe(struct sk_buff *skb, struct icmphdr *icmphdr)  	u16 ident_len;  	u8 status; -	if (!net->ipv4.sysctl_icmp_echo_enable_probe) +	if (!READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))  		return false;  	/* We currently only support probing interfaces on the proxy node @@ -1248,7 +1250,7 @@ int icmp_rcv(struct sk_buff *skb)  		 */  		if ((icmph->type == ICMP_ECHO ||  		     icmph->type == ICMP_TIMESTAMP) && -		    net->ipv4.sysctl_icmp_echo_ignore_broadcasts) { +		    READ_ONCE(net->ipv4.sysctl_icmp_echo_ignore_broadcasts)) {  			reason = SKB_DROP_REASON_INVALID_PROTO;  			goto error;  		} diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 0ec501845cb3..47ccc343c9fb 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -156,7 +156,8 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,  {  	struct inet_timewait_sock *tw; -	if (refcount_read(&dr->tw_refcount) - 1 >= dr->sysctl_max_tw_buckets) +	if (refcount_read(&dr->tw_refcount) - 1 >= +	    READ_ONCE(dr->sysctl_max_tw_buckets))  		return NULL;  	tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab, diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index da21dfce24d7..e9fed83e9b3c 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -141,16 +141,20 @@ static void inet_peer_gc(struct inet_peer_base *base,  			 struct inet_peer *gc_stack[],  			 unsigned int gc_cnt)  { +	int peer_threshold, peer_maxttl, peer_minttl;  	struct inet_peer *p;  	__u32 delta, ttl;  	int i; -	if (base->total >= inet_peer_threshold) +	peer_threshold = READ_ONCE(inet_peer_threshold); +	peer_maxttl = READ_ONCE(inet_peer_maxttl); +	peer_minttl = READ_ONCE(inet_peer_minttl); + +	if (base->total >= peer_threshold)  		ttl = 0; /* be aggressive */  	else -		ttl = inet_peer_maxttl -				- (inet_peer_maxttl - inet_peer_minttl) / HZ * -					base->total / inet_peer_threshold * HZ; +		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ * +			base->total / peer_threshold * HZ;  	for (i = 0; i < gc_cnt; i++) {  		p = gc_stack[i]; diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index e459a391e607..853a75a8fbaf 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -1858,7 +1858,7 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)  		/* __ip6_del_rt does a release, so do a hold here */  		fib6_info_hold(f6i);  		ipv6_stub->ip6_del_rt(net, f6i, -				      !net->ipv4.sysctl_nexthop_compat_mode); +				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));  	}  } @@ -2361,7 +2361,8 @@ out:  	if (!rc) {  		nh_base_seq_inc(net);  		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo); -		if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode) +		if (replace_notify && +		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))  			
nexthop_replace_notify(net, new_nh, &cfg->nlinfo);  	} diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index f33c31dd7366..b387c4835155 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -273,7 +273,7 @@ bool cookie_ecn_ok(const struct tcp_options_received *tcp_opt,  	if (!ecn_ok)  		return false; -	if (net->ipv4.sysctl_tcp_ecn) +	if (READ_ONCE(net->ipv4.sysctl_tcp_ecn))  		return true;  	return dst_feature(dst, RTAX_FEATURE_ECN); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index cd448cdd3b38..108fd86f2718 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -599,6 +599,8 @@ static struct ctl_table ipv4_net_table[] = {  		.maxlen		= sizeof(u8),  		.mode		= 0644,  		.proc_handler	= proc_dou8vec_minmax, +		.extra1		= SYSCTL_ZERO, +		.extra2		= SYSCTL_ONE  	},  	{  		.procname	= "icmp_echo_enable_probe", @@ -615,6 +617,8 @@ static struct ctl_table ipv4_net_table[] = {  		.maxlen		= sizeof(u8),  		.mode		= 0644,  		.proc_handler	= proc_dou8vec_minmax, +		.extra1		= SYSCTL_ZERO, +		.extra2		= SYSCTL_ONE  	},  	{  		.procname	= "icmp_ignore_bogus_error_responses", @@ -622,6 +626,8 @@ static struct ctl_table ipv4_net_table[] = {  		.maxlen		= sizeof(u8),  		.mode		= 0644,  		.proc_handler	= proc_dou8vec_minmax, +		.extra1		= SYSCTL_ZERO, +		.extra2		= SYSCTL_ONE  	},  	{  		.procname	= "icmp_errors_use_inbound_ifaddr", @@ -629,6 +635,8 @@ static struct ctl_table ipv4_net_table[] = {  		.maxlen		= sizeof(u8),  		.mode		= 0644,  		.proc_handler	= proc_dou8vec_minmax, +		.extra1		= SYSCTL_ZERO, +		.extra2		= SYSCTL_ONE  	},  	{  		.procname	= "icmp_ratelimit", @@ -668,6 +676,8 @@ static struct ctl_table ipv4_net_table[] = {  		.maxlen		= sizeof(u8),  		.mode		= 0644,  		.proc_handler	= proc_dou8vec_minmax, +		.extra1		= SYSCTL_ZERO, +		.extra2		= SYSCTL_TWO,  	},  	{  		.procname	= "tcp_ecn_fallback", @@ -675,6 +685,8 @@ static struct ctl_table ipv4_net_table[] = {  		.maxlen		= sizeof(u8),  		.mode		= 0644,  		.proc_handler	= proc_dou8vec_minmax, +		.extra1		= SYSCTL_ZERO, +		.extra2		= SYSCTL_ONE,  	},  	{  		.procname	= "ip_dynaddr", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 028513d3e2a2..2222dfdde316 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2715,7 +2715,8 @@ static void tcp_orphan_update(struct timer_list *unused)  static bool tcp_too_many_orphans(int shift)  { -	return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans; +	return READ_ONCE(tcp_orphan_cache) << shift > +		READ_ONCE(sysctl_tcp_max_orphans);  }  bool tcp_check_oom(struct sock *sk, int shift) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2e2a9ece9af2..3ec4edc37313 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -6729,7 +6729,7 @@ static void tcp_ecn_create_request(struct request_sock *req,  	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);  	ecn_ok_dst = dst_feature(dst, DST_FEATURE_ECN_MASK); -	ecn_ok = net->ipv4.sysctl_tcp_ecn || ecn_ok_dst; +	ecn_ok = READ_ONCE(net->ipv4.sysctl_tcp_ecn) || ecn_ok_dst;  	if (((!ect || th->res1) && ecn_ok) || tcp_ca_needs_ecn(listen_sk) ||  	    (ecn_ok_dst & DST_FEATURE_ECN_CA) || diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 1c054431e358..11aa0ab10bba 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -324,7 +324,7 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)  {  	struct tcp_sock *tp = tcp_sk(sk);  	bool bpf_needs_ecn = tcp_bpf_ca_needs_ecn(sk); -	bool use_ecn = 
sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 || +	bool use_ecn = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn) == 1 ||  		tcp_ca_needs_ecn(sk) || bpf_needs_ecn;  	if (!use_ecn) { @@ -346,7 +346,7 @@ static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)  static void tcp_ecn_clear_syn(struct sock *sk, struct sk_buff *skb)  { -	if (sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback) +	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_ecn_fallback))  		/* tp->ecn_flags are cleared at a later point in time when  		 * SYN ACK is ultimatively being received.  		 */ diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 61770220774e..9d92d51c4757 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -925,7 +925,7 @@ static int icmpv6_rcv(struct sk_buff *skb)  		break;  	case ICMPV6_EXT_ECHO_REQUEST:  		if (!net->ipv6.sysctl.icmpv6_echo_ignore_all && -		    net->ipv4.sysctl_icmp_echo_enable_probe) +		    READ_ONCE(net->ipv4.sysctl_icmp_echo_enable_probe))  			icmpv6_echo_reply(skb);  		break; diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 828355710c57..916417944ec8 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -5741,7 +5741,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,  		if (nexthop_is_blackhole(rt->nh))  			rtm->rtm_type = RTN_BLACKHOLE; -		if (net->ipv4.sysctl_nexthop_compat_mode && +		if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&  		    rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)  			goto nla_put_failure; diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index d64855010948..e756ba705fd9 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -189,6 +189,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)  	}  #endif +	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); +  	skb_postpush_rcsum(skb, hdr, tot_len);  	return 0; @@ -241,6 +243,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)  	}  #endif +	hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); +  	skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen);  	return 0; @@ -302,7 +306,6 @@ static int seg6_do_srh(struct sk_buff *skb)  		break;  	} -	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));  	skb_set_transport_header(skb, sizeof(struct ipv6hdr));  	nf_reset_ct(skb); diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index 98a34287439c..2cd4a8d3b30a 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -826,7 +826,6 @@ static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt)  	if (err)  		goto drop; -	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));  	skb_set_transport_header(skb, sizeof(struct ipv6hdr));  	seg6_lookup_nexthop(skb, NULL, 0); @@ -858,7 +857,6 @@ static int input_action_end_b6_encap(struct sk_buff *skb,  	if (err)  		goto drop; -	ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));  	skb_set_transport_header(skb, sizeof(struct ipv6hdr));  	seg6_lookup_nexthop(skb, NULL, 0); diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index f7896f257e1b..4ddf297f40f2 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -4468,14 +4468,14 @@ EXPORT_SYMBOL_GPL(ieee80211_color_change_finish);  void  ieeee80211_obss_color_collision_notify(struct ieee80211_vif *vif, -				       u64 color_bitmap) +				       u64 color_bitmap, gfp_t gfp)  {  	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);  	if (sdata->vif.color_change_active || sdata->vif.csa_active)  		return; 
-	cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap); +	cfg80211_obss_color_collision_notify(sdata->dev, color_bitmap, gfp);  }  EXPORT_SYMBOL_GPL(ieeee80211_obss_color_collision_notify); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 41531478437c..15a73b7fdd75 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -377,7 +377,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do  	bool cancel_scan;  	struct cfg80211_nan_func *func; +	spin_lock_bh(&local->fq.lock);  	clear_bit(SDATA_STATE_RUNNING, &sdata->state); +	spin_unlock_bh(&local->fq.lock);  	cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;  	if (cancel_scan) diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 3c08ae04ddbc..1675f8cb87f1 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3217,7 +3217,8 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx)  				      IEEE80211_HE_OPERATION_BSS_COLOR_MASK);  		if (color == bss_conf->he_bss_color.color)  			ieeee80211_obss_color_collision_notify(&rx->sdata->vif, -							       BIT_ULL(color)); +							       BIT_ULL(color), +							       GFP_ATOMIC);  	}  } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 0e4efc08c762..c425f4fb7c2e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2818,19 +2818,10 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,  	/*  	 * If the skb is shared we need to obtain our own copy.  	 */ -	if (skb_shared(skb)) { -		struct sk_buff *tmp_skb = skb; - -		/* can't happen -- skb is a clone if info_id != 0 */ -		WARN_ON(info_id); - -		skb = skb_clone(skb, GFP_ATOMIC); -		kfree_skb(tmp_skb); - -		if (!skb) { -			ret = -ENOMEM; -			goto free; -		} +	skb = skb_share_check(skb, GFP_ATOMIC); +	if (unlikely(!skb)) { +		ret = -ENOMEM; +		goto free;  	}  	hdr.frame_control = fc; @@ -3539,15 +3530,9 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,  	/* after this point (skb is modified) we cannot return false */ -	if (skb_shared(skb)) { -		struct sk_buff *tmp_skb = skb; - -		skb = skb_clone(skb, GFP_ATOMIC); -		kfree_skb(tmp_skb); - -		if (!skb) -			return true; -	} +	skb = skb_share_check(skb, GFP_ATOMIC); +	if (unlikely(!skb)) +		return true;  	if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&  	    ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb)) @@ -4437,7 +4422,7 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,  				struct net_device *dev, struct sta_info *sta,  				struct ieee80211_key *key, struct sk_buff *skb)  { -	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); +	struct ieee80211_tx_info *info;  	struct ieee80211_local *local = sdata->local;  	struct tid_ampdu_tx *tid_tx;  	u8 tid; @@ -4452,6 +4437,11 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,  	    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))  		goto out_free; +	skb = skb_share_check(skb, GFP_ATOMIC); +	if (unlikely(!skb)) +		return; + +	info = IEEE80211_SKB_CB(skb);  	memset(info, 0, sizeof(*info));  	ieee80211_aggr_check(sdata, sta, skb); diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 1e26b5235add..dad42d42aa84 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -301,6 +301,9 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)  	local_bh_disable();  	spin_lock(&fq->lock); +	if (!test_bit(SDATA_STATE_RUNNING, &sdata->state)) +		goto out; +  	if (sdata->vif.type == NL80211_IFTYPE_AP)  		ps = 
&sdata->bss->ps; diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index 62c6733e0792..d50480b31750 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -147,8 +147,8 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,  	bool qos;  	/* all mesh/ocb stations are required to support WME */ -	if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT || -	    sdata->vif.type == NL80211_IFTYPE_OCB) +	if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT || +		    sdata->vif.type == NL80211_IFTYPE_OCB))  		qos = true;  	else if (sta)  		qos = sta->sta.wme; diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index cc21fafd9726..21a3ed64226e 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2919,12 +2919,12 @@ static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)  static int mptcp_disconnect(struct sock *sk, int flags)  { -	struct mptcp_subflow_context *subflow; +	struct mptcp_subflow_context *subflow, *tmp;  	struct mptcp_sock *msk = mptcp_sk(sk);  	inet_sk_state_store(sk, TCP_CLOSE); -	mptcp_for_each_subflow(msk, subflow) { +	list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {  		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);  		__mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_FASTCLOSE); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 082a2fd8d85b..369aeabb94fe 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -729,6 +729,9 @@ static void nf_ct_gc_expired(struct nf_conn *ct)  	if (!refcount_inc_not_zero(&ct->ct_general.use))  		return; +	/* load ->status after refcount increase */ +	smp_acquire__after_ctrl_dep(); +  	if (nf_ct_should_gc(ct))  		nf_ct_kill(ct); @@ -795,6 +798,9 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,  		 */  		ct = nf_ct_tuplehash_to_ctrack(h);  		if (likely(refcount_inc_not_zero(&ct->ct_general.use))) { +			/* re-check key after refcount */ +			smp_acquire__after_ctrl_dep(); +  			if (likely(nf_ct_key_equal(h, tuple, zone, net)))  				goto found; @@ -1387,6 +1393,9 @@ static unsigned int early_drop_list(struct net *net,  		if (!refcount_inc_not_zero(&tmp->ct_general.use))  			continue; +		/* load ->ct_net and ->status after refcount increase */ +		smp_acquire__after_ctrl_dep(); +  		/* kill only if still in same netns -- might have moved due to  		 * SLAB_TYPESAFE_BY_RCU rules.  		 * @@ -1536,6 +1545,9 @@ static void gc_worker(struct work_struct *work)  			if (!refcount_inc_not_zero(&tmp->ct_general.use))  				continue; +			/* load ->status after refcount increase */ +			smp_acquire__after_ctrl_dep(); +  			if (gc_worker_skip_ct(tmp)) {  				nf_ct_put(tmp);  				continue; @@ -1775,6 +1787,16 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,  	if (!exp)  		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC); +	/* Other CPU might have obtained a pointer to this object before it was +	 * released.  Because refcount is 0, refcount_inc_not_zero() will fail. +	 * +	 * After refcount_set(1) it will succeed; ensure that zeroing of +	 * ct->status and the correct ct->net pointer are visible; else other +	 * core might observe CONFIRMED bit which means the entry is valid and +	 * in the hash table, but its not (anymore). +	 */ +	smp_wmb(); +  	/* Now it is going to be associated with an sk_buff, set refcount to 1. 
*/  	refcount_set(&ct->ct_general.use, 1); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 722af5e309ba..f5905b5201a7 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -1203,6 +1203,7 @@ restart:  					   hnnode) {  			ct = nf_ct_tuplehash_to_ctrack(h);  			if (nf_ct_is_expired(ct)) { +				/* need to defer nf_ct_kill() until lock is released */  				if (i < ARRAY_SIZE(nf_ct_evict) &&  				    refcount_inc_not_zero(&ct->ct_general.use))  					nf_ct_evict[i++] = ct; diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6ad7bbc90d38..05895878610c 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -306,6 +306,9 @@ static int ct_seq_show(struct seq_file *s, void *v)  	if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use)))  		return 0; +	/* load ->status after refcount increase */ +	smp_acquire__after_ctrl_dep(); +  	if (nf_ct_should_gc(ct)) {  		nf_ct_kill(ct);  		goto release; diff --git a/net/netfilter/nf_log_syslog.c b/net/netfilter/nf_log_syslog.c index 77bcb10fc586..cb894f0d63e9 100644 --- a/net/netfilter/nf_log_syslog.c +++ b/net/netfilter/nf_log_syslog.c @@ -67,7 +67,7 @@ dump_arp_packet(struct nf_log_buf *m,  	unsigned int logflags;  	struct arphdr _arph; -	ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); +	ah = skb_header_pointer(skb, nhoff, sizeof(_arph), &_arph);  	if (!ah) {  		nf_log_buf_add(m, "TRUNCATED");  		return; @@ -96,7 +96,7 @@ dump_arp_packet(struct nf_log_buf *m,  	    ah->ar_pln != sizeof(__be32))  		return; -	ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp); +	ap = skb_header_pointer(skb, nhoff + sizeof(_arph), sizeof(_arpp), &_arpp);  	if (!ap) {  		nf_log_buf_add(m, " INCOMPLETE [%zu bytes]",  			       skb->len - sizeof(_arph)); @@ -149,7 +149,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,  	nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,  				  prefix); -	dump_arp_packet(m, loginfo, skb, 0); +	dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));  	nf_log_buf_close(m);  } @@ -850,7 +850,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,  	if (in)  		dump_mac_header(m, loginfo, skb); -	dump_ipv4_packet(net, m, loginfo, skb, 0); +	dump_ipv4_packet(net, m, loginfo, skb, skb_network_offset(skb));  	nf_log_buf_close(m);  } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d6b59beab3a9..646d5fd53604 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5833,8 +5833,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,  	if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))  		return -EINVAL; -	if (flags != 0) -		nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS); +	if (flags != 0) { +		err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS); +		if (err < 0) +			return err; +	}  	if (set->flags & NFT_SET_MAP) {  		if (nla[NFTA_SET_ELEM_DATA] == NULL && @@ -5943,7 +5946,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,  		if (err < 0)  			goto err_set_elem_expr; -		nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); +		err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); +		if (err < 0) +			goto err_parse_key;  	}  	if (nla[NFTA_SET_ELEM_KEY_END]) { @@ -5952,22 +5957,31 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,  		if (err < 0)  			goto err_parse_key; -		
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen); +		err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen); +		if (err < 0) +			goto err_parse_key_end;  	}  	if (timeout > 0) { -		nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION); -		if (timeout != set->timeout) -			nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT); +		err = nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION); +		if (err < 0) +			goto err_parse_key_end; + +		if (timeout != set->timeout) { +			err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT); +			if (err < 0) +				goto err_parse_key_end; +		}  	}  	if (num_exprs) {  		for (i = 0; i < num_exprs; i++)  			size += expr_array[i]->ops->size; -		nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS, -				       sizeof(struct nft_set_elem_expr) + -				       size); +		err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS, +					     sizeof(struct nft_set_elem_expr) + size); +		if (err < 0) +			goto err_parse_key_end;  	}  	if (nla[NFTA_SET_ELEM_OBJREF] != NULL) { @@ -5982,7 +5996,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,  			err = PTR_ERR(obj);  			goto err_parse_key_end;  		} -		nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF); +		err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF); +		if (err < 0) +			goto err_parse_key_end;  	}  	if (nla[NFTA_SET_ELEM_DATA] != NULL) { @@ -6016,7 +6032,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,  							  NFT_VALIDATE_NEED);  		} -		nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len); +		err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len); +		if (err < 0) +			goto err_parse_data;  	}  	/* The full maximum length of userdata can exceed the maximum @@ -6026,9 +6044,12 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,  	ulen = 0;  	if (nla[NFTA_SET_ELEM_USERDATA] != NULL) {  		ulen = nla_len(nla[NFTA_SET_ELEM_USERDATA]); -		if (ulen > 0) -			nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA, -					       ulen); +		if (ulen > 0) { +			err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA, +						     ulen); +			if (err < 0) +				goto err_parse_data; +		}  	}  	err = -ENOMEM; @@ -6256,8 +6277,11 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,  	nft_set_ext_prepare(&tmpl); -	if (flags != 0) -		nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS); +	if (flags != 0) { +		err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS); +		if (err < 0) +			return err; +	}  	if (nla[NFTA_SET_ELEM_KEY]) {  		err = nft_setelem_parse_key(ctx, set, &elem.key.val, @@ -6265,16 +6289,20 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,  		if (err < 0)  			return err; -		nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); +		err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); +		if (err < 0) +			goto fail_elem;  	}  	if (nla[NFTA_SET_ELEM_KEY_END]) {  		err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,  					    nla[NFTA_SET_ELEM_KEY_END]);  		if (err < 0) -			return err; +			goto fail_elem; -		nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen); +		err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen); +		if (err < 0) +			goto fail_elem_key_end;  	}  	err = -ENOMEM; @@ -6282,7 +6310,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,  				      elem.key_end.val.data, NULL, 0, 0,  				      GFP_KERNEL_ACCOUNT);  	if (elem.priv == NULL) -		goto fail_elem; +		goto fail_elem_key_end;  	ext = nft_set_elem_ext(set, elem.priv);  	if (flags) @@ -6306,6 +6334,8 @@ 
fail_ops:  	kfree(trans);  fail_trans:  	kfree(elem.priv); +fail_elem_key_end: +	nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);  fail_elem:  	nft_data_release(&elem.key.val, NFT_DATA_VALUE);  	return err; diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index ec6f4b699a2b..ce827e79c66a 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -1419,9 +1419,9 @@ static struct notifier_block tls_dev_notifier = {  	.notifier_call	= tls_dev_event,  }; -void __init tls_device_init(void) +int __init tls_device_init(void)  { -	register_netdevice_notifier(&tls_dev_notifier); +	return register_netdevice_notifier(&tls_dev_notifier);  }  void __exit tls_device_cleanup(void) diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 2ffede463e4a..d80ab3d1764e 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -1048,7 +1048,12 @@ static int __init tls_register(void)  	if (err)  		return err; -	tls_device_init(); +	err = tls_device_init(); +	if (err) { +		unregister_pernet_subsys(&tls_proc_ops); +		return err; +	} +  	tcp_register_ulp(&tcp_tls_ulp_ops);  	return 0; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index ff4d48fcbfb2..607a68911047 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -1031,7 +1031,8 @@ void __cfg80211_port_authorized(struct wireless_dev *wdev, const u8 *bssid)  {  	ASSERT_WDEV_LOCK(wdev); -	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION)) +	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION && +		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))  		return;  	if (WARN_ON(!wdev->current_bss) || diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index a733aff02006..708de9656bbd 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -75,7 +75,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)  {  	long rc;  	const char *algo; -	struct crypto_shash **tfm, *tmp_tfm = NULL; +	struct crypto_shash **tfm, *tmp_tfm;  	struct shash_desc *desc;  	if (type == EVM_XATTR_HMAC) { @@ -120,16 +120,13 @@ unlock:  alloc:  	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),  			GFP_KERNEL); -	if (!desc) { -		crypto_free_shash(tmp_tfm); +	if (!desc)  		return ERR_PTR(-ENOMEM); -	}  	desc->tfm = *tfm;  	rc = crypto_shash_init(desc);  	if (rc) { -		crypto_free_shash(tmp_tfm);  		kfree(desc);  		return ERR_PTR(rc);  	} diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c index cdb84dccd24e..bde74fcecee3 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -514,7 +514,8 @@ int ima_appraise_measurement(enum ima_hooks func,  		goto out;  	} -	status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint); +	status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, +				 rc < 0 ? 
0 : rc, iint);  	switch (status) {  	case INTEGRITY_PASS:  	case INTEGRITY_PASS_IMMUTABLE: diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index a7206cc1d7d1..64499056648a 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -205,6 +205,7 @@ out_array:  		crypto_free_shash(ima_algo_array[i].tfm);  	} +	kfree(ima_algo_array);  out:  	crypto_free_shash(ima_shash_tfm);  	return rc; diff --git a/security/integrity/ima/ima_efi.c b/security/integrity/ima/ima_efi.c index 71786d01946f..9db66fe310d4 100644 --- a/security/integrity/ima/ima_efi.c +++ b/security/integrity/ima/ima_efi.c @@ -67,6 +67,8 @@ const char * const *arch_get_ima_policy(void)  	if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot()) {  		if (IS_ENABLED(CONFIG_MODULE_SIG))  			set_module_sig_enforced(); +		if (IS_ENABLED(CONFIG_KEXEC_SIG)) +			set_kexec_sig_enforced();  		return sb_arch_rules;  	}  	return NULL; diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c index c877f01a5471..7bf9b1507220 100644 --- a/security/integrity/ima/ima_template_lib.c +++ b/security/integrity/ima/ima_template_lib.c @@ -323,10 +323,10 @@ static int ima_eventdigest_init_common(const u8 *digest, u32 digestsize,  	else  		/*  		 * If digest is NULL, the event being recorded is a violation. -		 * Make room for the digest by increasing the offset of -		 * IMA_DIGEST_SIZE. +		 * Make room for the digest by increasing the offset by the +		 * hash algorithm digest size.  		 */ -		offset += IMA_DIGEST_SIZE; +		offset += hash_digest_size[hash_algo];  	return ima_write_template_field_data(buffer, offset + digestsize,  					     fmt, field_data); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 3e541a4c0423..83ae21a01bbf 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -944,6 +944,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {  	SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),  	SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), +	SND_PCI_QUIRK(0x103c, 0x82b4, "HP ProDesk 600 G3", CXT_FIXUP_HP_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),  	SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),  	SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 007dd8b5e1f2..2f55bc43bfa9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6901,6 +6901,7 @@ enum {  	ALC298_FIXUP_LENOVO_SPK_VOLUME,  	ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER,  	ALC269_FIXUP_ATIV_BOOK_8, +	ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE,  	ALC221_FIXUP_HP_MIC_NO_PRESENCE,  	ALC256_FIXUP_ASUS_HEADSET_MODE,  	ALC256_FIXUP_ASUS_MIC, @@ -7837,6 +7838,16 @@ static const struct hda_fixup alc269_fixups[] = {  		.chained = true,  		.chain_id = ALC269_FIXUP_NO_SHUTUP  	}, +	[ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE] = { +		.type = HDA_FIXUP_PINS, +		.v.pins = (const struct hda_pintbl[]) { +			{ 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ +			{ 0x1a, 0x01813030 }, /* use as headphone mic, without its own jack detect */ +			{ } +		}, +		.chained = true, +		.chain_id = ALC269_FIXUP_HEADSET_MODE +	},  	[ALC221_FIXUP_HP_MIC_NO_PRESENCE] = {  	
	.type = HDA_FIXUP_PINS,  		.v.pins = (const struct hda_pintbl[]) { @@ -8886,6 +8897,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),  	SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),  	SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC), +	SND_PCI_QUIRK(0x1025, 0x129d, "Acer SWIFT SF313-51", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),  	SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), @@ -8895,6 +8907,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x1025, 0x1430, "Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC),  	SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), +	SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X),  	SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),  	SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),  	SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X), @@ -9010,6 +9023,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),  	SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),  	SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), +	SND_PCI_QUIRK(0x103c, 0x2b5e, "HP 288 Pro G2 MT", ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC), @@ -9096,6 +9110,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x103c, 0x89c6, "Zbook Fury 17 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),  	SND_PCI_QUIRK(0x103c, 0x89ca, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),  	SND_PCI_QUIRK(0x103c, 0x8a78, "HP Dev One", ALC285_FIXUP_HP_LIMIT_INT_MIC_BOOST), +	SND_PCI_QUIRK(0x103c, 0x8aa0, "HP ProBook 440 G9 (MB 8A9E)", ALC236_FIXUP_HP_GPIO_LED), +	SND_PCI_QUIRK(0x103c, 0x8aa3, "HP ProBook 450 G9 (MB 8AA1)", ALC236_FIXUP_HP_GPIO_LED), +	SND_PCI_QUIRK(0x103c, 0x8aa8, "HP EliteBook 640 G9 (MB 8AA6)", ALC236_FIXUP_HP_GPIO_LED), +	SND_PCI_QUIRK(0x103c, 0x8aab, "HP EliteBook 650 G9 (MB 8AA9)", ALC236_FIXUP_HP_GPIO_LED),  	SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),  	SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),  	SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), @@ -9355,6 +9373,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {  	SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),  	SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),  	SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), +	SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC),  	SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),  	SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),  	SND_PCI_QUIRK(0x8086, 
0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), @@ -11217,6 +11236,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {  	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),  	SND_PCI_QUIRK(0x103c, 0x8719, "HP", ALC897_FIXUP_HP_HSMIC_VERB),  	SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), +	SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2),  	SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2),  	SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),  	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50), diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index e32871b3f68a..7434aeeda292 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c @@ -1760,8 +1760,8 @@ static bool arizona_aif_cfg_changed(struct snd_soc_component *component,  	if (bclk != (val & ARIZONA_AIF1_BCLK_FREQ_MASK))  		return true; -	val = snd_soc_component_read(component, base + ARIZONA_AIF_TX_BCLK_RATE); -	if (lrclk != (val & ARIZONA_AIF1TX_BCPF_MASK)) +	val = snd_soc_component_read(component, base + ARIZONA_AIF_RX_BCLK_RATE); +	if (lrclk != (val & ARIZONA_AIF1RX_BCPF_MASK))  		return true;  	val = snd_soc_component_read(component, base + ARIZONA_AIF_FRAME_CTRL_1); diff --git a/sound/soc/codecs/cs47l92.c b/sound/soc/codecs/cs47l92.c index a1b8dcdb9f7b..444026b7d54b 100644 --- a/sound/soc/codecs/cs47l92.c +++ b/sound/soc/codecs/cs47l92.c @@ -119,7 +119,13 @@ static int cs47l92_put_demux(struct snd_kcontrol *kcontrol,  end:  	snd_soc_dapm_mutex_unlock(dapm); -	return snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); +	ret = snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); +	if (ret < 0) { +		dev_err(madera->dev, "Failed to update demux power state: %d\n", ret); +		return ret; +	} + +	return change;  }  static SOC_ENUM_SINGLE_DECL(cs47l92_outdemux_enum, diff --git a/sound/soc/codecs/max98396.c b/sound/soc/codecs/max98396.c index 56eb62bb041f..34db38812807 100644 --- a/sound/soc/codecs/max98396.c +++ b/sound/soc/codecs/max98396.c @@ -342,12 +342,15 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)  {  	struct snd_soc_component *component = codec_dai->component;  	struct max98396_priv *max98396 = snd_soc_component_get_drvdata(component); -	unsigned int format = 0; +	unsigned int format_mask, format = 0;  	unsigned int bclk_pol = 0;  	int ret, status;  	int reg;  	bool update = false; +	format_mask = MAX98396_PCM_MODE_CFG_FORMAT_MASK | +		      MAX98396_PCM_MODE_CFG_LRCLKEDGE; +  	dev_dbg(component->dev, "%s: fmt 0x%08X\n", __func__, fmt);  	switch (fmt & SND_SOC_DAIFMT_INV_MASK) { @@ -395,7 +398,7 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)  		ret = regmap_read(max98396->regmap, MAX98396_R2041_PCM_MODE_CFG, &reg);  		if (ret < 0)  			return -EINVAL; -		if (format != (reg & MAX98396_PCM_BCLKEDGE_BSEL_MASK)) { +		if (format != (reg & format_mask)) {  			update = true;  		} else {  			ret = regmap_read(max98396->regmap, @@ -412,8 +415,7 @@ static int max98396_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)  	regmap_update_bits(max98396->regmap,  			   MAX98396_R2041_PCM_MODE_CFG, -			   MAX98396_PCM_BCLKEDGE_BSEL_MASK, -			   format); +			   format_mask, format);  	regmap_update_bits(max98396->regmap,  			   MAX98396_R2042_PCM_CLK_SETUP, diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index 69c80d80ed9d..18b3da9211e3 100644 
--- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c @@ -1984,7 +1984,12 @@ static int rt5640_set_bias_level(struct snd_soc_component *component,  		snd_soc_component_write(component, RT5640_PWR_DIG2, 0x0000);  		snd_soc_component_write(component, RT5640_PWR_VOL, 0x0000);  		snd_soc_component_write(component, RT5640_PWR_MIXER, 0x0000); -		snd_soc_component_write(component, RT5640_PWR_ANLG1, 0x0000); +		if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) +			snd_soc_component_write(component, RT5640_PWR_ANLG1, +				0x0018); +		else +			snd_soc_component_write(component, RT5640_PWR_ANLG1, +				0x0000);  		snd_soc_component_write(component, RT5640_PWR_ANLG2, 0x0000);  		break; @@ -2393,9 +2398,15 @@ static void rt5640_jack_work(struct work_struct *work)  static irqreturn_t rt5640_irq(int irq, void *data)  {  	struct rt5640_priv *rt5640 = data; +	int delay = 0; + +	if (rt5640->jd_src == RT5640_JD_SRC_HDA_HEADER) { +		cancel_delayed_work_sync(&rt5640->jack_work); +		delay = 100; +	}  	if (rt5640->jack) -		queue_delayed_work(system_long_wq, &rt5640->jack_work, 0); +		queue_delayed_work(system_long_wq, &rt5640->jack_work, delay);  	return IRQ_HANDLED;  } @@ -2580,6 +2591,12 @@ static void rt5640_enable_hda_jack_detect(  	snd_soc_component_update_bits(component, RT5640_DUMMY1, 0x400, 0x0); +	snd_soc_component_update_bits(component, RT5640_PWR_ANLG1, +		RT5640_PWR_VREF2, RT5640_PWR_VREF2); +	usleep_range(10000, 15000); +	snd_soc_component_update_bits(component, RT5640_PWR_ANLG1, +		RT5640_PWR_FV2, RT5640_PWR_FV2); +  	rt5640->jack = jack;  	ret = request_irq(rt5640->irq, rt5640_irq, @@ -2696,16 +2713,13 @@ static int rt5640_probe(struct snd_soc_component *component)  	if (device_property_read_u32(component->dev,  				     "realtek,jack-detect-source", &val) == 0) { -		if (val <= RT5640_JD_SRC_GPIO4) { +		if (val <= RT5640_JD_SRC_GPIO4)  			rt5640->jd_src = val << RT5640_JD_SFT; -		} else if (val == RT5640_JD_SRC_HDA_HEADER) { +		else if (val == RT5640_JD_SRC_HDA_HEADER)  			rt5640->jd_src = RT5640_JD_SRC_HDA_HEADER; -			snd_soc_component_update_bits(component, RT5640_DUMMY1, -				0x0300, 0x0); -		} else { +		else  			dev_warn(component->dev, "Warning: Invalid jack-detect-source value: %d, leaving jack-detect disabled\n",  				 val); -		}  	}  	if (!device_property_read_bool(component->dev, "realtek,jack-detect-not-inverted")) diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 2aa48aef6a97..3363d1696ad7 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c @@ -1795,6 +1795,9 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)  {  	struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client); +	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT); +	regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT); +  	clk_disable_unprepare(sgtl5000->mclk);  	regulator_bulk_disable(sgtl5000->num_supplies, sgtl5000->supplies);  	regulator_bulk_free(sgtl5000->num_supplies, sgtl5000->supplies); @@ -1802,6 +1805,11 @@ static int sgtl5000_i2c_remove(struct i2c_client *client)  	return 0;  } +static void sgtl5000_i2c_shutdown(struct i2c_client *client) +{ +	sgtl5000_i2c_remove(client); +} +  static const struct i2c_device_id sgtl5000_id[] = {  	{"sgtl5000", 0},  	{}, @@ -1822,6 +1830,7 @@ static struct i2c_driver sgtl5000_i2c_driver = {  	},  	.probe_new = sgtl5000_i2c_probe,  	.remove = sgtl5000_i2c_remove, +	.shutdown = sgtl5000_i2c_shutdown,  	.id_table = sgtl5000_id,  }; diff --git 
a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index 56ec5863f250..3a808c762299 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -80,6 +80,7 @@  /*   * SGTL5000_CHIP_DIG_POWER   */ +#define SGTL5000_DIG_POWER_DEFAULT		0x0000  #define SGTL5000_ADC_EN				0x0040  #define SGTL5000_DAC_EN				0x0020  #define SGTL5000_DAP_POWERUP			0x0010 diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c index d395feffb30b..4cb788f3e5f7 100644 --- a/sound/soc/codecs/tas2764.c +++ b/sound/soc/codecs/tas2764.c @@ -42,10 +42,12 @@ static void tas2764_reset(struct tas2764_priv *tas2764)  		gpiod_set_value_cansleep(tas2764->reset_gpio, 0);  		msleep(20);  		gpiod_set_value_cansleep(tas2764->reset_gpio, 1); +		usleep_range(1000, 2000);  	}  	snd_soc_component_write(tas2764->component, TAS2764_SW_RST,  				TAS2764_RST); +	usleep_range(1000, 2000);  }  static int tas2764_set_bias_level(struct snd_soc_component *component, @@ -107,8 +109,10 @@ static int tas2764_codec_resume(struct snd_soc_component *component)  	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component);  	int ret; -	if (tas2764->sdz_gpio) +	if (tas2764->sdz_gpio) {  		gpiod_set_value_cansleep(tas2764->sdz_gpio, 1); +		usleep_range(1000, 2000); +	}  	ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL,  					    TAS2764_PWR_CTRL_MASK, @@ -131,7 +135,8 @@ static const char * const tas2764_ASI1_src[] = {  };  static SOC_ENUM_SINGLE_DECL( -	tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, 4, tas2764_ASI1_src); +	tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, TAS2764_TDM_CFG2_SCFG_SHIFT, +	tas2764_ASI1_src);  static const struct snd_kcontrol_new tas2764_asi1_mux =  	SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum); @@ -329,20 +334,22 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)  {  	struct snd_soc_component *component = dai->component;  	struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); -	u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0; -	int iface; +	u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0;  	int ret;  	switch (fmt & SND_SOC_DAIFMT_INV_MASK) { +	case SND_SOC_DAIFMT_NB_IF: +		asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; +		fallthrough;  	case SND_SOC_DAIFMT_NB_NF:  		asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING;  		break; +	case SND_SOC_DAIFMT_IB_IF: +		asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; +		fallthrough;  	case SND_SOC_DAIFMT_IB_NF:  		asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING;  		break; -	default: -		dev_err(tas2764->dev, "ASI format Inverse is not found\n"); -		return -EINVAL;  	}  	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, @@ -353,13 +360,13 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)  	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {  	case SND_SOC_DAIFMT_I2S: +		asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; +		fallthrough;  	case SND_SOC_DAIFMT_DSP_A: -		iface = TAS2764_TDM_CFG2_SCFG_I2S;  		tdm_rx_start_slot = 1;  		break;  	case SND_SOC_DAIFMT_DSP_B:  	case SND_SOC_DAIFMT_LEFT_J: -		iface = TAS2764_TDM_CFG2_SCFG_LEFT_J;  		tdm_rx_start_slot = 0;  		break;  	default: @@ -368,14 +375,15 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)  		return -EINVAL;  	} -	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, -					    TAS2764_TDM_CFG1_MASK, -					    (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT)); +	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG0, +					    TAS2764_TDM_CFG0_FRAME_START, +					    asi_cfg_0);  	if (ret < 0)  		
return ret; -	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG2, -					    TAS2764_TDM_CFG2_SCFG_MASK, iface); +	ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, +					    TAS2764_TDM_CFG1_MASK, +					    (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT));  	if (ret < 0)  		return ret; @@ -501,8 +509,10 @@ static int tas2764_codec_probe(struct snd_soc_component *component)  	tas2764->component = component; -	if (tas2764->sdz_gpio) +	if (tas2764->sdz_gpio) {  		gpiod_set_value_cansleep(tas2764->sdz_gpio, 1); +		usleep_range(1000, 2000); +	}  	tas2764_reset(tas2764); @@ -526,12 +536,12 @@ static int tas2764_codec_probe(struct snd_soc_component *component)  }  static DECLARE_TLV_DB_SCALE(tas2764_digital_tlv, 1100, 50, 0); -static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10000, 50, 0); +static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10050, 50, 1);  static const struct snd_kcontrol_new tas2764_snd_controls[] = {  	SOC_SINGLE_TLV("Speaker Volume", TAS2764_DVC, 0,  		       TAS2764_DVC_MAX, 1, tas2764_playback_volume), -	SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 0, 0x14, 0, +	SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 1, 0x14, 0,  		       tas2764_digital_tlv),  }; @@ -556,7 +566,7 @@ static const struct reg_default tas2764_reg_defaults[] = {  	{ TAS2764_SW_RST, 0x00 },  	{ TAS2764_PWR_CTRL, 0x1a },  	{ TAS2764_DVC, 0x00 }, -	{ TAS2764_CHNL_0, 0x00 }, +	{ TAS2764_CHNL_0, 0x28 },  	{ TAS2764_TDM_CFG0, 0x09 },  	{ TAS2764_TDM_CFG1, 0x02 },  	{ TAS2764_TDM_CFG2, 0x0a }, diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h index 67d6fd903c42..f015f22a083b 100644 --- a/sound/soc/codecs/tas2764.h +++ b/sound/soc/codecs/tas2764.h @@ -47,6 +47,7 @@  #define TAS2764_TDM_CFG0_MASK		GENMASK(3, 1)  #define TAS2764_TDM_CFG0_44_1_48KHZ	BIT(3)  #define TAS2764_TDM_CFG0_88_2_96KHZ	(BIT(3) | BIT(1)) +#define TAS2764_TDM_CFG0_FRAME_START	BIT(0)  /* TDM Configuration Reg1 */  #define TAS2764_TDM_CFG1		TAS2764_REG(0X0, 0x09) @@ -66,10 +67,7 @@  #define TAS2764_TDM_CFG2_RXS_16BITS	0x0  #define TAS2764_TDM_CFG2_RXS_24BITS	BIT(0)  #define TAS2764_TDM_CFG2_RXS_32BITS	BIT(1) -#define TAS2764_TDM_CFG2_SCFG_MASK	GENMASK(5, 4) -#define TAS2764_TDM_CFG2_SCFG_I2S	0x0 -#define TAS2764_TDM_CFG2_SCFG_LEFT_J	BIT(4) -#define TAS2764_TDM_CFG2_SCFG_RIGHT_J	BIT(5) +#define TAS2764_TDM_CFG2_SCFG_SHIFT	4  /* TDM Configuration Reg3 */  #define TAS2764_TDM_CFG3		TAS2764_REG(0X0, 0x0c) diff --git a/sound/soc/codecs/tlv320adcx140.c b/sound/soc/codecs/tlv320adcx140.c index b55f0b836932..0b729658fde8 100644 --- a/sound/soc/codecs/tlv320adcx140.c +++ b/sound/soc/codecs/tlv320adcx140.c @@ -33,7 +33,6 @@ struct adcx140_priv {  	bool micbias_vg;  	unsigned int dai_fmt; -	unsigned int tdm_delay;  	unsigned int slot_width;  }; @@ -792,12 +791,13 @@ static int adcx140_set_dai_tdm_slot(struct snd_soc_dai *codec_dai,  {  	struct snd_soc_component *component = codec_dai->component;  	struct adcx140_priv *adcx140 = snd_soc_component_get_drvdata(component); -	unsigned int lsb; -	/* TDM based on DSP mode requires slots to be adjacent */ -	lsb = __ffs(tx_mask); -	if ((lsb + 1) != __fls(tx_mask)) { -		dev_err(component->dev, "Invalid mask, slots must be adjacent\n"); +	/* +	 * The chip itself supports arbitrary masks, but the driver currently +	 * only supports adjacent slots beginning at the first slot. 
+	 */ +	if (tx_mask != GENMASK(__fls(tx_mask), 0)) { +		dev_err(component->dev, "Only lower adjacent slots are supported\n");  		return -EINVAL;  	} @@ -812,7 +812,6 @@ static int adcx140_set_dai_tdm_slot(struct snd_soc_dai *codec_dai,  		return -EINVAL;  	} -	adcx140->tdm_delay = lsb;  	adcx140->slot_width = slot_width;  	return 0; diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index d9f135200688..3cb7a3eab8c7 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -342,7 +342,7 @@ struct wcd9335_codec {  	struct regulator_bulk_data supplies[WCD9335_MAX_SUPPLY];  	unsigned int rx_port_value[WCD9335_RX_MAX]; -	unsigned int tx_port_value; +	unsigned int tx_port_value[WCD9335_TX_MAX];  	int hph_l_gain;  	int hph_r_gain;  	u32 rx_bias_count; @@ -1334,8 +1334,13 @@ static int slim_tx_mixer_get(struct snd_kcontrol *kc,  	struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kc);  	struct wcd9335_codec *wcd = dev_get_drvdata(dapm->dev); +	struct snd_soc_dapm_widget *widget = snd_soc_dapm_kcontrol_widget(kc); +	struct soc_mixer_control *mixer = +			(struct soc_mixer_control *)kc->private_value; +	int dai_id = widget->shift; +	int port_id = mixer->shift; -	ucontrol->value.integer.value[0] = wcd->tx_port_value; +	ucontrol->value.integer.value[0] = wcd->tx_port_value[port_id] == dai_id;  	return 0;  } @@ -1358,12 +1363,12 @@ static int slim_tx_mixer_put(struct snd_kcontrol *kc,  	case AIF2_CAP:  	case AIF3_CAP:  		/* only add to the list if value not set */ -		if (enable && !(wcd->tx_port_value & BIT(port_id))) { -			wcd->tx_port_value |= BIT(port_id); +		if (enable && wcd->tx_port_value[port_id] != dai_id) { +			wcd->tx_port_value[port_id] = dai_id;  			list_add_tail(&wcd->tx_chs[port_id].list,  					&wcd->dai[dai_id].slim_ch_list); -		} else if (!enable && (wcd->tx_port_value & BIT(port_id))) { -			wcd->tx_port_value &= ~BIT(port_id); +		} else if (!enable && wcd->tx_port_value[port_id] == dai_id) { +			wcd->tx_port_value[port_id] = -1;  			list_del_init(&wcd->tx_chs[port_id].list);  		}  		break; diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c index da2f8998df87..b034df47a5ef 100644 --- a/sound/soc/codecs/wm5102.c +++ b/sound/soc/codecs/wm5102.c @@ -680,12 +680,17 @@ static int wm5102_out_comp_coeff_put(struct snd_kcontrol *kcontrol,  {  	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);  	struct arizona *arizona = dev_get_drvdata(component->dev->parent); +	uint16_t dac_comp_coeff = get_unaligned_be16(ucontrol->value.bytes.data); +	int ret = 0;  	mutex_lock(&arizona->dac_comp_lock); -	arizona->dac_comp_coeff = get_unaligned_be16(ucontrol->value.bytes.data); +	if (arizona->dac_comp_coeff != dac_comp_coeff) { +		arizona->dac_comp_coeff = dac_comp_coeff; +		ret = 1; +	}  	mutex_unlock(&arizona->dac_comp_lock); -	return 0; +	return ret;  }  static int wm5102_out_comp_switch_get(struct snd_kcontrol *kcontrol, @@ -706,12 +711,20 @@ static int wm5102_out_comp_switch_put(struct snd_kcontrol *kcontrol,  {  	struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);  	struct arizona *arizona = dev_get_drvdata(component->dev->parent); +	struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; +	int ret = 0; + +	if (ucontrol->value.integer.value[0] > mc->max) +		return -EINVAL;  	mutex_lock(&arizona->dac_comp_lock); -	arizona->dac_comp_enabled = ucontrol->value.integer.value[0]; +	if (arizona->dac_comp_enabled != ucontrol->value.integer.value[0]) { +		
arizona->dac_comp_enabled = ucontrol->value.integer.value[0]; +		ret = 1; +	}  	mutex_unlock(&arizona->dac_comp_lock); -	return 0; +	return ret;  }  static const char * const wm5102_osr_text[] = { diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c index 00b59fc9b1fe..ab5481187c71 100644 --- a/sound/soc/codecs/wm8998.c +++ b/sound/soc/codecs/wm8998.c @@ -108,6 +108,7 @@ static int wm8998_inmux_put(struct snd_kcontrol *kcontrol,  	struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;  	unsigned int mode_reg, mode_index;  	unsigned int mux, inmode, src_val, mode_val; +	int change, ret;  	mux = ucontrol->value.enumerated.item[0];  	if (mux > 1) @@ -137,14 +138,20 @@ static int wm8998_inmux_put(struct snd_kcontrol *kcontrol,  	snd_soc_component_update_bits(component, mode_reg,  				      ARIZONA_IN1_MODE_MASK, mode_val); -	snd_soc_component_update_bits(component, e->reg, -				      ARIZONA_IN1L_SRC_MASK | -				      ARIZONA_IN1L_SRC_SE_MASK, -				      src_val); +	change = snd_soc_component_update_bits(component, e->reg, +					       ARIZONA_IN1L_SRC_MASK | +					       ARIZONA_IN1L_SRC_SE_MASK, +					       src_val); -	return snd_soc_dapm_mux_update_power(dapm, kcontrol, -					     ucontrol->value.enumerated.item[0], -					     e, NULL); +	ret = snd_soc_dapm_mux_update_power(dapm, kcontrol, +					    ucontrol->value.enumerated.item[0], +					    e, NULL); +	if (ret < 0) { +		dev_err(arizona->dev, "Failed to update demux power state: %d\n", ret); +		return ret; +	} + +	return change;  }  static const char * const wm8998_inmux_texts[] = { diff --git a/sound/soc/generic/audio-graph-card2.c b/sound/soc/generic/audio-graph-card2.c index 77ac4051b827..d34b29a49268 100644 --- a/sound/soc/generic/audio-graph-card2.c +++ b/sound/soc/generic/audio-graph-card2.c @@ -90,12 +90,12 @@ links indicates connection part of CPU side (= A).  			
ports@0 {  (X) (A)			mcpu:	port@0 { mcpu0_ep: endpoint { remote-endpoint = <&mcodec0_ep>; }; };  (y)				port@1 { mcpu1_ep: endpoint { remote-endpoint = <&cpu1_ep>; }; }; -(y)				port@1 { mcpu2_ep: endpoint { remote-endpoint = <&cpu2_ep>; }; }; +(y)				port@2 { mcpu2_ep: endpoint { remote-endpoint = <&cpu2_ep>; }; };  			};  			ports@1 {  (X)				port@0 { mcodec0_ep: endpoint { remote-endpoint = <&mcpu0_ep>; }; }; -(y)				port@0 { mcodec1_ep: endpoint { remote-endpoint = <&codec1_ep>; }; }; -(y)				port@1 { mcodec2_ep: endpoint { remote-endpoint = <&codec2_ep>; }; }; +(y)				port@1 { mcodec1_ep: endpoint { remote-endpoint = <&codec1_ep>; }; }; +(y)				port@2 { mcodec2_ep: endpoint { remote-endpoint = <&codec2_ep>; }; };  			};  		};  	}; diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c index 5d67a2c87a1d..4a90a0a5d831 100644 --- a/sound/soc/intel/boards/sof_rt5682.c +++ b/sound/soc/intel/boards/sof_rt5682.c @@ -69,11 +69,10 @@ static unsigned long sof_rt5682_quirk = SOF_RT5682_MCLK_EN |  static int is_legacy_cpu; -static struct snd_soc_jack sof_hdmi[3]; -  struct sof_hdmi_pcm {  	struct list_head head;  	struct snd_soc_dai *codec_dai; +	struct snd_soc_jack hdmi_jack;  	int device;  }; @@ -434,7 +433,6 @@ static int sof_card_late_probe(struct snd_soc_card *card)  	char jack_name[NAME_SIZE];  	struct sof_hdmi_pcm *pcm;  	int err; -	int i = 0;  	/* HDMI is not supported by SOF on Baytrail/CherryTrail */  	if (is_legacy_cpu || !ctx->idisp_codec) @@ -455,17 +453,15 @@ static int sof_card_late_probe(struct snd_soc_card *card)  		snprintf(jack_name, sizeof(jack_name),  			 "HDMI/DP, pcm=%d Jack", pcm->device);  		err = snd_soc_card_jack_new(card, jack_name, -					    SND_JACK_AVOUT, &sof_hdmi[i]); +					    SND_JACK_AVOUT, &pcm->hdmi_jack);  		if (err)  			return err;  		err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device, -					  &sof_hdmi[i]); +					  &pcm->hdmi_jack);  		if (err < 0)  			return err; - -		i++;  	}  	if (sof_rt5682_quirk & SOF_MAX98373_SPEAKER_AMP_PRESENT) { diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c index 2439a574ac2f..deb7b820325e 100644 --- a/sound/soc/intel/skylake/skl-nhlt.c +++ b/sound/soc/intel/skylake/skl-nhlt.c @@ -99,7 +99,6 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,  	struct nhlt_fmt_cfg *fmt_cfg;  	struct wav_fmt_ext *wav_fmt;  	unsigned long rate; -	bool present = false;  	int rate_index = 0;  	u16 channels, bps;  	u8 clk_src; @@ -112,9 +111,12 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,  	if (fmt->fmt_count == 0)  		return; +	fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;  	for (i = 0; i < fmt->fmt_count; i++) { -		fmt_cfg = &fmt->fmt_config[i]; -		wav_fmt = &fmt_cfg->fmt_ext; +		struct nhlt_fmt_cfg *saved_fmt_cfg = fmt_cfg; +		bool present = false; + +		wav_fmt = &saved_fmt_cfg->fmt_ext;  		channels = wav_fmt->fmt.channels;  		bps = wav_fmt->fmt.bits_per_sample; @@ -132,12 +134,18 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,  		 * derive the rate.  		 
*/  		for (j = i; j < fmt->fmt_count; j++) { -			fmt_cfg = &fmt->fmt_config[j]; -			wav_fmt = &fmt_cfg->fmt_ext; +			struct nhlt_fmt_cfg *tmp_fmt_cfg = fmt_cfg; + +			wav_fmt = &tmp_fmt_cfg->fmt_ext;  			if ((fs == wav_fmt->fmt.samples_per_sec) && -			   (bps == wav_fmt->fmt.bits_per_sample)) +			   (bps == wav_fmt->fmt.bits_per_sample)) {  				channels = max_t(u16, channels,  						wav_fmt->fmt.channels); +				saved_fmt_cfg = tmp_fmt_cfg; +			} +			/* Move to the next nhlt_fmt_cfg */ +			tmp_fmt_cfg = (struct nhlt_fmt_cfg *)(tmp_fmt_cfg->config.caps + +							      tmp_fmt_cfg->config.size);  		}  		rate = channels * bps * fs; @@ -153,8 +161,11 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,  		/* Fill rate and parent for sclk/sclkfs */  		if (!present) { +			struct nhlt_fmt_cfg *first_fmt_cfg; + +			first_fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config;  			i2s_config_ext = (struct skl_i2s_config_blob_ext *) -						fmt->fmt_config[0].config.caps; +						first_fmt_cfg->config.caps;  			/* MCLK Divider Source Select */  			if (is_legacy_blob(i2s_config_ext->hdr.sig)) { @@ -168,6 +179,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,  			parent = skl_get_parent_clk(clk_src); +			/* Move to the next nhlt_fmt_cfg */ +			fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps + +							  fmt_cfg->config.size);  			/*  			 * Do not copy the config data if there is no parent  			 * clock available for this clock source select @@ -176,9 +190,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks,  				continue;  			sclk[id].rate_cfg[rate_index].rate = rate; -			sclk[id].rate_cfg[rate_index].config = fmt_cfg; +			sclk[id].rate_cfg[rate_index].config = saved_fmt_cfg;  			sclkfs[id].rate_cfg[rate_index].rate = rate; -			sclkfs[id].rate_cfg[rate_index].config = fmt_cfg; +			sclkfs[id].rate_cfg[rate_index].config = saved_fmt_cfg;  			sclk[id].parent_name = parent->name;  			sclkfs[id].parent_name = parent->name; @@ -192,13 +206,13 @@ static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk,  {  	struct skl_i2s_config_blob_ext *i2s_config_ext;  	struct skl_i2s_config_blob_legacy *i2s_config; -	struct nhlt_specific_cfg *fmt_cfg; +	struct nhlt_fmt_cfg *fmt_cfg;  	struct skl_clk_parent_src *parent;  	u32 clkdiv, div_ratio;  	u8 clk_src; -	fmt_cfg = &fmt->fmt_config[0].config; -	i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->caps; +	fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config; +	i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->config.caps;  	/* MCLK Divider Source Select and divider */  	if (is_legacy_blob(i2s_config_ext->hdr.sig)) { @@ -227,7 +241,7 @@ static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk,  		return;  	mclk[id].rate_cfg[0].rate = parent->rate/div_ratio; -	mclk[id].rate_cfg[0].config = &fmt->fmt_config[0]; +	mclk[id].rate_cfg[0].config = fmt_cfg;  	mclk[id].parent_name = parent->name;  } diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c index f424d7aa389a..794019286c70 100644 --- a/sound/soc/qcom/qdsp6/q6apm.c +++ b/sound/soc/qcom/qdsp6/q6apm.c @@ -75,6 +75,7 @@ static struct audioreach_graph *q6apm_get_audioreach_graph(struct q6apm *apm, ui  	id = idr_alloc(&apm->graph_idr, graph, graph_id, graph_id + 1, GFP_KERNEL);  	if (id < 0) {  		dev_err(apm->dev, "Unable to allocate graph id (%d)\n", graph_id); +		kfree(graph->graph);  		kfree(graph);  		mutex_unlock(&apm->lock);  		return ERR_PTR(id); diff --git 
a/sound/soc/ti/omap-mcbsp-priv.h b/sound/soc/ti/omap-mcbsp-priv.h index 7865cda4bf0a..da519ea1f303 100644 --- a/sound/soc/ti/omap-mcbsp-priv.h +++ b/sound/soc/ti/omap-mcbsp-priv.h @@ -316,8 +316,6 @@ static inline int omap_mcbsp_read(struct omap_mcbsp *mcbsp, u16 reg,  /* Sidetone specific API */  int omap_mcbsp_st_init(struct platform_device *pdev); -void omap_mcbsp_st_cleanup(struct platform_device *pdev); -  int omap_mcbsp_st_start(struct omap_mcbsp *mcbsp);  int omap_mcbsp_st_stop(struct omap_mcbsp *mcbsp); diff --git a/sound/soc/ti/omap-mcbsp-st.c b/sound/soc/ti/omap-mcbsp-st.c index 0bc7d26c660a..7e8179cae92e 100644 --- a/sound/soc/ti/omap-mcbsp-st.c +++ b/sound/soc/ti/omap-mcbsp-st.c @@ -347,7 +347,7 @@ int omap_mcbsp_st_init(struct platform_device *pdev)  	if (!st_data)  		return -ENOMEM; -	st_data->mcbsp_iclk = clk_get(mcbsp->dev, "ick"); +	st_data->mcbsp_iclk = devm_clk_get(mcbsp->dev, "ick");  	if (IS_ERR(st_data->mcbsp_iclk)) {  		dev_warn(mcbsp->dev,  			 "Failed to get ick, sidetone might be broken\n"); @@ -359,7 +359,7 @@ int omap_mcbsp_st_init(struct platform_device *pdev)  	if (!st_data->io_base_st)  		return -ENOMEM; -	ret = sysfs_create_group(&mcbsp->dev->kobj, &sidetone_attr_group); +	ret = devm_device_add_group(mcbsp->dev, &sidetone_attr_group);  	if (ret)  		return ret; @@ -368,16 +368,6 @@ int omap_mcbsp_st_init(struct platform_device *pdev)  	return 0;  } -void omap_mcbsp_st_cleanup(struct platform_device *pdev) -{ -	struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev); - -	if (mcbsp->st_data) { -		sysfs_remove_group(&mcbsp->dev->kobj, &sidetone_attr_group); -		clk_put(mcbsp->st_data->mcbsp_iclk); -	} -} -  static int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol,  				    struct snd_ctl_elem_info *uinfo)  { diff --git a/sound/soc/ti/omap-mcbsp.c b/sound/soc/ti/omap-mcbsp.c index 4479d74f0a45..9933b33c80ca 100644 --- a/sound/soc/ti/omap-mcbsp.c +++ b/sound/soc/ti/omap-mcbsp.c @@ -702,8 +702,7 @@ static int omap_mcbsp_init(struct platform_device *pdev)  		mcbsp->max_tx_thres = max_thres(mcbsp) - 0x10;  		mcbsp->max_rx_thres = max_thres(mcbsp) - 0x10; -		ret = sysfs_create_group(&mcbsp->dev->kobj, -					 &additional_attr_group); +		ret = devm_device_add_group(mcbsp->dev, &additional_attr_group);  		if (ret) {  			dev_err(mcbsp->dev,  				"Unable to create additional controls\n"); @@ -711,16 +710,7 @@ static int omap_mcbsp_init(struct platform_device *pdev)  		}  	} -	ret = omap_mcbsp_st_init(pdev); -	if (ret) -		goto err_st; - -	return 0; - -err_st: -	if (mcbsp->pdata->buffer_size) -		sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group); -	return ret; +	return omap_mcbsp_st_init(pdev);  }  /* @@ -1431,11 +1421,6 @@ static int asoc_mcbsp_remove(struct platform_device *pdev)  	if (cpu_latency_qos_request_active(&mcbsp->pm_qos_req))  		cpu_latency_qos_remove_request(&mcbsp->pm_qos_req); -	if (mcbsp->pdata->buffer_size) -		sysfs_remove_group(&mcbsp->dev->kobj, &additional_attr_group); - -	omap_mcbsp_st_cleanup(pdev); -  	return 0;  } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index f4009dbdf62d..ef78e0e1a754 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -5222,22 +5222,25 @@ union bpf_attr {   *	Return   *		Nothing. Always succeeds.   
* - * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset) + * long bpf_dynptr_read(void *dst, u32 len, struct bpf_dynptr *src, u32 offset, u64 flags)   *	Description   *		Read *len* bytes from *src* into *dst*, starting from *offset*   *		into *src*. + *		*flags* is currently unused.   *	Return   *		0 on success, -E2BIG if *offset* + *len* exceeds the length - *		of *src*'s data, -EINVAL if *src* is an invalid dynptr. + *		of *src*'s data, -EINVAL if *src* is an invalid dynptr or if + *		*flags* is not 0.   * - * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len) + * long bpf_dynptr_write(struct bpf_dynptr *dst, u32 offset, void *src, u32 len, u64 flags)   *	Description   *		Write *len* bytes from *src* into *dst*, starting from *offset*   *		into *dst*. + *		*flags* is currently unused.   *	Return   *		0 on success, -E2BIG if *offset* + *len* exceeds the length   *		of *dst*'s data, -EINVAL if *dst* is an invalid dynptr or if *dst* - *		is a read-only dynptr. + *		is a read-only dynptr or if *flags* is not 0.   *   * void *bpf_dynptr_data(struct bpf_dynptr *ptr, u32 offset, u32 len)   *	Description diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index d811cff73597..0a26c243e6e9 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -140,12 +140,12 @@ int use_after_invalid(void *ctx)  	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(read_data), 0, &ptr); -	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0); +	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);  	bpf_ringbuf_submit_dynptr(&ptr, 0);  	/* this should fail */ -	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0); +	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);  	return 0;  } @@ -338,7 +338,7 @@ int invalid_helper2(void *ctx)  	get_map_val_dynptr(&ptr);  	/* this should fail */ -	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0); +	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 8, 0, 0);  	return 0;  } @@ -377,7 +377,7 @@ int invalid_write2(void *ctx)  	memcpy((void *)&ptr + 8, &x, sizeof(x));  	/* this should fail */ -	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0); +	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);  	bpf_ringbuf_submit_dynptr(&ptr, 0); @@ -473,7 +473,7 @@ int invalid_read2(void *ctx)  	get_map_val_dynptr(&ptr);  	/* this should fail */ -	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0); +	bpf_dynptr_read(read_data, sizeof(read_data), (void *)&ptr + 1, 0, 0);  	return 0;  } diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index d67be48df4b2..a3a6103c8569 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -43,10 +43,10 @@ int test_read_write(void *ctx)  	bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(write_data), 0, &ptr);  	/* Write data into the dynptr */ -	err = err ?: bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data)); +	err = bpf_dynptr_write(&ptr, 0, write_data, sizeof(write_data), 0);  	/* Read the data that was written into the dynptr */ -	err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0); +	err = err ?: bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);  	/* Ensure the data we read matches the data we wrote */  	for (i = 0; i < sizeof(read_data); i++) { diff --git 
a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore index a29f79618934..ffc35a22e914 100644 --- a/tools/testing/selftests/net/.gitignore +++ b/tools/testing/selftests/net/.gitignore @@ -36,4 +36,5 @@ test_unix_oob  gro  ioam6_parser  toeplitz +tun  cmsg_sender diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index ddad703ace34..db05b3764b77 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -11,7 +11,7 @@ TEST_PROGS += udpgso_bench.sh fib_rule_tests.sh msg_zerocopy.sh psock_snd.sh  TEST_PROGS += udpgro_bench.sh udpgro.sh test_vxlan_under_vrf.sh reuseport_addr_any.sh  TEST_PROGS += test_vxlan_fdb_changelink.sh so_txtime.sh ipv6_flowlabel.sh  TEST_PROGS += tcp_fastopen_backup_key.sh fcnal-test.sh l2tp.sh traceroute.sh -TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh +TEST_PROGS += fin_ack_lat.sh fib_nexthop_multiprefix.sh fib_nexthops.sh fib_nexthop_nongw.sh  TEST_PROGS += altnames.sh icmp.sh icmp_redirect.sh ip6_gre_headroom.sh  TEST_PROGS += route_localnet.sh  TEST_PROGS += reuseaddr_ports_exhausted.sh diff --git a/tools/testing/selftests/net/fib_nexthop_nongw.sh b/tools/testing/selftests/net/fib_nexthop_nongw.sh new file mode 100755 index 000000000000..b7b928b38ce4 --- /dev/null +++ b/tools/testing/selftests/net/fib_nexthop_nongw.sh @@ -0,0 +1,119 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# ns: h1               | ns: h2 +#   192.168.0.1/24     | +#            eth0      | +#                      |       192.168.1.1/32 +#            veth0 <---|---> veth1 +# Validate source address selection for route without gateway + +PAUSE_ON_FAIL=no +VERBOSE=0 +ret=0 + +################################################################################ +# helpers + +log_test() +{ +	local rc=$1 +	local expected=$2 +	local msg="$3" + +	if [ ${rc} -eq ${expected} ]; then +		printf "TEST: %-60s  [ OK ]\n" "${msg}" +		nsuccess=$((nsuccess+1)) +	else +		ret=1 +		nfail=$((nfail+1)) +		printf "TEST: %-60s  [FAIL]\n" "${msg}" +		if [ "${PAUSE_ON_FAIL}" = "yes" ]; then +			echo +			echo "hit enter to continue, 'q' to quit" +			read a +			[ "$a" = "q" ] && exit 1 +		fi +	fi + +	[ "$VERBOSE" = "1" ] && echo +} + +run_cmd() +{ +	local cmd="$*" +	local out +	local rc + +	if [ "$VERBOSE" = "1" ]; then +		echo "COMMAND: $cmd" +	fi + +	out=$(eval $cmd 2>&1) +	rc=$? 
+	if [ "$VERBOSE" = "1" -a -n "$out" ]; then +		echo "$out" +	fi + +	[ "$VERBOSE" = "1" ] && echo + +	return $rc +} + +################################################################################ +# config +setup() +{ +	ip netns add h1 +	ip -n h1 link set lo up +	ip netns add h2 +	ip -n h2 link set lo up + +	# Add a fake eth0 to support an ip address +	ip -n h1 link add name eth0 type dummy +	ip -n h1 link set eth0 up +	ip -n h1 address add 192.168.0.1/24 dev eth0 + +	# Configure veths (same @mac, arp off) +	ip -n h1 link add name veth0 type veth peer name veth1 netns h2 +	ip -n h1 link set veth0 up + +	ip -n h2 link set veth1 up + +	# Configure @IP in the peer netns +	ip -n h2 address add 192.168.1.1/32 dev veth1 +	ip -n h2 route add default dev veth1 + +	# Add a nexthop without @gw and use it in a route +	ip -n h1 nexthop add id 1 dev veth0 +	ip -n h1 route add 192.168.1.1 nhid 1 +} + +cleanup() +{ +	ip netns del h1 2>/dev/null +	ip netns del h2 2>/dev/null +} + +trap cleanup EXIT + +################################################################################ +# main + +while getopts :pv o +do +	case $o in +		p) PAUSE_ON_FAIL=yes;; +		v) VERBOSE=1;; +	esac +done + +cleanup +setup + +run_cmd ip -netns h1 route get 192.168.1.1 +log_test $? 0 "nexthop: get route with nexthop without gw" +run_cmd ip netns exec h1 ping -c1 192.168.1.1 +log_test $? 0 "nexthop: ping through nexthop without gw" + +exit $ret diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile index 8f481218a492..57b84e0c879e 100644 --- a/tools/testing/selftests/net/forwarding/Makefile +++ b/tools/testing/selftests/net/forwarding/Makefile @@ -37,6 +37,7 @@ TEST_PROGS = bridge_igmp.sh \  	ipip_hier_gre_key.sh \  	ipip_hier_gre_keys.sh \  	ipip_hier_gre.sh \ +	local_termination.sh \  	loopback.sh \  	mirror_gre_bound.sh \  	mirror_gre_bridge_1d.sh \ @@ -52,6 +53,7 @@ TEST_PROGS = bridge_igmp.sh \  	mirror_gre_vlan_bridge_1q.sh \  	mirror_gre_vlan.sh \  	mirror_vlan.sh \ +	no_forwarding.sh \  	pedit_dsfield.sh \  	pedit_ip.sh \  	pedit_l4port.sh \ diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile index f905d5358e68..48a99e1453e1 100644 --- a/tools/testing/selftests/net/mptcp/Makefile +++ b/tools/testing/selftests/net/mptcp/Makefile @@ -6,7 +6,7 @@ KSFT_KHDR_INSTALL := 1  CFLAGS =  -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include $(KHDR_INCLUDES)  TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \ -	      simult_flows.sh mptcp_sockopt.sh +	      simult_flows.sh mptcp_sockopt.sh userspace_pm.sh  TEST_GEN_FILES = mptcp_connect pm_nl_ctl mptcp_sockopt mptcp_inq  |
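
The bpf.h documentation change above adds a trailing u64 flags argument to bpf_dynptr_read() and bpf_dynptr_write(), and the selftest hunks in this diff pass 0 for it. A minimal sketch of the updated calling convention follows, modelled on the dynptr_success.c pattern shown above; the map name, attach point, program name, and buffer contents are illustrative assumptions, not taken from this diff, and it assumes a libbpf whose helper definitions were regenerated from this uapi header.

/* SPDX-License-Identifier: GPL-2.0 */
/* Sketch only: demonstrates the five-argument bpf_dynptr_read/write calls. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Illustrative ring buffer map; name and size are assumptions. */
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

char data[8] = "example";
char readback[8];

SEC("tp/syscalls/sys_enter_getpid")	/* hypothetical attach point */
int dynptr_rw_example(void *ctx)
{
	struct bpf_dynptr ptr;
	int err;

	err = bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(data), 0, &ptr);

	/*
	 * Both helpers now take a trailing flags argument; per the updated
	 * uapi documentation, 0 is the only accepted value for now.
	 */
	err = err ?: bpf_dynptr_write(&ptr, 0, data, sizeof(data), 0);
	err = err ?: bpf_dynptr_read(readback, sizeof(readback), &ptr, 0, 0);

	bpf_ringbuf_submit_dynptr(&ptr, 0);

	return err ? 1 : 0;
}

char _license[] SEC("license") = "GPL";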