73 files changed, 873 insertions, 375 deletions
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am335x-baltos.dtsi b/arch/arm/boot/dts/am335x-baltos.dtsi
index efb5eae290a8..d42b98f15e8b 100644
--- a/arch/arm/boot/dts/am335x-baltos.dtsi
+++ b/arch/arm/boot/dts/am335x-baltos.dtsi
@@ -371,6 +371,8 @@
 	phy1: ethernet-phy@1 {
 		reg = <7>;
+		eee-broken-100tx;
+		eee-broken-1000t;
 	};
 };
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 9e43c443738a..9ba4b18c0cb2 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -672,6 +672,7 @@
 	ti,non-removable;
 	bus-width = <4>;
 	cap-power-off-card;
+	keep-power-in-suspend;
 	pinctrl-names = "default";
 	pinctrl-0 = <&mmc2_pins>;
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 2c9e56f4aac5..bbfb9d5a70a9 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -283,6 +283,7 @@
 	device_type = "pci";
 	ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+	bus-range = <0x00 0xff>;
 	#interrupt-cells = <1>;
 	num-lanes = <1>;
 	linux,pci-domain = <0>;
@@ -319,6 +320,7 @@
 	device_type = "pci";
 	ranges = <0x81000000 0 0 0x03000 0 0x00010000 0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+	bus-range = <0x00 0xff>;
 	#interrupt-cells = <1>;
 	num-lanes = <1>;
 	linux,pci-domain = <1>;
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
index 8f9a69ca818c..efe53998c961 100644
--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
@@ -121,7 +121,7 @@
 &i2c3 {
 	clock-frequency = <400000>;
 	at24@50 {
-		compatible = "at24,24c02";
+		compatible = "atmel,24c64";
 		readonly;
 		reg = <0x50>;
 	};
diff --git a/arch/arm/boot/dts/sun8i-a33.dtsi b/arch/arm/boot/dts/sun8i-a33.dtsi
index 0467fb365bfc..306af6cadf26 100644
--- a/arch/arm/boot/dts/sun8i-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a33.dtsi
@@ -66,12 +66,6 @@
 		opp-microvolt = <1200000>;
 		clock-latency-ns = <244144>; /* 8 32k periods */
 	};
-
-	opp@1200000000 {
-		opp-hz = /bits/ 64 <1200000000>;
-		opp-microvolt = <1320000>;
-		clock-latency-ns = <244144>; /* 8 32k periods */
-	};
 };
 
 cpus {
@@ -81,16 +75,22 @@
 		operating-points-v2 = <&cpu0_opp_table>;
 	};
 
+	cpu@1 {
+		operating-points-v2 = <&cpu0_opp_table>;
+	};
+
 	cpu@2 {
 		compatible = "arm,cortex-a7";
 		device_type = "cpu";
 		reg = <2>;
+		operating-points-v2 = <&cpu0_opp_table>;
 	};
 
 	cpu@3 {
 		compatible = "arm,cortex-a7";
 		device_type = "cpu";
 		reg = <3>;
+		operating-points-v2 = <&cpu0_opp_table>;
 	};
 };
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index c4f2ace91ea2..3089d3bfa19b 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -270,6 +270,7 @@ extern const struct smp_operations omap4_smp_ops;
 extern int omap4_mpuss_init(void);
 extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
 extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
+extern u32 omap4_get_cpu1_ns_pa_addr(void);
 #else
 static inline int omap4_enter_lowpower(unsigned int cpu,
 					unsigned int power_state)
diff --git a/arch/arm/mach-omap2/omap-hotplug.c b/arch/arm/mach-omap2/omap-hotplug.c
index d3fb5661bb5d..433db6d0b073 100644
--- a/arch/arm/mach-omap2/omap-hotplug.c
+++ b/arch/arm/mach-omap2/omap-hotplug.c
@@ -50,7 +50,7 @@ void omap4_cpu_die(unsigned int cpu)
 	omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
 
 	if (omap_secure_apis_support())
-		boot_cpu = omap_read_auxcoreboot0();
+		boot_cpu = omap_read_auxcoreboot0() >> 9;
 	else
 		boot_cpu = readl_relaxed(base + OMAP_AUX_CORE_BOOT_0) >> 5;
diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
index 113ab2dd2ee9..03ec6d307c82 100644
--- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
+++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
@@ -64,6 +64,7 @@
 #include "prm-regbits-44xx.h"
 
 static void __iomem *sar_base;
+static u32 old_cpu1_ns_pa_addr;
 
 #if defined(CONFIG_PM) && defined(CONFIG_SMP)
 
@@ -212,6 +213,11 @@ static void __init save_l2x0_context(void)
 {}
 #endif
 
+u32 omap4_get_cpu1_ns_pa_addr(void)
+{
+	return old_cpu1_ns_pa_addr;
+}
+
 /**
  * omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
  * The purpose of this function is to manage low power programming
@@ -460,22 +466,30 @@ int __init omap4_mpuss_init(void)
 void __init omap4_mpuss_early_init(void)
 {
 	unsigned long startup_pa;
+	void __iomem *ns_pa_addr;
 
-	if (!(cpu_is_omap44xx() || soc_is_omap54xx()))
+	if (!(soc_is_omap44xx() || soc_is_omap54xx()))
 		return;
 
 	sar_base = omap4_get_sar_ram_base();
 
-	if (cpu_is_omap443x())
+	/* Save old NS_PA_ADDR for validity checks later on */
+	if (soc_is_omap44xx())
+		ns_pa_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+	else
+		ns_pa_addr = sar_base + OMAP5_CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
+	old_cpu1_ns_pa_addr = readl_relaxed(ns_pa_addr);
+
+	if (soc_is_omap443x())
 		startup_pa = __pa_symbol(omap4_secondary_startup);
-	else if (cpu_is_omap446x())
+	else if (soc_is_omap446x())
 		startup_pa = __pa_symbol(omap4460_secondary_startup);
 	else if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
 		startup_pa = __pa_symbol(omap5_secondary_hyp_startup);
 	else
 		startup_pa = __pa_symbol(omap5_secondary_startup);
 
-	if (cpu_is_omap44xx())
+	if (soc_is_omap44xx())
 		writel_relaxed(startup_pa, sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET);
 	else
diff --git a/arch/arm/mach-omap2/omap-smc.S b/arch/arm/mach-omap2/omap-smc.S
index fd90125bffc7..72506e6cf9e7 100644
--- a/arch/arm/mach-omap2/omap-smc.S
+++ b/arch/arm/mach-omap2/omap-smc.S
@@ -94,6 +94,5 @@ ENTRY(omap_read_auxcoreboot0)
 	ldr	r12, =0x103
 	dsb
 	smc	#0
-	mov	r0, r0, lsr #9
 	ldmfd	sp!, {r2-r12, pc}
 ENDPROC(omap_read_auxcoreboot0)
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 003353b0b794..3faf454ba487 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -21,6 +21,7 @@
 #include <linux/io.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/sections.h>
 #include <asm/smp_scu.h>
 #include <asm/virt.h>
 
@@ -40,10 +41,14 @@
 
 #define OMAP5_CORE_COUNT	0x2
 
+#define AUX_CORE_BOOT0_GP_RELEASE	0x020
+#define AUX_CORE_BOOT0_HS_RELEASE	0x200
+
 struct omap_smp_config {
 	unsigned long cpu1_rstctrl_pa;
 	void __iomem *cpu1_rstctrl_va;
 	void __iomem *scu_base;
+	void __iomem *wakeupgen_base;
 	void *startup_addr;
 };
 
@@ -140,7 +145,6 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	static struct clockdomain *cpu1_clkdm;
 	static bool booted;
 	static struct powerdomain *cpu1_pwrdm;
-	void __iomem *base = omap_get_wakeupgen_base();
 
 	/*
 	 * Set synchronisation state between this boot processor
@@ -155,9 +159,11 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
 	 * A barrier is added to ensure that write buffer is drained
 	 */
 	if (omap_secure_apis_support())
-		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
+		omap_modify_auxcoreboot0(AUX_CORE_BOOT0_HS_RELEASE,
+					 0xfffffdff);
 	else
-		writel_relaxed(0x20, base + OMAP_AUX_CORE_BOOT_0);
+		writel_relaxed(AUX_CORE_BOOT0_GP_RELEASE,
+			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
 
 	if (!cpu1_clkdm && !cpu1_pwrdm) {
 		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
@@ -261,9 +267,72 @@ static void __init omap4_smp_init_cpus(void)
 		set_cpu_possible(i, true);
 }
 
+/*
+ * For now, just make sure the start-up address is not within the booting
+ * kernel space as that means we just overwrote whatever secondary_startup()
+ * code there was.
+ */
+static bool __init omap4_smp_cpu1_startup_valid(unsigned long addr)
+{
+	if ((addr >= __pa(PAGE_OFFSET)) && (addr <= __pa(__bss_start)))
+		return false;
+
+	return true;
+}
+
+/*
+ * We may need to reset CPU1 before configuring, otherwise kexec boot can end
+ * up trying to use old kernel startup address or suspend-resume will
+ * occasionally fail to bring up CPU1 on 4430 if CPU1 fails to enter deeper
+ * idle states.
+ */
+static void __init omap4_smp_maybe_reset_cpu1(struct omap_smp_config *c)
+{
+	unsigned long cpu1_startup_pa, cpu1_ns_pa_addr;
+	bool needs_reset = false;
+	u32 released;
+
+	if (omap_secure_apis_support())
+		released = omap_read_auxcoreboot0() & AUX_CORE_BOOT0_HS_RELEASE;
+	else
+		released = readl_relaxed(cfg.wakeupgen_base +
+					 OMAP_AUX_CORE_BOOT_0) &
+			   AUX_CORE_BOOT0_GP_RELEASE;
+	if (released) {
+		pr_warn("smp: CPU1 not parked?\n");
+
+		return;
+	}
+
+	cpu1_startup_pa = readl_relaxed(cfg.wakeupgen_base +
+					OMAP_AUX_CORE_BOOT_1);
+	cpu1_ns_pa_addr = omap4_get_cpu1_ns_pa_addr();
+
+	/* Did the configured secondary_startup() get overwritten? */
+	if (!omap4_smp_cpu1_startup_valid(cpu1_startup_pa))
+		needs_reset = true;
+
+	/*
+	 * If omap4 or 5 has NS_PA_ADDR configured, CPU1 may be in a
+	 * deeper idle state in WFI and will wake to an invalid address.
+	 */
+	if ((soc_is_omap44xx() || soc_is_omap54xx()) &&
+	    !omap4_smp_cpu1_startup_valid(cpu1_ns_pa_addr))
+		needs_reset = true;
+
+	if (!needs_reset || !c->cpu1_rstctrl_va)
+		return;
+
+	pr_info("smp: CPU1 parked within kernel, needs reset (0x%lx 0x%lx)\n",
+		cpu1_startup_pa, cpu1_ns_pa_addr);
+
+	writel_relaxed(1, c->cpu1_rstctrl_va);
+	readl_relaxed(c->cpu1_rstctrl_va);
+	writel_relaxed(0, c->cpu1_rstctrl_va);
+}
+
 static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 {
-	void __iomem *base = omap_get_wakeupgen_base();
 	const struct omap_smp_config *c = NULL;
 
 	if (soc_is_omap443x())
@@ -281,6 +350,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 	/* Must preserve cfg.scu_base set earlier */
 	cfg.cpu1_rstctrl_pa = c->cpu1_rstctrl_pa;
 	cfg.startup_addr = c->startup_addr;
+	cfg.wakeupgen_base = omap_get_wakeupgen_base();
 
 	if (soc_is_dra74x() || soc_is_omap54xx()) {
 		if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
@@ -299,15 +369,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 	if (cfg.scu_base)
 		scu_enable(cfg.scu_base);
 
-	/*
-	 * Reset CPU1 before configuring, otherwise kexec will
-	 * end up trying to use old kernel startup address.
-	 */
-	if (cfg.cpu1_rstctrl_va) {
-		writel_relaxed(1, cfg.cpu1_rstctrl_va);
-		readl_relaxed(cfg.cpu1_rstctrl_va);
-		writel_relaxed(0, cfg.cpu1_rstctrl_va);
-	}
+	omap4_smp_maybe_reset_cpu1(&cfg);
 
 	/*
 	 * Write the address of secondary startup routine into the
@@ -319,7 +381,7 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 		omap_auxcoreboot_addr(__pa_symbol(cfg.startup_addr));
 	else
 		writel_relaxed(__pa_symbol(cfg.startup_addr),
-			       base + OMAP_AUX_CORE_BOOT_1);
+			       cfg.wakeupgen_base + OMAP_AUX_CORE_BOOT_1);
 }
 
 const struct smp_operations omap4_smp_ops __initconst = {
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index e920dd83e443..f989145480c8 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
 			dev_err(dev, "failed to idle\n");
 		}
 		break;
+	case BUS_NOTIFY_BIND_DRIVER:
+		od = to_omap_device(pdev);
+		if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
+		    pm_runtime_status_suspended(dev)) {
+			od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
+			pm_runtime_set_active(dev);
+		}
+		break;
 	case BUS_NOTIFY_ADD_DEVICE:
 		if (pdev->dev.of_node)
 			omap_device_build_from_dt(pdev);
diff --git a/arch/arm/mach-orion5x/Kconfig b/arch/arm/mach-orion5x/Kconfig
index 633442ad4e4c..2a7bb6ccdcb7 100644
--- a/arch/arm/mach-orion5x/Kconfig
+++ b/arch/arm/mach-orion5x/Kconfig
@@ -6,6 +6,7 @@ menuconfig ARCH_ORION5X
 	select GPIOLIB
 	select MVEBU_MBUS
 	select PCI
+	select PHYLIB if NETDEVICES
 	select PLAT_ORION_LEGACY
 	help
 	  Support for the following Marvell Orion 5x series SoCs:
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 9255b6d67ba5..aff6994950ba 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -468,6 +468,7 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 		    eth_data, &orion_ge11);
 }
 
+#ifdef CONFIG_ARCH_ORION5X
 /*****************************************************************************
  * Ethernet switch
  ****************************************************************************/
@@ -480,6 +481,9 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 	struct mdio_board_info *bd;
 	unsigned int i;
 
+	if (!IS_BUILTIN(CONFIG_PHYLIB))
+		return;
+
 	for (i = 0; i < ARRAY_SIZE(d->port_names); i++)
 		if (!strcmp(d->port_names[i], "cpu"))
 			break;
@@ -493,6 +497,7 @@ void __init orion_ge00_switch_init(struct dsa_chip_data *d)
 
 	mdiobus_register_board_info(&orion_ge00_switch_board_info, 1);
 }
+#endif
 
 /*****************************************************************************
  * I2C
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
index 1c64ea2d23f9..0565779e66fa 100644
--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
@@ -179,8 +179,10 @@
 usbphy: phy@01c19400 {
 	compatible = "allwinner,sun50i-a64-usb-phy";
 	reg = <0x01c19400 0x14>,
+	      <0x01c1a800 0x4>,
 	      <0x01c1b800 0x4>;
 	reg-names = "phy_ctrl",
+		    "pmu0",
 		    "pmu1";
 	clocks = <&ccu CLK_USB_PHY0>,
 		 <&ccu CLK_USB_PHY1>;
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 8442727f28d2..cbd4f4af8108 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -39,10 +39,10 @@
 #define get_user __get_user
 
 #if !defined(CONFIG_64BIT)
-#define LDD_USER(ptr)		__get_user_asm64(ptr)
+#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
 #define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
 #else
-#define LDD_USER(ptr)		__get_user_asm("ldd", ptr)
+#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
 #define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
 #endif
 
@@ -97,63 +97,87 @@ struct exception_data {
 		" mtsp %0,%%sr2\n\t"		\
 		: : "r"(get_fs()) : )
 
-#define __get_user(x, ptr)			\
-({						\
-	register long __gu_err __asm__ ("r8") = 0;	\
-	register long __gu_val;			\
-						\
-	load_sr2();				\
-	switch (sizeof(*(ptr))) {		\
-	case 1: __get_user_asm("ldb", ptr); break;	\
-	case 2: __get_user_asm("ldh", ptr); break;	\
-	case 4: __get_user_asm("ldw", ptr); break;	\
-	case 8: LDD_USER(ptr); break;		\
-	default: BUILD_BUG(); break;		\
-	}					\
-						\
-	(x) = (__force __typeof__(*(ptr))) __gu_val;	\
-	__gu_err;				\
+#define __get_user_internal(val, ptr)		\
+({						\
+	register long __gu_err __asm__ ("r8") = 0;	\
+						\
+	switch (sizeof(*(ptr))) {		\
+	case 1: __get_user_asm(val, "ldb", ptr); break;	\
+	case 2: __get_user_asm(val, "ldh", ptr); break;	\
+	case 4: __get_user_asm(val, "ldw", ptr); break;	\
+	case 8: LDD_USER(val, ptr); break;	\
+	default: BUILD_BUG();			\
+	}					\
+						\
+	__gu_err;				\
 })
 
-#define __get_user_asm(ldx, ptr)		\
+#define __get_user(val, ptr)			\
+({						\
+	load_sr2();				\
+	__get_user_internal(val, ptr);		\
+})
+
+#define __get_user_asm(val, ldx, ptr)		\
+{						\
+	register long __gu_val;			\
+						\
 	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"	\
 		"9:\n"				\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
 		: "=r"(__gu_val), "=r"(__gu_err) \
-		: "r"(ptr), "1"(__gu_err));
+		: "r"(ptr), "1"(__gu_err));	\
+						\
+	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
+}
 
 #if !defined(CONFIG_64BIT)
 
-#define __get_user_asm64(ptr)			\
+#define __get_user_asm64(val, ptr)		\
+{						\
+	union {					\
+		unsigned long long	l;	\
+		__typeof__(*(ptr))	t;	\
+	} __gu_tmp;				\
+						\
 	__asm__("   copy %%r0,%R0\n"		\
 		"1: ldw 0(%%sr2,%2),%0\n"	\
 		"2: ldw 4(%%sr2,%2),%R0\n"	\
 		"9:\n"				\
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
 		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
-		: "=r"(__gu_val), "=r"(__gu_err) \
-		: "r"(ptr), "1"(__gu_err));
+		: "=&r"(__gu_tmp.l), "=r"(__gu_err) \
+		: "r"(ptr), "1"(__gu_err));	\
+						\
+	(val) = __gu_tmp.t;			\
+}
 
 #endif /* !defined(CONFIG_64BIT) */
 
 
-#define __put_user(x, ptr)					\
+#define __put_user_internal(x, ptr)				\
 ({								\
 	register long __pu_err __asm__ ("r8") = 0;		\
 	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
 								\
-	load_sr2();						\
 	switch (sizeof(*(ptr))) {				\
-	case 1: __put_user_asm("stb", __x, ptr); break;		\
-	case 2: __put_user_asm("sth", __x, ptr); break;		\
-	case 4: __put_user_asm("stw", __x, ptr); break;		\
-	case 8: STD_USER(__x, ptr); break;			\
-	default: BUILD_BUG(); break;				\
-	}							\
+	case 1: __put_user_asm("stb", __x, ptr); break;		\
+	case 2: __put_user_asm("sth", __x, ptr); break;		\
+	case 4: __put_user_asm("stw", __x, ptr); break;		\
+	case 8: STD_USER(__x, ptr); break;			\
+	default: BUILD_BUG();					\
+	}							\
 								\
 	__pu_err;						\
 })
 
+#define __put_user(x, ptr)					\
+({								\
+	load_sr2();						\
+	__put_user_internal(x, ptr);				\
+})
+
+
 /*
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
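The reworked __get_user_asm64() above loads the raw 64-bit value into the `l` member of a union and hands the result back through the pointee-typed `t` member, so `(val) = __gu_tmp.t` is done with the destination's real type; the `=&r` early-clobber also stops the compiler from allocating the output register pair on top of the pointer input. A minimal userspace sketch of just the union trick, assuming nothing parisc-specific (the asm load is simulated by a plain dereference):

/* sketch: the raw 64-bit load lands in .l, the typed assignment goes
 * through .t — so the macro works for any 8-byte pointee type.
 * Userspace stand-in, not the kernel macro. */
#include <stdio.h>

#define get_user_64(val, ptr) do {				\
	union {							\
		unsigned long long l;				\
		__typeof__(*(ptr)) t;				\
	} __gu_tmp;						\
	__gu_tmp.l = *(const unsigned long long *)(ptr);	\
	(val) = __gu_tmp.t; /* keeps the pointee's type */	\
} while (0)

int main(void)
{
	long long src = -42;
	long long dst = 0;

	get_user_64(dst, &src);
	printf("%lld\n", dst);	/* prints -42 */
	return 0;
}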
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index f01188c044ee..85c28bb80fb7 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -201,7 +201,7 @@ ENTRY_CFI(pa_memcpy)
	add	dst,len,end

	/* short copy with less than 16 bytes? */
-	cmpib,>>=,n 15,len,.Lbyte_loop
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop

	/* same alignment? */
	xor	src,dst,t0
@@ -216,7 +216,7 @@ ENTRY_CFI(pa_memcpy)
	/* loop until we are 64-bit aligned */
.Lalign_loop64:
	extru	dst,31,3,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_16
+	cmpib,=,n	0,t1,.Lcopy_loop_16_start
20:	ldb,ma	1(srcspc,src),t1
21:	stb,ma	t1,1(dstspc,dst)
	b	.Lalign_loop64
@@ -225,6 +225,7 @@ ENTRY_CFI(pa_memcpy)
	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)

+.Lcopy_loop_16_start:
	ldi	31,t0
.Lcopy_loop_16:
	cmpb,COND(>>=),n t0,len,.Lword_loop
@@ -267,7 +268,7 @@ ENTRY_CFI(pa_memcpy)
	/* loop until we are 32-bit aligned */
.Lalign_loop32:
	extru	dst,31,2,t1
-	cmpib,=,n	0,t1,.Lcopy_loop_4
+	cmpib,=,n	0,t1,.Lcopy_loop_8
20:	ldb,ma	1(srcspc,src),t1
21:	stb,ma	t1,1(dstspc,dst)
	b	.Lalign_loop32
@@ -277,7 +278,7 @@ ENTRY_CFI(pa_memcpy)
	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)


-.Lcopy_loop_4:
+.Lcopy_loop_8:
	cmpib,COND(>>=),n 15,len,.Lbyte_loop

10:	ldw	0(srcspc,src),t1
@@ -299,7 +300,7 @@ ENTRY_CFI(pa_memcpy)
	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)

-	b	.Lcopy_loop_4
+	b	.Lcopy_loop_8
	ldo	-16(len),len

.Lbyte_loop:
@@ -324,7 +325,7 @@ ENTRY_CFI(pa_memcpy)
.Lunaligned_copy:
	/* align until dst is 32bit-word-aligned */
	extru	dst,31,2,t1
-	cmpib,COND(=),n	0,t1,.Lcopy_dstaligned
+	cmpib,=,n	0,t1,.Lcopy_dstaligned
20:	ldb	0(srcspc,src),t1
	ldo	1(src),src
21:	stb,ma	t1,1(dstspc,dst)
@@ -362,7 +363,7 @@ ENTRY_CFI(pa_memcpy)
	cmpiclr,<> 1,t0,%r0
	b,n	.Lcase1
.Lcase0:
-	cmpb,= %r0,len,.Lcda_finish
+	cmpb,COND(=) %r0,len,.Lcda_finish
	nop

1:	ldw,ma 4(srcspc,src), a3
@@ -376,7 +377,7 @@ ENTRY_CFI(pa_memcpy)
1:	ldw,ma 4(srcspc,src), a3
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
	ldo -1(len),len
-	cmpb,=,n %r0,len,.Ldo0
+	cmpb,COND(=),n %r0,len,.Ldo0
.Ldo4:
1:	ldw,ma 4(srcspc,src), a0
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
@@ -402,7 +403,7 @@ ENTRY_CFI(pa_memcpy)
1:	stw,ma t0, 4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
	ldo -4(len),len
-	cmpb,<> %r0,len,.Ldo4
+	cmpb,COND(<>) %r0,len,.Ldo4
	nop
.Ldo0:
	shrpw	a2, a3, %sar, t0
@@ -436,14 +437,14 @@ ENTRY_CFI(pa_memcpy)
	/* fault exception fixup handlers: */
#ifdef CONFIG_64BIT
.Lcopy16_fault:
-10:	b	.Lcopy_done
-	std,ma	t1,8(dstspc,dst)
+	b	.Lcopy_done
+10:	std,ma	t1,8(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
#endif

.Lcopy8_fault:
-10:	b	.Lcopy_done
-	stw,ma	t1,4(dstspc,dst)
+	b	.Lcopy_done
+10:	stw,ma	t1,4(dstspc,dst)
	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)

	.exit
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 68ac5c7cd982..a59deaef21e5 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -43,7 +43,7 @@ config SPARC
 	select ARCH_HAS_SG_CHAIN
 	select CPU_NO_EFFICIENT_FFS
 	select HAVE_ARCH_HARDENED_USERCOPY
-	select PROVE_LOCKING_SMALL if PROVE_LOCKING
+	select LOCKDEP_SMALL if LOCKDEP
 	select ARCH_WANT_RELAX_ORDER
 
 config SPARC32
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ee5273ad918d..7c29d38e6b99 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	pgd_t *pgd;
 	unsigned long next;
 
+	addr &= PMD_MASK;
+	if (addr < floor) {
+		addr += PMD_SIZE;
+		if (!addr)
+			return;
+	}
+	if (ceiling) {
+		ceiling &= PMD_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		end -= PMD_SIZE;
+	if (addr > end - 1)
+		return;
+
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
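The sparc hugetlb_free_pgd_range() hunk above clamps addr/end to PMD granularity against the floor/ceiling limits and returns early when no whole unit is left to free. A standalone sketch of that clamping, with illustrative PMD constants rather than sparc's real values:

/* sketch of the floor/ceiling clamp added above; PMD_SIZE/PMD_MASK are
 * illustrative stand-ins, not the real sparc definitions */
#include <stdbool.h>
#include <stdio.h>

#define PMD_SIZE  (1UL << 23)
#define PMD_MASK  (~(PMD_SIZE - 1))

static bool range_survives(unsigned long addr, unsigned long end,
			   unsigned long floor, unsigned long ceiling)
{
	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)		/* wrapped around: nothing left */
			return false;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return false;
	}
	if (end - 1 > ceiling - 1)	/* unsigned compare handles ceiling == 0 */
		end -= PMD_SIZE;
	return addr <= end - 1;	/* false => caller bails out */
}

int main(void)
{
	/* sub-PMD range: collapses, so the caller must not touch the pgd */
	printf("%d\n", range_survives(0x1000, 0x2000, 0x0, 0x4000));
	return 0;
}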
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 2c1ebeb4d737..529bb4a6487a 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
 		clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-	return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr:	PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 	/* TODO: skip the write-back by always using non-temporal stores */
 	len = copy_from_iter_nocache(addr, bytes, i);
 
-	if (__iter_needs_pmem_wb(i))
+	/*
+	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
+	 * non-temporal stores for the bulk of the transfer, but we need
+	 * to manually flush if the transfer is unaligned. A cached
+	 * memory copy is used when destination or size is not naturally
+	 * aligned. That is:
+	 *   - Require 8-byte alignment when size is 8 bytes or larger.
+	 *   - Require 4-byte alignment when size is 4 bytes.
+	 *
+	 * In the non-iovec case the entire destination needs to be
+	 * flushed.
+	 */
+	if (iter_is_iovec(i)) {
+		unsigned long flushed, dest = (unsigned long) addr;
+
+		if (bytes < 8) {
+			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+				arch_wb_cache_pmem(addr, 1);
+		} else {
+			if (!IS_ALIGNED(dest, 8)) {
+				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+				arch_wb_cache_pmem(addr, 1);
+			}
+
+			flushed = dest - (unsigned long) addr;
+			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+				arch_wb_cache_pmem(addr + bytes - 1, 1);
+		}
+	} else
 		arch_wb_cache_pmem(addr, bytes);
 
 	return len;
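The new iovec branch in arch_copy_from_iter_pmem() only falls back to a manual cache write-back when the destination address or length breaks the alignment rules under which copy_from_iter_nocache() uses non-temporal stores. A userspace sketch of just that decision, split into hypothetical head/tail helpers, with `clflush_size` standing in for boot_cpu_data.x86_clflush_size:

/* sketch of the flush decision above — hypothetical helpers, not the
 * kernel functions; they only report whether a flush would be issued */
#include <stdbool.h>
#include <stdio.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)       (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

static bool needs_head_flush(unsigned long dest, unsigned long bytes)
{
	if (bytes < 8)
		return !IS_ALIGNED(dest, 4) || bytes != 4;
	return !IS_ALIGNED(dest, 8);
}

static bool needs_tail_flush(unsigned long dest, unsigned long bytes,
			     unsigned long clflush_size)
{
	unsigned long flushed = 0;

	if (bytes < 8)
		return false;	/* small writes are covered by the head rule */
	if (!IS_ALIGNED(dest, 8))
		flushed = ALIGN(dest, clflush_size) - dest;
	return bytes > flushed && !IS_ALIGNED(bytes - flushed, 8);
}

int main(void)
{
	/* naturally aligned 4-byte write: no flush at all */
	printf("%d %d\n", needs_head_flush(0x1004, 4),
	       needs_tail_flush(0x1004, 4, 64));
	/* unaligned 16-byte write: head flush required */
	printf("%d %d\n", needs_head_flush(0x1001, 16),
	       needs_tail_flush(0x1001, 16, 64));
	return 0;
}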
diff --git a/crypto/ahash.c b/crypto/ahash.c
index e58c4970c22b..826cd7ab4d4a 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -32,6 +32,7 @@ struct ahash_request_priv {
 	crypto_completion_t complete;
 	void *data;
 	u8 *result;
+	u32 flags;
 	void *ubuf[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -253,6 +254,8 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 	priv->result = req->result;
 	priv->complete = req->base.complete;
 	priv->data = req->base.data;
+	priv->flags = req->base.flags;
+
 	/*
 	 * WARNING: We do not backup req->priv here! The req->priv
 	 *          is for internal use of the Crypto API and the
@@ -267,38 +270,44 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
 	return 0;
 }
 
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
 {
 	struct ahash_request_priv *priv = req->priv;
 
+	if (!err)
+		memcpy(priv->result, req->result,
+		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
 	/* Restore the original crypto request. */
 	req->result = priv->result;
-	req->base.complete = priv->complete;
-	req->base.data = priv->data;
+
+	ahash_request_set_callback(req, priv->flags,
+				   priv->complete, priv->data);
 	req->priv = NULL;
 
 	/* Free the req->priv.priv from the ADJUSTED request. */
 	kzfree(priv);
 }
 
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
 {
 	struct ahash_request_priv *priv = req->priv;
+	struct crypto_async_request oreq;
 
-	if (err == -EINPROGRESS)
-		return;
-
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+	oreq.data = priv->data;
 
-	ahash_restore_req(req);
+	priv->complete(&oreq, -EINPROGRESS);
 }
 
 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
 	/*
 	 * Restore the original request, see ahash_op_unaligned() for what
 	 * goes where.
@@ -309,7 +318,7 @@ static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
 	 */
 
 	/* First copy req->result into req->priv.result */
-	ahash_op_unaligned_finish(areq, err);
+	ahash_restore_req(areq, err);
 
 	/* Complete the ORIGINAL request. */
 	areq->base.complete(&areq->base, err);
@@ -325,7 +334,12 @@ static int ahash_op_unaligned(struct ahash_request *req,
 		return err;
 
 	err = op(req);
-	ahash_op_unaligned_finish(req, err);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
+	ahash_restore_req(req, err);
 
 	return err;
 }
@@ -360,25 +374,14 @@ int crypto_ahash_digest(struct ahash_request *req)
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
 
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
+static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
 {
-	struct ahash_request_priv *priv = req->priv;
+	struct ahash_request *areq = req->data;
 
 	if (err == -EINPROGRESS)
 		return;
 
-	if (!err)
-		memcpy(priv->result, req->result,
-		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
-	ahash_restore_req(req);
-}
-
-static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
-{
-	struct ahash_request *areq = req->data;
-
-	ahash_def_finup_finish2(areq, err);
+	ahash_restore_req(areq, err);
 
 	areq->base.complete(&areq->base, err);
 }
@@ -389,11 +392,15 @@ static int ahash_def_finup_finish1(struct ahash_request *req, int err)
 		goto out;
 
 	req->base.complete = ahash_def_finup_done2;
-	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = crypto_ahash_reqtfm(req)->final(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
 
 out:
-	ahash_def_finup_finish2(req, err);
+	ahash_restore_req(req, err);
 	return err;
 }
 
@@ -401,7 +408,16 @@ static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
 {
 	struct ahash_request *areq = req->data;
 
+	if (err == -EINPROGRESS) {
+		ahash_notify_einprogress(areq);
+		return;
+	}
+
+	areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
 	err = ahash_def_finup_finish1(areq, err);
+	if (areq->priv)
+		return;
 
 	areq->base.complete(&areq->base, err);
 }
@@ -416,6 +432,11 @@ static int ahash_def_finup(struct ahash_request *req)
 		return err;
 
 	err = tfm->update(req);
+	if (err == -EINPROGRESS ||
+	    (err == -EBUSY && (ahash_request_flags(req) &
+			       CRYPTO_TFM_REQ_MAY_BACKLOG)))
+		return err;
+
 	return ahash_def_finup_finish1(req, err);
 }
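The test repeated three times in the ahash hunks above — the async operation either returned -EINPROGRESS or was queued on the backlog, so the driver still owns the request and the adjusted state must not be restored yet — reads cleanly in isolation. A small sketch with a hypothetical helper name (the flag value matches the kernel's definition, restated here as a plain constant):

/* sketch of the completion-ownership test used in the hunks above */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x400

/* true => the driver will invoke the completion callback later, so the
 * caller must not restore or free the adjusted request yet */
static bool ahash_op_still_pending(int err, unsigned int flags)
{
	return err == -EINPROGRESS ||
	       (err == -EBUSY && (flags & CRYPTO_TFM_REQ_MAY_BACKLOG));
}

int main(void)
{
	printf("%d\n", ahash_op_still_pending(-EINPROGRESS, 0));	/* 1 */
	printf("%d\n", ahash_op_still_pending(-EBUSY, 0));		/* 0 */
	printf("%d\n", ahash_op_still_pending(-EBUSY,
			CRYPTO_TFM_REQ_MAY_BACKLOG));			/* 1 */
	return 0;
}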
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 5a8053758657..ef59d9926ee9 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -40,6 +40,7 @@ struct aead_async_req {
 	struct aead_async_rsgl first_rsgl;
 	struct list_head list;
 	struct kiocb *iocb;
+	struct sock *sk;
 	unsigned int tsgls;
 	char iv[];
 };
@@ -379,12 +380,10 @@ unlock:
 
 static void aead_async_cb(struct crypto_async_request *_req, int err)
 {
-	struct sock *sk = _req->data;
-	struct alg_sock *ask = alg_sk(sk);
-	struct aead_ctx *ctx = ask->private;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(&ctx->aead_req);
-	struct aead_request *req = aead_request_cast(_req);
+	struct aead_request *req = _req->data;
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	struct aead_async_req *areq = GET_ASYM_REQ(req, tfm);
+	struct sock *sk = areq->sk;
 	struct scatterlist *sg = areq->tsgl;
 	struct aead_async_rsgl *rsgl;
 	struct kiocb *iocb = areq->iocb;
@@ -447,11 +446,12 @@ static int aead_recvmsg_async(struct socket *sock, struct msghdr *msg,
 	memset(&areq->first_rsgl, '\0', sizeof(areq->first_rsgl));
 	INIT_LIST_HEAD(&areq->list);
 	areq->iocb = msg->msg_iocb;
+	areq->sk = sk;
 	memcpy(areq->iv, ctx->iv, crypto_aead_ivsize(tfm));
 	aead_request_set_tfm(req, tfm);
 	aead_request_set_ad(req, ctx->aead_assoclen);
 	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-				  aead_async_cb, sk);
+				  aead_async_cb, req);
 	used -= ctx->aead_assoclen;
 
 	/* take over all tx sgls from ctx */
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 3ea095adafd9..a8bfae4451bf 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -345,6 +345,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -352,6 +359,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
@@ -389,6 +397,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -396,6 +411,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
diff --git a/crypto/xts.c b/crypto/xts.c
index c976bfac29da..89ace5ebc2da 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -286,6 +286,13 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -293,6 +300,7 @@ static void encrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
 
@@ -330,6 +338,13 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	struct rctx *rctx;
 
 	rctx = skcipher_request_ctx(req);
+
+	if (err == -EINPROGRESS) {
+		if (rctx->left != req->cryptlen)
+			return;
+		goto out;
+	}
+
 	subreq = &rctx->subreq;
 	subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
 
@@ -337,6 +352,7 @@ static void decrypt_done(struct crypto_async_request *areq, int err)
 	if (rctx->left)
 		return;
 
+out:
 	skcipher_request_complete(req, err);
 }
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 662036bdc65e..c8ea9d698cd0 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
 	const struct nfit_set_info_map *map0 = m0;
 	const struct nfit_set_info_map *map1 = m1;
 
-	return map0->region_offset - map1->region_offset;
+	if (map0->region_offset < map1->region_offset)
+		return -1;
+	else if (map0->region_offset > map1->region_offset)
+		return 1;
+	return 0;
 }
 
 /* Retrieve the nth entry referencing this spa */
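The cmp_map() fix above matters because region_offset is a u64: the old `return map0->region_offset - map1->region_offset` converts the unsigned 64-bit difference to int, discarding the high bits, so offsets far enough apart can compare as equal or with the wrong sign and sort() produces a bogus order. A compact demonstration:

/* why the subtraction comparator is wrong for 64-bit keys */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct map { uint64_t region_offset; };

static int cmp_bad(const void *a, const void *b)
{
	const struct map *m0 = a, *m1 = b;

	/* u64 difference truncated to int: high bits are lost */
	return m0->region_offset - m1->region_offset;
}

static int cmp_good(const void *a, const void *b)
{
	const struct map *m0 = a, *m1 = b;

	if (m0->region_offset < m1->region_offset)
		return -1;
	else if (m0->region_offset > m1->region_offset)
		return 1;
	return 0;
}

int main(void)
{
	struct map maps[] = { { 1ULL << 32 }, { 1 } };

	printf("bad: %d  good: %d\n",
	       cmp_bad(&maps[0], &maps[1]),	/* -1: claims 2^32 < 1 */
	       cmp_good(&maps[0], &maps[1]));	/*  1: correct */
	qsort(maps, 2, sizeof(maps[0]), cmp_good);
	printf("first after sort: %llu\n",	/* 1 */
	       (unsigned long long)maps[0].region_offset);
	return 0;
}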
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index 3e2ab3b14eea..9e95bf94eb13 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -2,6 +2,7 @@ menuconfig DEV_DAX
 	tristate "DAX: direct access to differentiated memory"
 	default m if NVDIMM_DAX
 	depends on TRANSPARENT_HUGEPAGE
+	select SRCU
 	help
 	  Support raw access to differentiated (persistence, bandwidth,
 	  latency...) memory via an mmap(2) capable character
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 80c6db279ae1..806f180c80d8 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -25,6 +25,7 @@
 #include "dax.h"
 
 static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
 static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
 static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
 * @region - parent region
 * @dev - device backing the character device
 * @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
 * @id - child id in the region
 * @num_resources - number of physical address extents in this device
 * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 static int dax_dev_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
-	int rc;
+	int rc, id;
 	struct file *filp = vmf->vma->vm_file;
 	struct dax_dev *dax_dev = filp->private_data;
 
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
 			? "write" : "read",
 			vmf->vma->vm_start, vmf->vma->vm_end);
 
-	rcu_read_lock();
+	id = srcu_read_lock(&dax_srcu);
 	switch (pe_size) {
 	case PE_SIZE_PTE:
 		rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
 	default:
 		return VM_FAULT_FALLBACK;
 	}
-	rcu_read_unlock();
+	srcu_read_unlock(&dax_srcu, id);
 
 	return rc;
 }
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
 	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
 	 * ensuring that any fault handlers that might have seen
 	 * dax_dev->alive == true, have completed. Any fault handlers
-	 * that start after synchronize_rcu() has started will abort
+	 * that start after synchronize_srcu() has started will abort
 	 * upon seeing dax_dev->alive == false.
 	 */
 	dax_dev->alive = false;
-	synchronize_rcu();
+	synchronize_srcu(&dax_srcu);
 	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
 	cdev_del(cdev);
 	device_unregister(dev);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 64a1095e4d14..a0ca68ce3fbb 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -134,6 +134,7 @@ static void set_max_bgx_per_node(struct pci_dev *pdev)
 	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
 	switch (sdevid) {
 	case PCI_SUBSYS_DEVID_81XX_BGX:
+	case PCI_SUBSYS_DEVID_81XX_RGX:
 		max_bgx_per_node = MAX_BGX_PER_CN81XX;
 		break;
 	case PCI_SUBSYS_DEVID_83XX_BGX:
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index c5080f2cead5..6b7fe6fdd13b 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -16,6 +16,7 @@
 /* Subsystem device IDs */
 #define PCI_SUBSYS_DEVID_88XX_BGX	0xA126
 #define PCI_SUBSYS_DEVID_81XX_BGX	0xA226
+#define PCI_SUBSYS_DEVID_81XX_RGX	0xA254
 #define PCI_SUBSYS_DEVID_83XX_BGX	0xA326
 
 #define MAX_BGX_THUNDER			8 /* Max 2 nodes, 4 per node */
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index d81d3b6dfd87..16f97552ae98 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -613,7 +613,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	struct mtk_mac *mac = netdev_priv(dev);
 	struct mtk_eth *eth = mac->hw;
 	struct mtk_tx_dma *itxd, *txd;
-	struct mtk_tx_buf *tx_buf;
+	struct mtk_tx_buf *itx_buf, *tx_buf;
 	dma_addr_t mapped_addr;
 	unsigned int nr_frags;
 	int i, n_desc = 1;
@@ -627,8 +627,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
 	txd4 |= fport;
 
-	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-	memset(tx_buf, 0, sizeof(*tx_buf));
+	itx_buf = mtk_desc_to_tx_buf(ring, itxd);
+	memset(itx_buf, 0, sizeof(*itx_buf));
 
 	if (gso)
 		txd4 |= TX_DMA_TSO;
@@ -647,9 +647,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 		return -ENOMEM;
 
 	WRITE_ONCE(itxd->txd1, mapped_addr);
-	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
+	itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+	itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+			  MTK_TX_FLAGS_FPORT1;
+	dma_unmap_addr_set(itx_buf, dma_addr0, mapped_addr);
+	dma_unmap_len_set(itx_buf, dma_len0, skb_headlen(skb));
 
 	/* TX SG offload */
 	txd = itxd;
@@ -685,11 +687,13 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 					       last_frag * TX_DMA_LS0));
 			WRITE_ONCE(txd->txd4, fport);
 
-			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 			tx_buf = mtk_desc_to_tx_buf(ring, txd);
 			memset(tx_buf, 0, sizeof(*tx_buf));
-
+			tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
 			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+			tx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 :
+					 MTK_TX_FLAGS_FPORT1;
+
 			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
 			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
 			frag_size -= frag_map_size;
@@ -698,7 +702,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
 	}
 
 	/* store skb to cleanup */
-	tx_buf->skb = skb;
+	itx_buf->skb = skb;
 
 	WRITE_ONCE(itxd->txd4, txd4);
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
@@ -1012,17 +1016,16 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
 
 	while ((cpu != dma) && budget) {
 		u32 next_cpu = desc->txd2;
-		int mac;
+		int mac = 0;
 
 		desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
 		if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
 			break;
 
-		mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-		       TX_DMA_FPORT_MASK;
-		mac--;
-
 		tx_buf = mtk_desc_to_tx_buf(ring, desc);
+		if (tx_buf->flags & MTK_TX_FLAGS_FPORT1)
+			mac = 1;
+
 		skb = tx_buf->skb;
 		if (!skb) {
 			condition = 1;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 996024d02668..3c46a3b613b9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -410,12 +410,18 @@ struct mtk_hw_stats {
 	struct u64_stats_sync	syncp;
 };
 
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
 enum mtk_tx_flags {
+	/* PDMA descriptor can point at 1-2 segments. This enum allows us to
+	 * track how memory was allocated so that it can be freed properly.
+	 */
 	MTK_TX_FLAGS_SINGLE0	= 0x01,
 	MTK_TX_FLAGS_PAGE0	= 0x02,
+
+	/* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted
+	 * SKB out instead of looking up through hardware TX descriptor.
+	 */
+	MTK_TX_FLAGS_FPORT0	= 0x04,
+	MTK_TX_FLAGS_FPORT1	= 0x08,
 };
 
 /* This enum allows us to identify how the clock is defined on the array of the
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 54248775f227..f68c4db656ed 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1127,12 +1127,70 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = sh_get_mdio,
 };
 
+/* free Tx skb function */
+static int sh_eth_tx_free(struct net_device *ndev, bool sent_only)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	struct sh_eth_txdesc *txdesc;
+	int free_num = 0;
+	int entry;
+	bool sent;
+
+	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
+		txdesc = &mdp->tx_ring[entry];
+		sent = !(txdesc->status & cpu_to_le32(TD_TACT));
+		if (sent_only && !sent)
+			break;
+		/* TACT bit must be checked before all the following reads */
+		dma_rmb();
+		netif_info(mdp, tx_done, ndev,
+			   "tx entry %d status 0x%08x\n",
+			   entry, le32_to_cpu(txdesc->status));
+		/* Free the original skb. */
+		if (mdp->tx_skbuff[entry]) {
+			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
+					 le32_to_cpu(txdesc->len) >> 16,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
+			mdp->tx_skbuff[entry] = NULL;
+			free_num++;
+		}
+		txdesc->status = cpu_to_le32(TD_TFP);
+		if (entry >= mdp->num_tx_ring - 1)
+			txdesc->status |= cpu_to_le32(TD_TDLE);
+
+		if (sent) {
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
+		}
+	}
+	return free_num;
+}
+
 /* free skb and descriptor buffer */
 static void sh_eth_ring_free(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	int ringsize, i;
 
+	if (mdp->rx_ring) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
+			if (mdp->rx_skbuff[i]) {
+				struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
+
+				dma_unmap_single(&ndev->dev,
+						 le32_to_cpu(rxdesc->addr),
+						 ALIGN(mdp->rx_buf_sz, 32),
+						 DMA_FROM_DEVICE);
+			}
+		}
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
 		for (i = 0; i < mdp->num_rx_ring; i++)
@@ -1141,27 +1199,18 @@ static void sh_eth_ring_free(struct net_device *ndev)
 	kfree(mdp->rx_skbuff);
 	mdp->rx_skbuff = NULL;
 
-	/* Free Tx skb ringbuffer */
-	if (mdp->tx_skbuff) {
-		for (i = 0; i < mdp->num_tx_ring; i++)
-			dev_kfree_skb(mdp->tx_skbuff[i]);
-	}
-	kfree(mdp->tx_skbuff);
-	mdp->tx_skbuff = NULL;
-
-	if (mdp->rx_ring) {
-		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
-		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
-				  mdp->rx_desc_dma);
-		mdp->rx_ring = NULL;
-	}
-
 	if (mdp->tx_ring) {
+		sh_eth_tx_free(ndev, false);
+
 		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
 				  mdp->tx_desc_dma);
 		mdp->tx_ring = NULL;
 	}
+
+	/* Free Tx skb ringbuffer */
+	kfree(mdp->tx_skbuff);
+	mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
@@ -1409,43 +1458,6 @@ static void sh_eth_dev_exit(struct net_device *ndev)
 	update_mac_address(ndev);
 }
 
-/* free Tx skb function */
-static int sh_eth_txfree(struct net_device *ndev)
-{
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-	struct sh_eth_txdesc *txdesc;
-	int free_num = 0;
-	int entry;
-
-	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % mdp->num_tx_ring;
-		txdesc = &mdp->tx_ring[entry];
-		if (txdesc->status & cpu_to_le32(TD_TACT))
-			break;
-		/* TACT bit must be checked before all the following reads */
-		dma_rmb();
-		netif_info(mdp, tx_done, ndev,
-			   "tx entry %d status 0x%08x\n",
-			   entry, le32_to_cpu(txdesc->status));
-		/* Free the original skb. */
-		if (mdp->tx_skbuff[entry]) {
-			dma_unmap_single(&ndev->dev, le32_to_cpu(txdesc->addr),
-					 le32_to_cpu(txdesc->len) >> 16,
-					 DMA_TO_DEVICE);
-			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
-			mdp->tx_skbuff[entry] = NULL;
-			free_num++;
-		}
-		txdesc->status = cpu_to_le32(TD_TFP);
-		if (entry >= mdp->num_tx_ring - 1)
-			txdesc->status |= cpu_to_le32(TD_TDLE);
-
-		ndev->stats.tx_packets++;
-		ndev->stats.tx_bytes += le32_to_cpu(txdesc->len) >> 16;
-	}
-	return free_num;
-}
-
 /* Packet receive function */
 static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 {
@@ -1690,7 +1702,7 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
 			   intr_status, mdp->cur_tx, mdp->dirty_tx,
 			   (u32)ndev->state, edtrr);
 		/* dirty buffer free */
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 
 		/* SH7712 BUG */
 		if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
@@ -1751,7 +1763,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 		/* Clear Tx interrupts */
 		sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
 
-		sh_eth_txfree(ndev);
+		sh_eth_tx_free(ndev, true);
 		netif_wake_queue(ndev);
 	}
 
@@ -2412,7 +2424,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	spin_lock_irqsave(&mdp->lock, flags);
 	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
-		if (!sh_eth_txfree(ndev)) {
+		if (!sh_eth_tx_free(ndev, true)) {
 			netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
 			netif_stop_queue(ndev);
 			spin_unlock_irqrestore(&mdp->lock, flags);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 50d28261b6b9..b9cb697b2818 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1371,6 +1371,13 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
 		free_cpumask_var(thread_mask);
 	}
 
+	if (count > EFX_MAX_RX_QUEUES) {
+		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+			       "Reducing number of rx queues from %u to %u.\n",
+			       count, EFX_MAX_RX_QUEUES);
+		count = EFX_MAX_RX_QUEUES;
+	}
+
 	/* If RSS is requested for the PF *and* VFs then we can't write RSS
 	 * table entries that are inaccessible to VFs
 	 */
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index f5e5cd1659a1..29614da91cbf 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1354,6 +1354,13 @@ static unsigned int ef4_wanted_parallelism(struct ef4_nic *efx)
 		free_cpumask_var(thread_mask);
 	}
 
+	if (count > EF4_MAX_RX_QUEUES) {
+		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
+			       "Reducing number of rx queues from %u to %u.\n",
+			       count, EF4_MAX_RX_QUEUES);
+		count = EF4_MAX_RX_QUEUES;
+	}
+
 	return count;
 }
 
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b847184de6fc..2e89aab1801a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -797,9 +797,6 @@ static struct phy_driver ksphy_driver[] = {
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -939,9 +936,6 @@ static struct phy_driver ksphy_driver[] = {
 	.read_status	= genphy_read_status,
 	.ack_interrupt	= kszphy_ack_interrupt,
 	.config_intr	= kszphy_config_intr,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -951,6 +945,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
+	.probe		= kszphy_probe,
 	.config_init	= ksz9021_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
@@ -970,6 +965,7 @@ static struct phy_driver ksphy_driver[] = {
 	.features	= PHY_GBIT_FEATURES,
 	.flags		= PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
 	.driver_data	= &ksz9021_type,
+	.probe		= kszphy_probe,
 	.config_init	= ksz9031_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= ksz9031_read_status,
@@ -988,9 +984,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1002,9 +995,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= genphy_config_aneg,
 	.read_status	= genphy_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 }, {
@@ -1016,9 +1006,6 @@ static struct phy_driver ksphy_driver[] = {
 	.config_init	= kszphy_config_init,
 	.config_aneg	= ksz8873mll_config_aneg,
 	.read_status	= ksz8873mll_read_status,
-	.get_sset_count = kszphy_get_sset_count,
-	.get_strings	= kszphy_get_strings,
-	.get_stats	= kszphy_get_stats,
 	.suspend	= genphy_suspend,
 	.resume		= genphy_resume,
 } };
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index a84dcad2ee91..aa5d30428bba 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1264,7 +1264,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
 		goto nla_put_failure;
 
 	/* rule only needs to appear once */
-	nlh->nlmsg_flags &= NLM_F_EXCL;
+	nlh->nlmsg_flags |= NLM_F_EXCL;
 
 	frh = nlmsg_data(nlh);
 	memset(frh, 0, sizeof(*frh));
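The vrf.c one-liner above is the classic flag bug: `flags &= NLM_F_EXCL` clears every bit except NLM_F_EXCL (and only keeps that one if it was already set), while `flags |= NLM_F_EXCL` adds the bit and preserves the rest. In isolation:

/* the &= vs |= flag bug, demonstrated; the flag values are the real
 * netlink constants, restated here as plain defines */
#include <stdio.h>

#define NLM_F_REQUEST	0x01
#define NLM_F_ACK	0x04
#define NLM_F_EXCL	0x200

int main(void)
{
	unsigned short flags = NLM_F_REQUEST | NLM_F_ACK;
	unsigned short a = flags, b = flags;

	a &= NLM_F_EXCL;	/* 0x000: wipes REQUEST/ACK, EXCL still unset */
	b |= NLM_F_EXCL;	/* 0x205: keeps existing flags, adds EXCL */
	printf("&=: 0x%03x  |=: 0x%03x\n", a, b);
	return 0;
}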
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 23d4a1728cdf..351bac8f6503 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
 	if (rc < 0)
 		goto out_unlock;
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+
 	if (copy_to_user(p, buf, buf_len))
 		rc = -EFAULT;
+
+	vfree(buf);
+	return rc;
+
 out_unlock:
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
 out:
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index b3323c0697f6..ca6d572c48fc 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	}
 
 	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
-		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+		/*
+		 * FIXME: nsio_rw_bytes() may be called from atomic
+		 * context in the btt case and nvdimm_clear_poison()
+		 * takes a sleeping lock. Until the locking can be
+		 * reworked this capability requires that the namespace
+		 * is not claimed by btt.
+		 */
+		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+				&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
 			long cleared;
 
 			cleared = nvdimm_clear_poison(&ndns->dev, offset, size);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 0eedc49e0d47..8b721321be5b 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
-	resource_size_t map_end, blk_start, new, busy;
+	resource_size_t map_end, blk_start, new;
 	struct blk_alloc_info *info = data;
 	struct nd_mapping *nd_mapping;
 	struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
 retry:
 	/*
 	 * Find the free dpa from the end of the last pmem allocation to
-	 * the end of the interleave-set mapping that is not already
-	 * covered by a blk allocation.
+	 * the end of the interleave-set mapping.
 	 */
-	busy = 0;
 	for_each_dpa_resource(ndd, res) {
+		if (strncmp(res->name, "pmem", 4) != 0)
+			continue;
 		if ((res->start >= blk_start && res->start < map_end)
 				|| (res->end >= blk_start
 					&& res->end <= map_end)) {
-			if (strncmp(res->name, "pmem", 4) == 0) {
-				new = max(blk_start, min(map_end + 1,
-							res->end + 1));
-				if (new != blk_start) {
-					blk_start = new;
-					goto retry;
-				}
-			} else
-				busy += min(map_end, res->end)
-					- max(nd_mapping->start, res->start) + 1;
-		} else if (nd_mapping->start > res->start
-				&& map_end < res->end) {
-			/* total eclipse of the PMEM region mapping */
-			busy += nd_mapping->size;
-			break;
+			new = max(blk_start, min(map_end + 1, res->end + 1));
+			if (new != blk_start) {
+				blk_start = new;
+				goto retry;
+			}
 		}
 	}
 
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
 		return 1;
 	}
 
-	info->available -= blk_start - nd_mapping->start + busy;
+	info->available -= blk_start - nd_mapping->start;
 
 	return 0;
 }
 
-static int blk_dpa_busy(struct device *dev, void *data)
-{
-	struct blk_alloc_info *info = data;
-	struct nd_mapping *nd_mapping;
-	struct nd_region *nd_region;
-	resource_size_t map_end;
-	int i;
-
-	if (!is_nd_pmem(dev))
-		return 0;
-
-	nd_region = to_nd_region(dev);
-	for (i = 0; i < nd_region->ndr_mappings; i++) {
-		nd_mapping = &nd_region->mapping[i];
-		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-			break;
-	}
-
-	if (i >= nd_region->ndr_mappings)
-		return 0;
-
-	map_end = nd_mapping->start + nd_mapping->size - 1;
-	if (info->res->start >= nd_mapping->start
-			&& info->res->start < map_end) {
-		if (info->res->end <= map_end) {
-			info->busy = 0;
-			return 1;
-		} else {
-			info->busy -= info->res->end - map_end;
-			return 0;
-		}
-	} else if (info->res->end >= nd_mapping->start
-			&& info->res->end <= map_end) {
-		info->busy -= nd_mapping->start - info->res->start;
-		return 0;
-	} else {
-		info->busy -= nd_mapping->size;
-		return 0;
-	}
-}
-
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
 	for_each_dpa_resource(ndd, res) {
 		if (strncmp(res->name, "blk", 3) != 0)
 			continue;
-
-		info.res = res;
-		info.busy = resource_size(res);
-		device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
-		info.available -= info.busy;
+		info.available -= resource_size(res);
 	}
 
 	return info.available;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 9690beb15e69..d996ca73d3be 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2023,7 +2023,7 @@ nvme_fc_configure_admin_queue(struct nvme_fc_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 47a479f26e5d..16f84eb0b95e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1606,7 +1606,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 22f7bc6bac7f..c7b0b6a52708 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -392,7 +392,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
 	}
 
 	ctrl->ctrl.sqsize =
-		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);
+		min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize);
 
 	error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
 	if (error)
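All three NVMe hunks above drop a stray `+ 1`: CAP.MQES is a zero's-based field (a raw value of N means N + 1 queue entries) and ctrl->ctrl.sqsize is kept zero's-based too, so the clamp must compare like with like or sqsize can end up one larger than the queue the controller advertises. A worked sketch of the arithmetic:

/* zero's-based clamping, as in the three hunks above */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int mqes = 31;		/* controller: 32-entry queues, zero's-based */
	int sqsize = 127;	/* requested: 128 entries, zero's-based */

	int bad = MIN(mqes + 1, sqsize);	/* 32 => 33 entries: too big */
	int good = MIN(mqes, sqsize);		/* 31 => 32 entries: fits */

	printf("bad: %d entries, good: %d entries\n", bad + 1, good + 1);
	return 0;
}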
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index f1e5e65388bb..cd739d2fa160 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -275,7 +275,7 @@ int reset_control_status(struct reset_control *rstc)
 }
 EXPORT_SYMBOL_GPL(reset_control_status);
 
-static struct reset_control *__reset_control_get(
+static struct reset_control *__reset_control_get_internal(
 				struct reset_controller_dev *rcdev,
 				unsigned int index, bool shared)
 {
@@ -308,7 +308,7 @@ static struct reset_control *__reset_control_get(
 	return rstc;
 }
 
-static void __reset_control_put(struct reset_control *rstc)
+static void __reset_control_put_internal(struct reset_control *rstc)
 {
 	lockdep_assert_held(&reset_list_mutex);
 
@@ -377,7 +377,7 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
 	}
 
 	/* reset_list_mutex also protects the rcdev's reset_control list */
-	rstc = __reset_control_get(rcdev, rstc_id, shared);
+	rstc = __reset_control_get_internal(rcdev, rstc_id, shared);
 
 	mutex_unlock(&reset_list_mutex);
 
@@ -385,6 +385,17 @@ struct reset_control *__of_reset_control_get(struct device_node *node,
 }
 EXPORT_SYMBOL_GPL(__of_reset_control_get);
 
+struct reset_control *__reset_control_get(struct device *dev, const char *id,
+					  int index, bool shared, bool optional)
+{
+	if (dev->of_node)
+		return __of_reset_control_get(dev->of_node, id, index, shared,
+					      optional);
+
+	return optional ? NULL : ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(__reset_control_get);
+
 /**
  * reset_control_put - free the reset controller
  * @rstc: reset controller
@@ -396,7 +407,7 @@ void reset_control_put(struct reset_control *rstc)
 		return;
 
 	mutex_lock(&reset_list_mutex);
-	__reset_control_put(rstc);
+	__reset_control_put_internal(rstc);
 	mutex_unlock(&reset_list_mutex);
 }
 EXPORT_SYMBOL_GPL(reset_control_put);
@@ -417,8 +428,7 @@ struct reset_control *__devm_reset_control_get(struct device *dev,
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	rstc = __of_reset_control_get(dev ? dev->of_node : NULL,
-				      id, index, shared, optional);
+	rstc = __reset_control_get(dev, id, index, shared, optional);
 	if (!IS_ERR(rstc)) {
 		*ptr = rstc;
 		devres_add(dev, ptr);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index d036a806f31c..d281492009fb 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -1690,9 +1690,6 @@ struct aac_dev
 #define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
 	(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
 
-#define aac_adapter_check_health(dev) \
-	(dev)->a_ops.adapter_check_health(dev)
-
 #define aac_adapter_restart(dev, bled, reset_type) \
 	((dev)->a_ops.adapter_restart(dev, bled, reset_type))
 
@@ -2615,6 +2612,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
 	return capacity;
 }
 
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
+	if (unlikely(pci_channel_offline(dev->pdev)))
+		return -1;
+
+	return (dev)->a_ops.adapter_check_health(dev);
+}
+
 /* SCp.phase values */
 #define AAC_OWNER_MIDLEVEL	0x101
 #define AAC_OWNER_LOWLEVEL	0x102
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index c8172f16cf33..1f4918355fdb 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1873,7 +1873,8 @@ int aac_check_health(struct aac_dev * aac)
 	spin_unlock_irqrestore(&aac->fib_lock, flagv);
 
 	if (BlinkLED < 0) {
-		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
+		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+		       aac->name, BlinkLED);
 		goto out;
 	}
 
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index b29afafc2885..5d5e272fd815 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6293,7 +6293,12 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
 		break;
 	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
 	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
-		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+		/*
+		 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
+		 * so SCSI mid-layer and upper layers handle it accordingly.
+ */ + if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION) + scsi_cmd->result |= (DID_PASSTHROUGH << 16); break; case IPR_IOASC_BUS_WAS_RESET: case IPR_IOASC_BUS_WAS_RESET_BY_OTHER: diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index ed58b9104f58..e10b91cc3c62 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -99,7 +99,8 @@ static void qedf_fcoe_process_vlan_resp(struct qedf_ctx *qedf, qedf_set_vlan_id(qedf, vid); /* Inform waiter that it's ok to call fcoe_ctlr_link up() */ - complete(&qedf->fipvlan_compl); + if (!completion_done(&qedf->fipvlan_compl)) + complete(&qedf->fipvlan_compl); } } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index 8e2a160490e6..cceddd995a4b 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -2803,6 +2803,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) atomic_set(&qedf->num_offloads, 0); qedf->stop_io_on_error = false; pci_set_drvdata(pdev, qedf); + init_completion(&qedf->fipvlan_compl); QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "QLogic FastLinQ FCoE Module qedf %s, " diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3e7011757c82..83d61d2142e9 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -1160,8 +1160,13 @@ static inline uint32_t qla2x00_isp_reg_stat(struct qla_hw_data *ha) { struct device_reg_24xx __iomem *reg = &ha->iobase->isp24; + struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82; - return ((RD_REG_DWORD(&reg->host_status)) == ISP_REG_DISCONNECT); + if (IS_P3P_TYPE(ha)) + return ((RD_REG_DWORD(&reg82->host_int)) == ISP_REG_DISCONNECT); + else + return ((RD_REG_DWORD(&reg->host_status)) == + ISP_REG_DISCONNECT); } /************************************************************************** diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index fcfeddc79331..35ad5e8a31ab 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2102,6 +2102,22 @@ static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp, #define READ_CAPACITY_RETRIES_ON_RESET 10 +/* + * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set + * and the reported logical block size is bigger than 512 bytes. Note + * that last_sector is a u64 and therefore logical_to_sectors() is not + * applicable. + */ +static bool sd_addressable_capacity(u64 lba, unsigned int sector_size) +{ + u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9); + + if (sizeof(sector_t) == 4 && last_sector > U32_MAX) + return false; + + return true; +} + static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, unsigned char *buffer) { @@ -2167,7 +2183,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp, return -ENODEV; } - if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) { + if (!sd_addressable_capacity(lba, sector_size)) { sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a " "kernel compiled with support for large block " "devices.\n"); @@ -2256,7 +2272,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp, return sector_size; } - if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) { + if (!sd_addressable_capacity(lba, sector_size)) { sd_printk(KERN_ERR, sdkp, "Too big for this kernel. 
Use a " "kernel compiled with support for large block " "devices.\n"); @@ -2956,7 +2972,8 @@ static int sd_revalidate_disk(struct gendisk *disk) q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); } else - rw_max = BLK_DEF_MAX_SECTORS; + rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), + (sector_t)BLK_DEF_MAX_SECTORS); /* Combine with controller limits */ q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 0b29b9329b1c..a8f630213a1a 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -836,6 +836,7 @@ static void get_capabilities(struct scsi_cd *cd) unsigned char *buffer; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + unsigned int ms_len = 128; int rc, n; static const char *loadmech[] = @@ -862,10 +863,11 @@ static void get_capabilities(struct scsi_cd *cd) scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr); /* ask for mode page 0x2a */ - rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128, + rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len, SR_TIMEOUT, 3, &data, NULL); - if (!scsi_status_is_good(rc)) { + if (!scsi_status_is_good(rc) || data.length > ms_len || + data.header_length + data.block_descriptor_length > data.length) { /* failed, drive doesn't have capabilities mode page */ cd->cdi.speed = 1; cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R | diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index b0500a0a87b8..e4603b09863a 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c @@ -492,6 +492,41 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) } /** + * tty_ldisc_restore - helper for tty ldisc change + * @tty: tty to recover + * @old: previous ldisc + * + * Restore the previous line discipline or N_TTY when a line discipline + * change fails due to an open error + */ + +static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) +{ + struct tty_ldisc *new_ldisc; + int r; + + /* There is an outstanding reference here so this is safe */ + old = tty_ldisc_get(tty, old->ops->num); + WARN_ON(IS_ERR(old)); + tty->ldisc = old; + tty_set_termios_ldisc(tty, old->ops->num); + if (tty_ldisc_open(tty, old) < 0) { + tty_ldisc_put(old); + /* This driver is always present */ + new_ldisc = tty_ldisc_get(tty, N_TTY); + if (IS_ERR(new_ldisc)) + panic("n_tty: get"); + tty->ldisc = new_ldisc; + tty_set_termios_ldisc(tty, N_TTY); + r = tty_ldisc_open(tty, new_ldisc); + if (r < 0) + panic("Couldn't open N_TTY ldisc for " + "%s --- error %d.", + tty_name(tty), r); + } +} + +/** * tty_set_ldisc - set line discipline * @tty: the terminal to set * @ldisc: the line discipline @@ -504,7 +539,12 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) int tty_set_ldisc(struct tty_struct *tty, int disc) { - int retval, old_disc; + int retval; + struct tty_ldisc *old_ldisc, *new_ldisc; + + new_ldisc = tty_ldisc_get(tty, disc); + if (IS_ERR(new_ldisc)) + return PTR_ERR(new_ldisc); tty_lock(tty); retval = tty_ldisc_lock(tty, 5 * HZ); @@ -517,8 +557,7 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) } /* Check the no-op case */ - old_disc = tty->ldisc->ops->num; - if (old_disc == disc) + if (tty->ldisc->ops->num == disc) goto out; if (test_bit(TTY_HUPPED, &tty->flags)) { @@ -527,25 +566,34 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) goto out; } - retval = tty_ldisc_reinit(tty, disc); + old_ldisc = tty->ldisc; + + /* Shutdown 
the old discipline. */ + tty_ldisc_close(tty, old_ldisc); + + /* Now set up the new line discipline. */ + tty->ldisc = new_ldisc; + tty_set_termios_ldisc(tty, disc); + + retval = tty_ldisc_open(tty, new_ldisc); if (retval < 0) { /* Back to the old one or N_TTY if we can't */ - if (tty_ldisc_reinit(tty, old_disc) < 0) { - pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); - if (tty_ldisc_reinit(tty, N_TTY) < 0) { - /* At this point we have tty->ldisc == NULL. */ - pr_err("tty: reinitializing N_TTY failed\n"); - } - } + tty_ldisc_put(new_ldisc); + tty_ldisc_restore(tty, old_ldisc); } - if (tty->ldisc && tty->ldisc->ops->num != old_disc && - tty->ops->set_ldisc) { + if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { down_read(&tty->termios_rwsem); tty->ops->set_ldisc(tty); up_read(&tty->termios_rwsem); } + /* At this point we hold a reference to the new ldisc and a + reference to the old ldisc, or we hold two references to + the old ldisc (if it was restored as part of error cleanup + above). In either case, releasing a single reference from + the old ldisc is correct. */ + new_ldisc = old_ldisc; out: tty_ldisc_unlock(tty); @@ -553,6 +601,7 @@ out: already running */ tty_buffer_restart_work(tty->port); err: + tty_ldisc_put(new_ldisc); /* drop the extra reference */ tty_unlock(tty); return retval; } @@ -613,8 +662,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) int retval; ld = tty_ldisc_get(tty, disc); - if (IS_ERR(ld)) + if (IS_ERR(ld)) { + BUG_ON(disc == N_TTY); return PTR_ERR(ld); + } if (tty->ldisc) { tty_ldisc_close(tty, tty->ldisc); @@ -626,8 +677,10 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) tty_set_termios_ldisc(tty, disc); retval = tty_ldisc_open(tty, tty->ldisc); if (retval) { - tty_ldisc_put(tty->ldisc); - tty->ldisc = NULL; + if (!WARN_ON(disc == N_TTY)) { + tty_ldisc_put(tty->ldisc); + tty->ldisc = NULL; + } } return retval; } diff --git a/fs/namei.c b/fs/namei.c index d41fab78798b..19dcf62133cc 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2145,6 +2145,9 @@ static const char *path_init(struct nameidata *nd, unsigned flags) int retval = 0; const char *s = nd->name->name; + if (!*s) + flags &= ~LOOKUP_RCU; + nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; nd->depth = 0; diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c index c4ab6fdf17a0..e1534c9bab16 100644 --- a/fs/orangefs/devorangefs-req.c +++ b/fs/orangefs/devorangefs-req.c @@ -208,14 +208,19 @@ restart: continue; /* * Skip ops whose filesystem we don't know about unless - * it is being mounted. + * it is being mounted or unmounted. It is possible for + * a filesystem we don't know about to be unmounted if + * it fails to mount in the kernel after userspace has + * been sent the mount request. */ /* XXX: is there a better way to detect this? 
*/ } else if (ret == -1 && !(op->upcall.type == ORANGEFS_VFS_OP_FS_MOUNT || op->upcall.type == - ORANGEFS_VFS_OP_GETATTR)) { + ORANGEFS_VFS_OP_GETATTR || + op->upcall.type == + ORANGEFS_VFS_OP_FS_UMOUNT)) { gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: skipping op tag %llu %s\n", llu(op->tag), get_opname_string(op)); diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index 5e48a0be9761..8afac46fcc87 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -249,6 +249,7 @@ struct orangefs_sb_info_s { char devname[ORANGEFS_MAX_SERVER_ADDR_LEN]; struct super_block *sb; int mount_pending; + int no_list; struct list_head list; }; diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index cd261c8de53a..629d8c917fa6 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -493,7 +493,7 @@ struct dentry *orangefs_mount(struct file_system_type *fst, if (ret) { d = ERR_PTR(ret); - goto free_op; + goto free_sb_and_op; } /* @@ -519,6 +519,9 @@ struct dentry *orangefs_mount(struct file_system_type *fst, spin_unlock(&orangefs_superblocks_lock); op_release(new_op); + /* Must be removed from the list now. */ + ORANGEFS_SB(sb)->no_list = 0; + if (orangefs_userspace_version >= 20906) { new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES); if (!new_op) @@ -533,6 +536,10 @@ struct dentry *orangefs_mount(struct file_system_type *fst, return dget(sb->s_root); +free_sb_and_op: + /* Will call orangefs_kill_sb with sb not in list. */ + ORANGEFS_SB(sb)->no_list = 1; + deactivate_locked_super(sb); free_op: gossip_err("orangefs_mount: mount request failed with %d\n", ret); if (ret == -EINVAL) { @@ -558,12 +565,14 @@ void orangefs_kill_sb(struct super_block *sb) */ orangefs_unmount_sb(sb); - /* remove the sb from our list of orangefs specific sb's */ - - spin_lock(&orangefs_superblocks_lock); - __list_del_entry(&ORANGEFS_SB(sb)->list); /* not list_del_init */ - ORANGEFS_SB(sb)->list.prev = NULL; - spin_unlock(&orangefs_superblocks_lock); + if (!ORANGEFS_SB(sb)->no_list) { + /* remove the sb from our list of orangefs specific sb's */ + spin_lock(&orangefs_superblocks_lock); + /* not list_del_init */ + __list_del_entry(&ORANGEFS_SB(sb)->list); + ORANGEFS_SB(sb)->list.prev = NULL; + spin_unlock(&orangefs_superblocks_lock); + } /* * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 1d4f365d8f03..f6d9af3efa45 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -166,6 +166,16 @@ static inline struct ahash_instance *ahash_alloc_instance( return crypto_alloc_instance2(name, alg, ahash_instance_headroom()); } +static inline void ahash_request_complete(struct ahash_request *req, int err) +{ + req->base.complete(&req->base, err); +} + +static inline u32 ahash_request_flags(struct ahash_request *req) +{ + return req->base.flags; +} + static inline struct crypto_ahash *crypto_spawn_ahash( struct crypto_ahash_spawn *spawn) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7548f332121a..01a696b0a4d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1672,12 +1672,36 @@ static inline bool bios_segs_mergeable(struct request_queue *q, return true; } -static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, - struct bio *next) +static inline bool bio_will_gap(struct request_queue *q, + struct request *prev_rq, + struct bio *prev, + struct bio *next) { if (bio_has_data(prev) && queue_virt_boundary(q)) { struct 
bio_vec pb, nb; + /* + * don't merge if the 1st bio starts with non-zero + * offset, otherwise it is quite difficult to respect + * sg gap limit. We work hard to merge a huge number of small + * single bios in case of mkfs. + */ + if (prev_rq) + bio_get_first_bvec(prev_rq->bio, &pb); + else + bio_get_first_bvec(prev, &pb); + if (pb.bv_offset) + return true; + + /* + * We don't need to worry about the situation that the + * merged segment ends in unaligned virt boundary: + * + * - if 'pb' ends aligned, the merged segment ends aligned + * - if 'pb' ends unaligned, the next bio must include + * one single bvec of 'nb', otherwise the 'nb' can't + * merge with 'pb' + */ bio_get_last_bvec(prev, &pb); bio_get_first_bvec(next, &nb); @@ -1690,12 +1714,12 @@ static inline bool bio_will_gap(struct request_queue *q, struct bio *prev, static inline bool req_gap_back_merge(struct request *req, struct bio *bio) { - return bio_will_gap(req->q, req->biotail, bio); + return bio_will_gap(req->q, req, req->biotail, bio); } static inline bool req_gap_front_merge(struct request *req, struct bio *bio) { - return bio_will_gap(req->q, bio, req->bio); + return bio_will_gap(req->q, NULL, bio, req->bio); } int kblockd_schedule_work(struct work_struct *work); diff --git a/include/linux/reset.h b/include/linux/reset.h index 96fb139bdd08..13d8681210d5 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -15,6 +15,9 @@ int reset_control_status(struct reset_control *rstc); struct reset_control *__of_reset_control_get(struct device_node *node, const char *id, int index, bool shared, bool optional); +struct reset_control *__reset_control_get(struct device *dev, const char *id, + int index, bool shared, + bool optional); void reset_control_put(struct reset_control *rstc); struct reset_control *__devm_reset_control_get(struct device *dev, const char *id, int index, bool shared, @@ -72,6 +75,13 @@ static inline struct reset_control *__of_reset_control_get( return optional ? NULL : ERR_PTR(-ENOTSUPP); } +static inline struct reset_control *__reset_control_get( + struct device *dev, const char *id, + int index, bool shared, bool optional) +{ + return optional ? NULL : ERR_PTR(-ENOTSUPP); +} + static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, int index, bool shared, bool optional) @@ -102,8 +112,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) #ifndef CONFIG_RESET_CONTROLLER WARN_ON(1); #endif - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, - false); + return __reset_control_get(dev, id, 0, false, false); } /** @@ -131,22 +140,19 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, true, - false); + return __reset_control_get(dev, id, 0, true, false); } static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? dev->of_node : NULL, id, 0, false, - true); + return __reset_control_get(dev, id, 0, false, true); } static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __of_reset_control_get(dev ? 
dev->of_node : NULL, id, 0, true, - true); + return __reset_control_get(dev, id, 0, true, true); } /** diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 62e1e447ded9..ca15cf2b85bb 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -3349,6 +3349,14 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) if (insn->imm == BPF_FUNC_xdp_adjust_head) prog->xdp_adjust_head = 1; if (insn->imm == BPF_FUNC_tail_call) { + /* If we tail call into other programs, we + * cannot make any assumptions since they can + * be replaced dynamically during runtime in + * the program array. + */ + prog->cb_access = 1; + prog->xdp_adjust_head = 1; + /* mark bpf_tail_call as different opcode to avoid * conditional branch in the interpreter for every normal * call and to prevent accidental JITing by JIT compiler diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 12e19f0636ea..1dc22f6b49f5 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -1146,7 +1146,7 @@ struct dentry *cgroup1_mount(struct file_system_type *fs_type, int flags, * path is super cold. Let's just sleep a bit and retry. */ pinned_sb = kernfs_pin_sb(root->kf_root, NULL); - if (IS_ERR_OR_NULL(pinned_sb) || + if (IS_ERR(pinned_sb) || !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { mutex_unlock(&cgroup_mutex); if (!IS_ERR_OR_NULL(pinned_sb)) diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index c2b88490d857..c08fbd2f5ba9 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h @@ -46,13 +46,13 @@ enum { (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) /* - * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, + * CONFIG_LOCKDEP_SMALL is defined for sparc. Sparc requires .text, * .data and .bss to fit in required 32MB limit for the kernel. With - * PROVE_LOCKING we could go over this limit and cause system boot-up problems. + * CONFIG_LOCKDEP we could go over this limit and cause system boot-up problems. * So, reduce the static allocations for lockdep-related structures so that * everything fits in current required size limit. */ -#ifdef CONFIG_PROVE_LOCKING_SMALL +#ifdef CONFIG_LOCKDEP_SMALL /* * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies * we track. 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b9691ee8f6c1..dd3e91d68dc7 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -3755,23 +3755,24 @@ static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) ftrace_probe_registered = 1; } -static void __disable_ftrace_function_probe(void) +static bool __disable_ftrace_function_probe(void) { int i; if (!ftrace_probe_registered) - return; + return false; for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { struct hlist_head *hhd = &ftrace_func_hash[i]; if (hhd->first) - return; + return false; } /* no more funcs left */ ftrace_shutdown(&trace_probe_ops, 0); ftrace_probe_registered = 0; + return true; } @@ -3901,6 +3902,7 @@ static void __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, void *data, int flags) { + struct ftrace_ops_hash old_hash_ops; struct ftrace_func_entry *rec_entry; struct ftrace_func_probe *entry; struct ftrace_func_probe *p; @@ -3912,6 +3914,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, struct hlist_node *tmp; char str[KSYM_SYMBOL_LEN]; int i, ret; + bool disabled; if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) func_g.search = NULL; @@ -3930,6 +3933,10 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, mutex_lock(&trace_probe_ops.func_hash->regex_lock); + old_hash_ops.filter_hash = old_hash; + /* Probes only have filters */ + old_hash_ops.notrace_hash = NULL; + hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); if (!hash) /* Hmm, should report this somehow */ @@ -3967,12 +3974,17 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, } } mutex_lock(&ftrace_lock); - __disable_ftrace_function_probe(); + disabled = __disable_ftrace_function_probe(); /* * Remove after the disable is called. Otherwise, if the last * probe is removed, a null hash means *all enabled*. 
*/ ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); + + /* still need to update the function call sites */ + if (ftrace_enabled && !disabled) + ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, + &old_hash_ops); synchronize_sched(); if (!ret) free_ftrace_hash_rcu(old_hash); @@ -5554,6 +5566,15 @@ static void clear_ftrace_pids(struct trace_array *tr) trace_free_pid_list(pid_list); } +void ftrace_clear_pids(struct trace_array *tr) +{ + mutex_lock(&ftrace_lock); + + clear_ftrace_pids(tr); + + mutex_unlock(&ftrace_lock); +} + static void ftrace_pid_reset(struct trace_array *tr) { mutex_lock(&ftrace_lock); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f35109514a01..d484452ae648 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -7402,6 +7402,7 @@ static int instance_rmdir(const char *name) tracing_set_nop(tr); event_trace_del_tracer(tr); + ftrace_clear_pids(tr); ftrace_destroy_function_files(tr); tracefs_remove_recursive(tr->dir); free_trace_buffers(tr); diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index ae1cce91fead..d19d52d600d6 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -896,6 +896,7 @@ int using_ftrace_ops_list_func(void); void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d_tracer); void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d_tracer); +void ftrace_clear_pids(struct trace_array *tr); #else static inline int ftrace_trace_task(struct trace_array *tr) { @@ -914,6 +915,7 @@ ftrace_init_global_array_ops(struct trace_array *tr) { } static inline void ftrace_reset_array_ops(struct trace_array *tr) { } static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } +static inline void ftrace_clear_pids(struct trace_array *tr) { } /* ftrace_func_t type is not defined, use macro instead of static inline */ #define ftrace_init_array_ops(tr, func) do { } while (0) #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 97d62c2da6c2..fa16c0f82d6e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1103,9 +1103,6 @@ config PROVE_LOCKING For more details, see Documentation/locking/lockdep-design.txt. -config PROVE_LOCKING_SMALL - bool - config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT @@ -1114,6 +1111,9 @@ config LOCKDEP select KALLSYMS select KALLSYMS_ALL +config LOCKDEP_SMALL + bool + config LOCK_STAT bool "Lock usage statistics" depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ad2af563756a..58604c1889bd 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3817,6 +3817,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb, serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; serr->ee.ee_info = tstype; serr->opt_stats = opt_stats; + serr->header.h4.iif = skb->dev ? 
skb->dev->ifindex : 0; if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { serr->ee.ee_data = skb_shinfo(skb)->tskey; if (sk->sk_protocol == IPPROTO_TCP && diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ebd953bc5607..1d46d05efb0f 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -488,16 +488,15 @@ static bool ipv4_datagram_support_cmsg(const struct sock *sk, return false; /* Support IP_PKTINFO on tstamp packets if requested, to correlate - * timestamp with egress dev. Not possible for packets without dev + * timestamp with egress dev. Not possible for packets without iif * or without payload (SOF_TIMESTAMPING_OPT_TSONLY). */ - if ((!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG)) || - (!skb->dev)) + info = PKTINFO_SKB_CB(skb); + if (!(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_CMSG) || + !info->ipi_ifindex) return false; - info = PKTINFO_SKB_CB(skb); info->ipi_spec_dst.s_addr = ip_hdr(skb)->saddr; - info->ipi_ifindex = skb->dev->ifindex; return true; } @@ -591,6 +590,7 @@ static bool setsockopt_needs_rtnl(int optname) case MCAST_LEAVE_GROUP: case MCAST_LEAVE_SOURCE_GROUP: case MCAST_UNBLOCK_SOURCE: + case IP_ROUTER_ALERT: return true; } return false; diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 95ea3585a223..3a02d52ed50e 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1278,7 +1278,7 @@ static void mrtsock_destruct(struct sock *sk) struct net *net = sock_net(sk); struct mr_table *mrt; - rtnl_lock(); + ASSERT_RTNL(); ipmr_for_each_table(mrt, net) { if (sk == rtnl_dereference(mrt->mroute_sk)) { IPV4_DEVCONF_ALL(net, MC_FORWARDING)--; @@ -1290,7 +1290,6 @@ static void mrtsock_destruct(struct sock *sk) mroute_clean_tables(mrt, false); } } - rtnl_unlock(); } /* Socket options and virtual interface manipulation. The whole @@ -1355,13 +1354,8 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, if (sk != rcu_access_pointer(mrt->mroute_sk)) { ret = -EACCES; } else { - /* We need to unlock here because mrtsock_destruct takes - * care of rtnl itself and we can't change that due to - * the IP_ROUTER_ALERT setsockopt which runs without it. - */ - rtnl_unlock(); ret = ip_ra_control(sk, 0, NULL); - goto out; + goto out_unlock; } break; case MRT_ADD_VIF: @@ -1472,7 +1466,6 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, } out_unlock: rtnl_unlock(); -out: return ret; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 8119e1f66e03..9d943974de2b 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -682,7 +682,9 @@ static void raw_close(struct sock *sk, long timeout) /* * Raw sockets may have direct kernel references. Kill them. */ + rtnl_lock(); ip_ra_control(sk, 0, NULL); + rtnl_unlock(); sk_common_release(sk); } diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index eec27f87efac..e011122ebd43 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -405,9 +405,6 @@ static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr) * At one point, excluding local errors was a quick test to identify icmp/icmp6 * errors. This is no longer true, but the test remained, so the v6 stack, * unlike v4, also honors cmsg requests on all wifi and timestamp errors. - * - * Timestamp code paths do not initialize the fields expected by cmsg: - * the PKTINFO fields in skb->cb[]. Fill those in here. 
*/ static bool ip6_datagram_support_cmsg(struct sk_buff *skb, struct sock_exterr_skb *serr) @@ -419,14 +416,9 @@ static bool ip6_datagram_support_cmsg(struct sk_buff *skb, if (serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL) return false; - if (!skb->dev) + if (!IP6CB(skb)->iif) return false; - if (skb->protocol == htons(ETH_P_IPV6)) - IP6CB(skb)->iif = skb->dev->ifindex; - else - PKTINFO_SKB_CB(skb)->ipi_ifindex = skb->dev->ifindex; - return true; } diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index b04539dd4629..9ee208a348f5 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -124,11 +124,14 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); /* * RFC4291 2.5.3 + * The loopback address must not be used as the source address in IPv6 + * packets that are sent outside of a single node. [..] * A packet received on an interface with a destination address * of loopback must be dropped. */ - if (!(dev->flags & IFF_LOOPBACK) && - ipv6_addr_loopback(&hdr->daddr)) + if ((ipv6_addr_loopback(&hdr->saddr) || + ipv6_addr_loopback(&hdr->daddr)) && + !(dev->flags & IFF_LOOPBACK)) goto err; /* RFC4291 Errata ID: 3480 diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc new file mode 100644 index 000000000000..bab5ff7c607e --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc @@ -0,0 +1,117 @@ +#!/bin/sh +# description: ftrace - function pid filters + +# Make sure that function pid matching filter works. +# Also test it on an instance directory + +if ! grep -q function available_tracers; then + echo "no function tracer configured" + exit_unsupported +fi + +if [ ! -f set_ftrace_pid ]; then + echo "set_ftrace_pid not found? Is function tracer not set?" + exit_unsupported +fi + +if [ ! -f set_ftrace_filter ]; then + echo "set_ftrace_filter not found? Is function tracer not set?" + exit_unsupported +fi + +do_function_fork=1 + +if [ ! -f options/function-fork ]; then + do_function_fork=0 + echo "no option for function-fork found. Option will not be tested." +fi + +read PID _ < /proc/self/stat + +if [ $do_function_fork -eq 1 ]; then + # default value of function-fork option + orig_value=`grep function-fork trace_options` +fi + +do_reset() { + reset_tracer + clear_trace + enable_tracing + echo > set_ftrace_filter + echo > set_ftrace_pid + + if [ $do_function_fork -eq 0 ]; then + return + fi + + echo $orig_value > trace_options +} + +fail() { # msg + do_reset + echo $1 + exit $FAIL +} + +yield() { + ping localhost -c 1 || sleep .001 || usleep 1 || sleep 1 +} + +do_test() { + disable_tracing + + echo do_execve* > set_ftrace_filter + echo *do_fork >> set_ftrace_filter + + echo $PID > set_ftrace_pid + echo function > current_tracer + + if [ $do_function_fork -eq 1 ]; then + # don't allow children to be traced + echo nofunction-fork > trace_options + fi + + enable_tracing + yield + + count_pid=`cat trace | grep -v ^# | grep $PID | wc -l` + count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l` + + # count_other should be 0 + if [ $count_pid -eq 0 -o $count_other -ne 0 ]; then + fail "PID filtering not working?" 
+ fi + + disable_tracing + clear_trace + + if [ $do_function_fork -eq 0 ]; then + return + fi + + # allow children to be traced + echo function-fork > trace_options + + enable_tracing + yield + + count_pid=`cat trace | grep -v ^# | grep $PID | wc -l` + count_other=`cat trace | grep -v ^# | grep -v $PID | wc -l` + + # count_other should NOT be 0 + if [ $count_pid -eq 0 -o $count_other -eq 0 ]; then + fail "PID filtering not following fork?" + fi +} + +do_test + +mkdir instances/foo +cd instances/foo +do_test +cd ../../ +rmdir instances/foo + +do_reset + +exit 0
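A note on exercising the new testcase: it can be run on its own through the ftracetest runner that ships in the same selftests directory. The sketch below is illustrative rather than part of the diff above; it assumes root privileges and a kernel with tracefs available, and uses the testcase path exactly as added by this patch.

    # from a kernel source tree with this patch applied
    cd tools/testing/selftests/ftrace
    sudo ./ftracetest test.d/ftrace/func-filter-pid.tc

ftracetest reports a per-test PASS/FAIL result, and the exit_unsupported guards at the top of the script should cause it to be reported as unsupported rather than failed on kernels built without the function tracer.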