264 files changed, 2574 insertions, 1532 deletions
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e6b72d355151..68c0f517c60e 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -124,12 +124,11 @@ the default being 204800 sectors (or 100MB).
 Updating on-disk metadata
 -------------------------
 
-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written. If no such requests are made then commits will occur every
-second. This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target). If
-power is lost you may lose some recent writes. The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the cache behaves like a physical disk that has a volatile write
+cache. If power is lost you may lose some recent writes. The metadata
+should always be consistent in spite of any crash.
 
 The 'dirty' state for a cache block changes far too frequently for us
 to keep updating it on the fly. So we treat it as a hint. In normal
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 8a7a3d46e0da..05a27e9442bd 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -116,6 +116,35 @@ Resuming a device with a new table itself triggers an event so the
 userspace daemon can use this to detect a situation where a new table
 already exceeds the threshold.
 
+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache. If power is lost you may lose some recent
+writes. The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space). If metadata
+space is exhausted or a metadata operation fails: the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation. Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed. It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted. Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem) it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
 Thin provisioning
 -----------------
 
@@ -258,10 +287,9 @@ ii) Status
     should register for the event and then check the target's status.
 
     held metadata root:
-	The location, in sectors, of the metadata root that has been
+	The location, in blocks, of the metadata root that has been
 	'held' for userspace read access. '-' indicates there is no
-	held root. This feature is not yet implemented so '-' is
-	always returned.
+	held root.
 
     discard_passdown|no_discard_passdown
 	Whether or not discards are actually being passed down to the
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index a6a352c2771e..5992dceec7af 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -21,9 +21,9 @@ Required Properties:
     must appear in the same order as the output clocks.
   - #clock-cells: Must be 1
   - clock-output-names: The name of the clocks as free-form strings
-  - renesas,indices: Indices of the gate clocks into the group (0 to 31)
+  - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)
 
-The clocks, clock-output-names and renesas,indices properties contain one
+The clocks, clock-output-names and renesas,clock-indices properties contain one
 entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
 gate clocks must not be declared.
 
diff --git a/Documentation/devicetree/bindings/net/opencores-ethoc.txt b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
new file mode 100644
index 000000000000..2dc127c30d9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
@@ -0,0 +1,22 @@
+* OpenCores MAC 10/100 Mbps
+
+Required properties:
+- compatible: Should be "opencores,ethoc".
+- reg: two memory regions (address and length),
+  first region is for the device registers and descriptor rings,
+  second is for the device packet memory.
+- interrupts: interrupt for the device.
+
+Optional properties:
+- clocks: phandle to refer to the clk used as per
+  Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Examples:
+
+	enet0: ethoc@fd030000 {
+		compatible = "opencores,ethoc";
+		reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+		interrupts = <1>;
+		local-mac-address = [00 50 c2 13 6f 00];
+		clocks = <&osc>;
+	};
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index f3089d423515..0cbe6ec22d6f 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -554,12 +554,6 @@ solution for a couple of reasons:
       not specified in the struct can_frame and therefore it is only valid in
       CANFD_MTU sized CAN FD frames.
 
-    As long as the payload length is <=8 the received CAN frames from CAN FD
-    capable CAN devices can be received and read by legacy sockets too. When
-    user-generated CAN FD frames have a payload length <=8 these can be send
-    by legacy CAN network interfaces too. Sending CAN FD frames with payload
-    length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
-
   Implementation hint for new CAN applications:
 
   To build a CAN FD aware application use struct canfd_frame as basic CAN
diff --git a/MAINTAINERS b/MAINTAINERS
index c6d0e93eff62..b7befe758429 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -73,7 +73,8 @@ Descriptions of section entries:
 L: Mailing list that is relevant to this area
 W: Web-page with status/info
 Q: Patchwork web based patch tracking system site
-T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
+T: SCM tree type and location.
+   Type is one of: git, hg, quilt, stgit, topgit
 S: Status, one of the following:
    Supported: Someone is actually paid to look after this.
    Maintained: Someone actually looks after it.
@@ -473,7 +474,7 @@ F: net/rxrpc/af_rxrpc.c
 
 AGPGART DRIVER
 M: David Airlie <airlied@linux.ie>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
 S: Maintained
 F: drivers/char/agp/
 F: include/linux/agp*
@@ -1612,11 +1613,11 @@ S: Maintained
 F: drivers/net/wireless/atmel*
 
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
-M: Bradley Grove <linuxdrivers@attotech.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.attotech.com
-S: Supported
-F: drivers/scsi/esas2r
+M: Bradley Grove <linuxdrivers@attotech.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.attotech.com
+S: Supported
+F: drivers/scsi/esas2r
 
 AUDIT SUBSYSTEM
 M: Eric Paris <eparis@redhat.com>
@@ -2159,7 +2160,7 @@ F: Documentation/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
 M: Peter Chen <Peter.Chen@freescale.com>
-T: git://github.com/hzpeterchen/linux-usb.git
+T: git git://github.com/hzpeterchen/linux-usb.git
 L: linux-usb@vger.kernel.org
 S: Maintained
 F: drivers/usb/chipidea/
@@ -2179,9 +2180,9 @@ S: Supported
 F: drivers/net/ethernet/cisco/enic/
 
 CISCO VIC LOW LATENCY NIC DRIVER
-M: Upinder Malhi <umalhi@cisco.com>
-S: Supported
-F: drivers/infiniband/hw/usnic
+M: Upinder Malhi <umalhi@cisco.com>
+S: Supported
+F: drivers/infiniband/hw/usnic
 
 CIRRUS LOGIC EP93XX ETHERNET DRIVER
 M: Hartley Sweeten <hsweeten@visionengravers.com>
@@ -2378,20 +2379,20 @@ F: drivers/cpufreq/arm_big_little.c
 F: drivers/cpufreq/arm_big_little_dt.c
 
 CPUIDLE DRIVER - ARM BIG LITTLE
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
-L: linux-pm@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-S: Maintained
-F: drivers/cpuidle/cpuidle-big_little.c
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Daniel Lezcano <daniel.lezcano@linaro.org>
+L: linux-pm@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+S: Maintained
+F: drivers/cpuidle/cpuidle-big_little.c
 
 CPUIDLE DRIVERS
 M: Rafael J. Wysocki <rjw@rjwysocki.net>
 M: Daniel Lezcano <daniel.lezcano@linaro.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
 F: drivers/cpuidle/*
 F: include/linux/cpuidle.h
@@ -2458,9 +2459,9 @@ S: Maintained
 F: sound/pci/cs5535audio/
 
 CW1200 WLAN driver
-M: Solomon Peachy <pizza@shaftnet.org>
-S: Maintained
-F: drivers/net/wireless/cw1200/
+M: Solomon Peachy <pizza@shaftnet.org>
+S: Maintained
+F: drivers/net/wireless/cw1200/
 
 CX18 VIDEO4LINUX DRIVER
 M: Andy Walls <awalls@md.metrocast.net>
@@ -3095,6 +3096,8 @@ F: fs/ecryptfs/
 
 EDAC-CORE
 M: Doug Thompson <dougthompson@xmission.com>
+M: Borislav Petkov <bp@alien8.de>
+M: Mauro Carvalho Chehab <m.chehab@samsung.com>
 L: linux-edac@vger.kernel.org
 W: bluesmoke.sourceforge.net
 S: Supported
@@ -4558,6 +4561,7 @@ F: Documentation/networking/ixgbevf.txt
 F: Documentation/networking/i40e.txt
 F: Documentation/networking/i40evf.txt
 F: drivers/net/ethernet/intel/
+F: drivers/net/ethernet/intel/*/
 
 INTEL-MID GPIO DRIVER
 M: David Cohen <david.a.cohen@linux.intel.com>
@@ -4914,7 +4918,7 @@ F: drivers/staging/ktap/
 
 KCONFIG
 M: "Yann E. MORIN" <yann.morin.1998@free.fr>
 L: linux-kbuild@vger.kernel.org
-T: git://gitorious.org/linux-kconfig/linux-kconfig
+T: git git://gitorious.org/linux-kconfig/linux-kconfig
 S: Maintained
 F: Documentation/kbuild/kconfig-language.txt
 F: scripts/kconfig/
@@ -5471,11 +5475,11 @@ S: Maintained
 F: drivers/media/tuners/m88ts2022*
 
 MA901 MASTERKIT USB FM RADIO DRIVER
-M: Alexey Klimov <klimov.linux@gmail.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-S: Maintained
-F: drivers/media/radio/radio-ma901.c
+M: Alexey Klimov <klimov.linux@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-ma901.c
 
 MAC80211
 M: Johannes Berg <johannes@sipsolutions.net>
@@ -5636,7 +5640,7 @@ F: drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
 M: Amir Vadai <amirv@mellanox.com>
-L: netdev@vger.kernel.org
+L: netdev@vger.kernel.org
 S: Supported
 W: http://www.mellanox.com
 Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -5677,7 +5681,7 @@ F: include/linux/mtd/
 F: include/uapi/mtd/
 
 MEN A21 WATCHDOG DRIVER
-M: Johannes Thumshirn <johannes.thumshirn@men.de>
+M: Johannes Thumshirn <johannes.thumshirn@men.de>
 L: linux-watchdog@vger.kernel.org
 S: Supported
 F: drivers/watchdog/mena21_wdt.c
@@ -5733,20 +5737,20 @@ L: linux-rdma@vger.kernel.org
 W: http://www.mellanox.com
 Q: http://patchwork.ozlabs.org/project/netdev/list/
 Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
 S: Supported
 F: drivers/net/ethernet/mellanox/mlx5/core/
 F: include/linux/mlx5/
 
 Mellanox MLX5 IB driver
-M: Eli Cohen <eli@mellanox.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
-S: Supported
-F: include/linux/mlx5/
-F: drivers/infiniband/hw/mlx5/
+M: Eli Cohen <eli@mellanox.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+T: git git://openfabrics.org/~eli/connect-ib.git
+S: Supported
+F: include/linux/mlx5/
+F: drivers/infiniband/hw/mlx5/
 
 MODULE SUPPORT
 M: Rusty Russell <rusty@rustcorp.com.au>
@@ -6171,6 +6175,12 @@ S: Supported
 F: drivers/block/nvme*
 F: include/linux/nvme.h
 
+NXP TDA998X DRM DRIVER
+M: Russell King <rmk+kernel@arm.linux.org.uk>
+S: Supported
+F: drivers/gpu/drm/i2c/tda998x_drv.c
+F: include/drm/i2c/tda998x.h
+
 OMAP SUPPORT
 M: Tony Lindgren <tony@atomide.com>
 L: linux-omap@vger.kernel.org
@@ -8700,17 +8710,17 @@ S: Maintained
 F: drivers/media/radio/radio-raremono.c
 
 THERMAL
-M: Zhang Rui <rui.zhang@intel.com>
-M: Eduardo Valentin <eduardo.valentin@ti.com>
-L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
-Q: https://patchwork.kernel.org/project/linux-pm/list/
-S: Supported
-F: drivers/thermal/
-F: include/linux/thermal.h
-F: include/linux/cpu_cooling.h
-F: Documentation/devicetree/bindings/thermal/
+M: Zhang Rui <rui.zhang@intel.com>
+M: Eduardo Valentin <eduardo.valentin@ti.com>
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
+Q: https://patchwork.kernel.org/project/linux-pm/list/
+S: Supported
+F: drivers/thermal/
+F: include/linux/thermal.h
+F: include/linux/cpu_cooling.h
+F: Documentation/devicetree/bindings/thermal/ THINGM BLINK(1) USB RGB LED DRIVER M: Vivien Didelot <vivien.didelot@savoirfairelinux.com> @@ -9812,7 +9822,7 @@ ZR36067 VIDEO FOR LINUX DRIVER L: mjpeg-users@lists.sourceforge.net L: linux-media@vger.kernel.org W: http://mjpeg.sourceforge.net/driver-zoran/ -T: Mercurial http://linuxtv.org/hg/v4l-dvb +T: hg http://linuxtv.org/hg/v4l-dvb S: Odd Fixes F: drivers/media/pci/zoran/ diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 6b58c1de7577..400c663b21c2 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, #else /* if V-P const for loop, PTAG can be written once outside loop */ if (full_page_op) - write_aux_reg(ARC_REG_DC_PTAG, paddr); + write_aux_reg(aux_tag, paddr); #endif while (num_lines-- > 0) { @@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, write_aux_reg(aux_cmd, vaddr); vaddr += L1_CACHE_BYTES; #else - write_aux_reg(aux, paddr); + write_aux_reg(aux_cmd, paddr); paddr += L1_CACHE_BYTES; #endif } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e25419817791..15949459611f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF choice prompt "Memory split" + depends on MMU default VMSPLIT_3G help Select the desired split between kernel and user memory. @@ -1595,6 +1596,7 @@ endchoice config PAGE_OFFSET hex + default PHYS_OFFSET if !MMU default 0x40000000 if VMSPLIT_1G default 0x80000000 if VMSPLIT_2G default 0xC0000000 @@ -1903,6 +1905,7 @@ config XEN depends on ARM && AEABI && OF depends on CPU_V7 && !CPU_V6 depends on !GENERIC_ATOMIC64 + depends on MMU select ARM_PSCI select SWIOTLB_XEN select ARCH_DMA_ADDR_T_64BIT diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index 47279aa96a6a..0714e0334e33 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -1,4 +1,5 @@ ashldi3.S +bswapsdi2.S font.c lib1funcs.S hyp-stub.S diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi index 2363593e1050..ef58d1c24313 100644 --- a/arch/arm/boot/dts/keystone-clocks.dtsi +++ b/arch/arm/boot/dts/keystone-clocks.dtsi @@ -612,7 +612,7 @@ clocks { compatible = "ti,keystone,psc-clock"; clocks = <&chipclk13>; clock-output-names = "vcp-3"; - reg = <0x0235000a8 0xb00>, <0x02350060 0x400>; + reg = <0x023500a8 0xb00>, <0x02350060 0x400>; reg-names = "control", "domain"; domain-id = <24>; }; diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 8756e4bcdba0..4afb376d9c7c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -30,14 +30,15 @@ */ #define UL(x) _AC(x, UL) +/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) + #ifdef CONFIG_MMU /* - * PAGE_OFFSET - the virtual address of the start of the kernel image * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ -#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) @@ -104,10 +105,6 @@ #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif -#ifndef PAGE_OFFSET -#define PAGE_OFFSET PLAT_PHYS_OFFSET -#endif - /* * The module can be at any place in ram in nommu mode. 
*/ diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 47cd974e57ea..c96ecacb2021 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S @@ -177,6 +177,18 @@ __lookup_processor_type_data: .long __proc_info_end .size __lookup_processor_type_data, . - __lookup_processor_type_data +__error_lpae: +#ifdef CONFIG_DEBUG_LL + adr r0, str_lpae + bl printascii + b __error +str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n" +#else + b __error +#endif + .align +ENDPROC(__error_lpae) + __error_p: #ifdef CONFIG_DEBUG_LL adr r0, str_p1 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 914616e0bdcd..f5f381d91556 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -102,7 +102,7 @@ ENTRY(stext) and r3, r3, #0xf @ extract VMSA support cmp r3, #5 @ long-descriptor translation table format? THUMB( it lo ) @ force fixup-able long branch encoding - blo __error_p @ only classic page table format + blo __error_lpae @ only classic page table format #endif #ifndef CONFIG_XIP_KERNEL diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h index f33679d2d3ee..50e1d850ee2e 100644 --- a/arch/arm/mach-sa1100/include/mach/collie.h +++ b/arch/arm/mach-sa1100/include/mach/collie.h @@ -13,6 +13,8 @@ #ifndef __ASM_ARCH_COLLIE_H #define __ASM_ARCH_COLLIE_H +#include "hardware.h" /* Gives GPIO_MAX */ + extern void locomolcd_power(int on); #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1) diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 2b3a56414271..ef69152f9b52 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c @@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) note_page(st, addr, 3, pmd_val(*pmd)); else walk_pte(st, pmd, addr); + + if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) + note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1])); } } diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h index 09c5a0f5f4d1..86648c083bb4 100644 --- a/arch/c6x/include/asm/cache.h +++ b/arch/c6x/include/asm/cache.h @@ -12,6 +12,7 @@ #define _ASM_C6X_CACHE_H #include <linux/irqflags.h> +#include <linux/init.h> /* * Cache line size diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8d4c247f1738..af064d28b365 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) flush_altivec_to_thread(src); flush_vsx_to_thread(src); flush_spe_to_thread(src); + /* + * Flush TM state out so we can copy it. __switch_to_tm() does this + * flush but it removes the checkpointed state from the current CPU and + * transitions the CPU out of TM mode. Hence we need to call + * tm_recheckpoint_new_task() (on the same task) to restore the + * checkpointed state back and the TM mode. 
+ */ + __switch_to_tm(src); + tm_recheckpoint_new_task(src); *dst = *src; diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index 1482327cfeba..d88736fbece6 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S @@ -81,6 +81,7 @@ _GLOBAL(relocate) 6: blr +.balign 8 p_dyn: .llong __dynamic_start - 0b p_rela: .llong __rela_dyn_start - 0b p_st: .llong _stext - 0b diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h index 673515bc4135..aa1b2b9088a7 100644 --- a/arch/sh/include/cpu-sh2/cpu/cache.h +++ b/arch/sh/include/cpu-sh2/cpu/cache.h @@ -18,7 +18,7 @@ #define SH_CACHE_ASSOC 8 #if defined(CONFIG_CPU_SUBTYPE_SH7619) -#define CCR 0xffffffec +#define SH_CCR 0xffffffec #define CCR_CACHE_CE 0x01 /* Cache enable */ #define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */ diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h index defb0baa5a06..b27ce92cb600 100644 --- a/arch/sh/include/cpu-sh2a/cpu/cache.h +++ b/arch/sh/include/cpu-sh2a/cpu/cache.h @@ -17,8 +17,8 @@ #define SH_CACHE_COMBINED 4 #define SH_CACHE_ASSOC 8 -#define CCR 0xfffc1000 /* CCR1 */ -#define CCR2 0xfffc1004 +#define SH_CCR 0xfffc1000 /* CCR1 */ +#define SH_CCR2 0xfffc1004 /* * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h index bee2d81c56bf..29700fd88c75 100644 --- a/arch/sh/include/cpu-sh3/cpu/cache.h +++ b/arch/sh/include/cpu-sh3/cpu/cache.h @@ -17,7 +17,7 @@ #define SH_CACHE_COMBINED 4 #define SH_CACHE_ASSOC 8 -#define CCR 0xffffffec /* Address of Cache Control Register */ +#define SH_CCR 0xffffffec /* Address of Cache Control Register */ #define CCR_CACHE_CE 0x01 /* Cache Enable */ #define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */ diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h index 7bfb9e8b069c..92c4cd119b66 100644 --- a/arch/sh/include/cpu-sh4/cpu/cache.h +++ b/arch/sh/include/cpu-sh4/cpu/cache.h @@ -17,7 +17,7 @@ #define SH_CACHE_COMBINED 4 #define SH_CACHE_ASSOC 8 -#define CCR 0xff00001c /* Address of Cache Control Register */ +#define SH_CCR 0xff00001c /* Address of Cache Control Register */ #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */ #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/ #define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */ diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index ecf83cd158dc..0d7360d549c1 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -112,7 +112,7 @@ static void cache_init(void) unsigned long ccr, flags; jump_to_uncached(); - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); /* * At this point we don't know whether the cache is enabled or not - a @@ -189,7 +189,7 @@ static void cache_init(void) l2_cache_init(); - __raw_writel(flags, CCR); + __raw_writel(flags, SH_CCR); back_to_cached(); } #else diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c index 115725198038..777e50f33c00 100644 --- a/arch/sh/mm/cache-debugfs.c +++ b/arch/sh/mm/cache-debugfs.c @@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter) */ jump_to_uncached(); - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); if ((ccr & CCR_CACHE_ENABLE) == 0) { back_to_cached(); diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c index defcf719f2e8..a74259f2f981 100644 --- 
a/arch/sh/mm/cache-sh2.c +++ b/arch/sh/mm/cache-sh2.c @@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size) local_irq_save(flags); jump_to_uncached(); - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); ccr |= CCR_CACHE_INVALIDATE; - __raw_writel(ccr, CCR); + __raw_writel(ccr, SH_CCR); back_to_cached(); local_irq_restore(flags); diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c index 949e2d3138a0..ee87d081259b 100644 --- a/arch/sh/mm/cache-sh2a.c +++ b/arch/sh/mm/cache-sh2a.c @@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size) /* If there are too many pages then just blow the cache */ if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) { - __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); + __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE, + SH_CCR); } else { for (v = begin; v < end; v += L1_CACHE_BYTES) sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v); @@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args) /* I-Cache invalidate */ /* If there are too many pages then just blow the cache */ if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { - __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR); + __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE, + SH_CCR); } else { for (v = start; v < end; v += L1_CACHE_BYTES) sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v); diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 0e529285b28d..51d8f7f31d1d 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -133,9 +133,9 @@ static void flush_icache_all(void) jump_to_uncached(); /* Flush I-cache */ - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); ccr |= CCR_CACHE_ICI; - __raw_writel(ccr, CCR); + __raw_writel(ccr, SH_CCR); /* * back_to_cached() will take care of the barrier for us, don't add diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c index c0adbee97b5f..24c58b7dc022 100644 --- a/arch/sh/mm/cache-shx3.c +++ b/arch/sh/mm/cache-shx3.c @@ -19,7 +19,7 @@ void __init shx3_cache_init(void) { unsigned int ccr; - ccr = __raw_readl(CCR); + ccr = __raw_readl(SH_CCR); /* * If we've got cache aliases, resolve them in hardware. 
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void) ccr |= CCR_CACHE_IBE; #endif - writel_uncached(ccr, CCR); + writel_uncached(ccr, SH_CCR); } diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 616966a96cba..097c2cdd117f 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c @@ -285,8 +285,8 @@ void __init cpu_cache_init(void) { unsigned int cache_disabled = 0; -#ifdef CCR - cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); +#ifdef SH_CCR + cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE); #endif compute_alias(&boot_cpu_data.icache); diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 3d6b9f81cc68..acd86c850414 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -134,6 +134,7 @@ extern void efi_setup_page_tables(void); extern void __init old_map_region(efi_memory_desc_t *md); extern void __init runtime_code_page_mkexec(void); extern void __init efi_runtime_mkexec(void); +extern void __init efi_apply_memmap_quirks(void); struct efi_setup_data { u64 fw_vendor; diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 81ba27679f18..f36bd42d6f0c 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers) /* This is global to keep gas from relaxing the jumps */ ENTRY(early_idt_handler) cld + + cmpl $2,(%esp) # X86_TRAP_NMI + je is_nmi # Ignore NMI + cmpl $2,%ss:early_recursion_flag je hlt_loop incl %ss:early_recursion_flag @@ -594,8 +598,9 @@ ex_entry: pop %edx pop %ecx pop %eax - addl $8,%esp /* drop vector number and error code */ decl %ss:early_recursion_flag +is_nmi: + addl $8,%esp /* drop vector number and error code */ iret ENDPROC(early_idt_handler) diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index e1aabdb314c8..a468c0a65c42 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -343,6 +343,9 @@ early_idt_handlers: ENTRY(early_idt_handler) cld + cmpl $2,(%rsp) # X86_TRAP_NMI + je is_nmi # Ignore NMI + cmpl $2,early_recursion_flag(%rip) jz 1f incl early_recursion_flag(%rip) @@ -405,8 +408,9 @@ ENTRY(early_idt_handler) popq %rdx popq %rcx popq %rax - addq $16,%rsp # drop vector number and error code decl early_recursion_flag(%rip) +is_nmi: + addq $16,%rsp # drop vector number and error code INTERRUPT_RETURN ENDPROC(early_idt_handler) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 06853e670354..ce72964b2f46 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p) register_refined_jiffies(CLOCK_TICK_RATE); #ifdef CONFIG_EFI - /* Once setup is done above, unmap the EFI memory map on - * mismatched firmware/kernel archtectures since there is no - * support for runtime services. - */ - if (efi_enabled(EFI_BOOT) && !efi_is_native()) { - pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); - efi_unmap_memmap(); - } + if (efi_enabled(EFI_BOOT)) + efi_apply_memmap_quirks(); #endif } diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6dea040cc3a1..a10c8c792161 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -1020,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. + * + * This function must have noinline because both callers + * {,trace_}do_page_fault() have notrace on. 
Having this an actual function + * guarantees there's a function trace entry. */ -static void __kprobes -__do_page_fault(struct pt_regs *regs, unsigned long error_code) +static void __kprobes noinline +__do_page_fault(struct pt_regs *regs, unsigned long error_code, + unsigned long address) { struct vm_area_struct *vma; struct task_struct *tsk; - unsigned long address; struct mm_struct *mm; int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -1034,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) tsk = current; mm = tsk->mm; - /* Get the faulting address: */ - address = read_cr2(); - /* * Detect and handle instructions that would cause a page fault for * both a tracked kernel page and a userspace page. @@ -1248,32 +1249,50 @@ good_area: up_read(&mm->mmap_sem); } -dotraplinkage void __kprobes +dotraplinkage void __kprobes notrace do_page_fault(struct pt_regs *regs, unsigned long error_code) { + unsigned long address = read_cr2(); /* Get the faulting address */ enum ctx_state prev_state; + /* + * We must have this function tagged with __kprobes, notrace and call + * read_cr2() before calling anything else. To avoid calling any kind + * of tracing machinery before we've observed the CR2 value. + * + * exception_{enter,exit}() contain all sorts of tracepoints. + */ + prev_state = exception_enter(); - __do_page_fault(regs, error_code); + __do_page_fault(regs, error_code, address); exception_exit(prev_state); } -static void trace_page_fault_entries(struct pt_regs *regs, +#ifdef CONFIG_TRACING +static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs, unsigned long error_code) { if (user_mode(regs)) - trace_page_fault_user(read_cr2(), regs, error_code); + trace_page_fault_user(address, regs, error_code); else - trace_page_fault_kernel(read_cr2(), regs, error_code); + trace_page_fault_kernel(address, regs, error_code); } -dotraplinkage void __kprobes +dotraplinkage void __kprobes notrace trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) { + /* + * The exception_enter and tracepoint processing could + * trigger another page faults (user space callchain + * reading) and destroy the original cr2 value, so read + * the faulting address now. + */ + unsigned long address = read_cr2(); enum ctx_state prev_state; prev_state = exception_enter(); - trace_page_fault_entries(regs, error_code); - __do_page_fault(regs, error_code); + trace_page_fault_entries(address, regs, error_code); + __do_page_fault(regs, error_code, address); exception_exit(prev_state); } +#endif /* CONFIG_TRACING */ diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1a201ac7cef8..b97acecf3fd9 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -52,6 +52,7 @@ #include <asm/tlbflush.h> #include <asm/x86_init.h> #include <asm/rtc.h> +#include <asm/uv/uv.h> #define EFI_DEBUG @@ -1210,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str) return 0; } early_param("efi", parse_efi_cmdline); + +void __init efi_apply_memmap_quirks(void) +{ + /* + * Once setup is done earlier, unmap the EFI memory map on mismatched + * firmware/kernel architectures since there is no support for runtime + * services. + */ + if (!efi_is_native()) { + pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); + efi_unmap_memmap(); + } + + /* + * UV doesn't support the new EFI pagetable mapping yet. 
+ */ + if (is_uv_system()) + set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); +} diff --git a/block/blk-exec.c b/block/blk-exec.c index c68613bb4c79..dbf4502b1d67 100644 --- a/block/blk-exec.c +++ b/block/blk-exec.c @@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, * be resued after dying flag is set */ if (q->mq_ops) { - blk_mq_insert_request(q, rq, at_head, true); + blk_mq_insert_request(rq, at_head, true, false); return; } diff --git a/block/blk-flush.c b/block/blk-flush.c index 66e2b697f5db..f598f794c3c6 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c @@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work) rq = container_of(work, struct request, mq_flush_work); memset(&rq->csd, 0, sizeof(rq->csd)); - blk_mq_run_request(rq, true, false); + blk_mq_insert_request(rq, false, true, false); } static bool blk_flush_queue_rq(struct request *rq) @@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq) if ((policy & REQ_FSEQ_DATA) && !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { if (q->mq_ops) { - blk_mq_run_request(rq, false, true); + blk_mq_insert_request(rq, false, false, true); } else list_add_tail(&rq->queuelist, &q->queue_head); return; diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c index 3146befb56aa..136ef8643bba 100644 --- a/block/blk-mq-cpu.c +++ b/block/blk-mq-cpu.c @@ -11,7 +11,7 @@ #include "blk-mq.h" static LIST_HEAD(blk_mq_cpu_notify_list); -static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); +static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); static int blk_mq_main_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) @@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self, unsigned int cpu = (unsigned long) hcpu; struct blk_mq_cpu_notifier *notify; - spin_lock(&blk_mq_cpu_notify_lock); + raw_spin_lock(&blk_mq_cpu_notify_lock); list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) notify->notify(notify->data, action, cpu); - spin_unlock(&blk_mq_cpu_notify_lock); + raw_spin_unlock(&blk_mq_cpu_notify_lock); return NOTIFY_OK; } @@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier) { BUG_ON(!notifier->notify); - spin_lock(&blk_mq_cpu_notify_lock); + raw_spin_lock(&blk_mq_cpu_notify_lock); list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); - spin_unlock(&blk_mq_cpu_notify_lock); + raw_spin_unlock(&blk_mq_cpu_notify_lock); } void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) { - spin_lock(&blk_mq_cpu_notify_lock); + raw_spin_lock(&blk_mq_cpu_notify_lock); list_del(¬ifier->list); - spin_unlock(&blk_mq_cpu_notify_lock); + raw_spin_unlock(&blk_mq_cpu_notify_lock); } void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, diff --git a/block/blk-mq.c b/block/blk-mq.c index 1fa9dd153fde..883f72089015 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx, set_bit(ctx->index_hw, hctx->ctx_map); } -static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp, - bool reserved) +static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, + gfp_t gfp, bool reserved) { struct request *rq; unsigned int tag; @@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, ctx->rq_dispatched[rw_is_sync(rw_flags)]++; } -static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx, - gfp_t gfp, bool reserved) -{ - return blk_mq_alloc_rq(hctx, gfp, 
reserved); -} - static struct request *blk_mq_alloc_request_pinned(struct request_queue *q, int rw, gfp_t gfp, bool reserved) @@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq) __blk_mq_free_request(hctx, ctx, rq); } -static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error) -{ - if (error) - clear_bit(BIO_UPTODATE, &bio->bi_flags); - else if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) - error = -EIO; - - if (unlikely(rq->cmd_flags & REQ_QUIET)) - set_bit(BIO_QUIET, &bio->bi_flags); - - /* don't actually finish bio if it's part of flush sequence */ - if (!(rq->cmd_flags & REQ_FLUSH_SEQ)) - bio_endio(bio, error); -} - -void blk_mq_end_io(struct request *rq, int error) +bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes) { - struct bio *bio = rq->bio; - unsigned int bytes = 0; - - trace_block_rq_complete(rq->q, rq); - - while (bio) { - struct bio *next = bio->bi_next; - - bio->bi_next = NULL; - bytes += bio->bi_iter.bi_size; - blk_mq_bio_endio(rq, bio, error); - bio = next; - } - - blk_account_io_completion(rq, bytes); + if (blk_update_request(rq, error, blk_rq_bytes(rq))) + return true; blk_account_io_done(rq); @@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error) rq->end_io(rq, error); else blk_mq_free_request(rq); + return false; } -EXPORT_SYMBOL(blk_mq_end_io); +EXPORT_SYMBOL(blk_mq_end_io_partial); static void __blk_mq_complete_request_remote(void *data) { @@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, blk_mq_add_timer(rq); } -void blk_mq_insert_request(struct request_queue *q, struct request *rq, - bool at_head, bool run_queue) +void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, + bool async) { + struct request_queue *q = rq->q; struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx, *current_ctx; + struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; + + current_ctx = blk_mq_get_ctx(q); + if (!cpu_online(ctx->cpu)) + rq->mq_ctx = ctx = current_ctx; - ctx = rq->mq_ctx; hctx = q->mq_ops->map_queue(q, ctx->cpu); - if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) { + if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) && + !(rq->cmd_flags & (REQ_FLUSH_SEQ))) { blk_insert_flush(rq); } else { - current_ctx = blk_mq_get_ctx(q); - - if (!cpu_online(ctx->cpu)) { - ctx = current_ctx; - hctx = q->mq_ops->map_queue(q, ctx->cpu); - rq->mq_ctx = ctx; - } spin_lock(&ctx->lock); __blk_mq_insert_request(hctx, rq, at_head); spin_unlock(&ctx->lock); - - blk_mq_put_ctx(current_ctx); - } - - if (run_queue) - __blk_mq_run_hw_queue(hctx); -} -EXPORT_SYMBOL(blk_mq_insert_request); - -/* - * This is a special version of blk_mq_insert_request to bypass FLUSH request - * check. Should only be used internally. 
- */ -void blk_mq_run_request(struct request *rq, bool run_queue, bool async) -{ - struct request_queue *q = rq->q; - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx, *current_ctx; - - current_ctx = blk_mq_get_ctx(q); - - ctx = rq->mq_ctx; - if (!cpu_online(ctx->cpu)) { - ctx = current_ctx; - rq->mq_ctx = ctx; } - hctx = q->mq_ops->map_queue(q, ctx->cpu); - - /* ctx->cpu might be offline */ - spin_lock(&ctx->lock); - __blk_mq_insert_request(hctx, rq, false); - spin_unlock(&ctx->lock); blk_mq_put_ctx(current_ctx); @@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); + if (is_sync) + rw |= REQ_SYNC; trace_block_getrq(q, bio, rw); rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); if (likely(rq)) diff --git a/block/blk-mq.h b/block/blk-mq.h index ed0035cd458e..72beba1f9d55 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -23,7 +23,6 @@ struct blk_mq_ctx { }; void __blk_mq_complete_request(struct request *rq); -void blk_mq_run_request(struct request *rq, bool run_queue, bool async); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_init_flush(struct request_queue *q); void blk_mq_drain_queue(struct request_queue *q); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 959d41acc108..d7d32c28829b 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -67,6 +67,8 @@ enum ec_command { #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ +#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query + * when trying to clear the EC */ enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ @@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec); static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ /* -------------------------------------------------------------------------- Transaction Management @@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void) EXPORT_SYMBOL(ec_get_handle); +static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); + +/* + * Clears stale _Q events that might have accumulated in the EC. + * Run with locked ec mutex. 
+ */ +static void acpi_ec_clear(struct acpi_ec *ec) +{ + int i, status; + u8 value = 0; + + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + status = acpi_ec_query_unlocked(ec, &value); + if (status || !value) + break; + } + + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); +} + void acpi_ec_block_transactions(void) { struct acpi_ec *ec = first_ec; @@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void) mutex_lock(&ec->mutex); /* Allow transactions to be carried out again */ clear_bit(EC_FLAGS_BLOCKED, &ec->flags); + + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); + mutex_unlock(&ec->mutex); } @@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device) /* EC is fully operational, allow queries */ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); + + /* Clear stale _Q events if hardware might require that */ + if (EC_FLAGS_CLEAR_ON_RESUME) { + mutex_lock(&ec->mutex); + acpi_ec_clear(ec); + mutex_unlock(&ec->mutex); + } return ret; } @@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) return 0; } +/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) + * + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + * + * Ideally, the EC should also be instructed NOT to accumulate events during + * sleep (which Windows seems to do somehow), but the interface to control this + * behaviour is not known at this time. + * + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, + * however it is very likely that other Samsung models are affected. + * + * On systems which don't accumulate _Q events during sleep, this extra check + * should be harmless. 
+ */ +static int ec_clear_on_resume(const struct dmi_system_id *id) +{ + pr_debug("Detected system needing EC poll on resume.\n"); + EC_FLAGS_CLEAR_ON_RESUME = 1; + return 0; +} + static struct dmi_system_id ec_dmi_table[] __initdata = { { ec_skip_dsdt_scan, "Compal JFL92", { @@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL}, + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, {}, }; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index b7201fc6f1e1..0bdacc5e26a3 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_MEMORY24: memory24 = &ares->data.memory24; + if (!memory24->address_length) + return false; acpi_dev_get_memresource(res, memory24->minimum, memory24->address_length, memory24->write_protect); break; case ACPI_RESOURCE_TYPE_MEMORY32: memory32 = &ares->data.memory32; + if (!memory32->address_length) + return false; acpi_dev_get_memresource(res, memory32->minimum, memory32->address_length, memory32->write_protect); break; case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: fixed_memory32 = &ares->data.fixed_memory32; + if (!fixed_memory32->address_length) + return false; acpi_dev_get_memresource(res, fixed_memory32->address, fixed_memory32->address_length, fixed_memory32->write_protect); @@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_IO: io = &ares->data.io; + if (!io->address_length) + return false; acpi_dev_get_ioresource(res, io->minimum, io->address_length, io->io_decode); break; case ACPI_RESOURCE_TYPE_FIXED_IO: fixed_io = &ares->data.fixed_io; + if (!fixed_io->address_length) + return false; acpi_dev_get_ioresource(res, fixed_io->address, fixed_io->address_length, ACPI_DECODE_10); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1a3dbd1b196e..65d3f1b5966c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ @@ -4225,6 +4226,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* devices that don't properly handle queued TRIM commands */ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT???M500SSD3", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, /* * Some WD SATA-I drives spin up and down erratically when the link diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 8184451b57c0..422b7d84f686 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio) /* Non-zero page count for non-head members of * compound pages is no longer allowed by the kernel. 
*/ - page = compound_trans_head(bv.bv_page); + page = compound_head(bv.bv_page); atomic_inc(&page->_count); } } @@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio) struct bvec_iter iter; bio_for_each_segment(bv, bio, iter) { - page = compound_trans_head(bv.bv_page); + page = compound_head(bv.bv_page); atomic_dec(&page->_count); } } diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index b52e9a6d6aad..54174cb32feb 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -53,7 +53,7 @@ #define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000 /* unaligned IO handling */ -#define MTIP_MAX_UNALIGNED_SLOTS 8 +#define MTIP_MAX_UNALIGNED_SLOTS 2 /* Macro to extract the tag bit number from a tag value. */ #define MTIP_TAG_BIT(tag) (tag & 0x1F) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 011e55d820b1..51c557cfd92b 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev, disksize = PAGE_ALIGN(disksize); meta = zram_meta_alloc(disksize); + if (!meta) + return -ENOMEM; down_write(&zram->init_lock); if (zram->init_done) { up_write(&zram->init_lock); diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index bd313f7816a8..c1af80bcdf20 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -242,7 +242,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc, irq = irq_of_parse_and_map(np, 0); if (!irq) - return; + goto out_free_characteristics; clk = at91_clk_register_master(pmc, irq, name, num_parents, parent_names, layout, diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index 6a934a5296bd..05e04ce0f148 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = { static int __init nomadik_src_clk_init_debugfs(void) { + /* Vital for multiplatform */ + if (!src_base) + return -ENODEV; src_pcksr0_boot = readl(src_base + SRC_PCKSR0); src_pcksr1_boot = readl(src_base + SRC_PCKSR1); debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5517944495d8..c42e608af6bb 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2226,24 +2226,25 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister); */ int __clk_get(struct clk *clk) { - if (clk && !try_module_get(clk->owner)) - return 0; + if (clk) { + if (!try_module_get(clk->owner)) + return 0; - kref_get(&clk->ref); + kref_get(&clk->ref); + } return 1; } void __clk_put(struct clk *clk) { - if (WARN_ON_ONCE(IS_ERR(clk))) + if (!clk || WARN_ON_ONCE(IS_ERR(clk))) return; clk_prepare_lock(); kref_put(&clk->ref, __clk_release); clk_prepare_unlock(); - if (clk) - module_put(clk->owner); + module_put(clk->owner); } /*** clk rate change notifiers ***/ diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c index 17a598398a53..86f1e362eafb 100644 --- a/drivers/clk/keystone/gate.c +++ b/drivers/clk/keystone/gate.c @@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev, init.name = name; init.ops = &clk_psc_ops; + init.flags = 0; init.parent_names = (parent_name ? &parent_name : NULL); init.num_parents = (parent_name ? 
1 : 0); diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c index 81a202d12a7a..bef198a83863 100644 --- a/drivers/clk/mvebu/armada-370.c +++ b/drivers/clk/mvebu/armada-370.c @@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = { .num_ratios = ARRAY_SIZE(a370_coreclk_ratios), }; -static void __init a370_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &a370_coreclks); -} -CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock", - a370_coreclk_init); - /* * Clock Gating Control */ @@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = { { } }; -static void __init a370_clk_gating_init(struct device_node *np) +static void __init a370_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, a370_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock"); + + mvebu_coreclk_setup(np, &a370_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, a370_gating_desc); } -CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock", - a370_clk_gating_init); +CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init); + diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c index 9922c4475aa8..b3094315a3c0 100644 --- a/drivers/clk/mvebu/armada-xp.c +++ b/drivers/clk/mvebu/armada-xp.c @@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = { .num_ratios = ARRAY_SIZE(axp_coreclk_ratios), }; -static void __init axp_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &axp_coreclks); -} -CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock", - axp_coreclk_init); - /* * Clock Gating Control */ @@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = { { } }; -static void __init axp_clk_gating_init(struct device_node *np) +static void __init axp_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, axp_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock"); + + mvebu_coreclk_setup(np, &axp_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, axp_gating_desc); } -CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock", - axp_clk_gating_init); +CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init); diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c index 38aee1e3f242..b8c2424ac926 100644 --- a/drivers/clk/mvebu/dove.c +++ b/drivers/clk/mvebu/dove.c @@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = { .num_ratios = ARRAY_SIZE(dove_coreclk_ratios), }; -static void __init dove_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &dove_coreclks); -} -CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init); - /* * Clock Gating Control */ @@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = { { } }; -static void __init dove_clk_gating_init(struct device_node *np) +static void __init dove_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, dove_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock"); + + mvebu_coreclk_setup(np, &dove_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, dove_gating_desc); } -CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock", - dove_clk_gating_init); +CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init); diff 
--git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c index 2636a55f29f9..ddb666a86500 100644 --- a/drivers/clk/mvebu/kirkwood.c +++ b/drivers/clk/mvebu/kirkwood.c @@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = { .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), }; -static void __init kirkwood_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &kirkwood_coreclks); -} -CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock", - kirkwood_coreclk_init); - static const struct coreclk_soc_desc mv88f6180_coreclks = { .get_tclk_freq = kirkwood_get_tclk_freq, .get_cpu_freq = mv88f6180_get_cpu_freq, @@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = { .num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios), }; -static void __init mv88f6180_coreclk_init(struct device_node *np) -{ - mvebu_coreclk_setup(np, &mv88f6180_coreclks); -} -CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock", - mv88f6180_coreclk_init); - /* * Clock Gating Control */ @@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = { { } }; -static void __init kirkwood_clk_gating_init(struct device_node *np) +static void __init kirkwood_clk_init(struct device_node *np) { - mvebu_clk_gating_setup(np, kirkwood_gating_desc); + struct device_node *cgnp = + of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock"); + + + if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock")) + mvebu_coreclk_setup(np, &mv88f6180_coreclks); + else + mvebu_coreclk_setup(np, &kirkwood_coreclks); + + if (cgnp) + mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc); } -CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock", - kirkwood_clk_gating_init); +CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock", + kirkwood_clk_init); +CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock", + kirkwood_clk_init); diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c index a59ec217a124..99c27b1c625b 100644 --- a/drivers/clk/shmobile/clk-rcar-gen2.c +++ b/drivers/clk/shmobile/clk-rcar-gen2.c @@ -26,6 +26,8 @@ struct rcar_gen2_cpg { void __iomem *reg; }; +#define CPG_FRQCRB 0x00000004 +#define CPG_FRQCRB_KICK BIT(31) #define CPG_SDCKCR 0x00000074 #define CPG_PLL0CR 0x000000d8 #define CPG_FRQCRC 0x000000e0 @@ -45,6 +47,7 @@ struct rcar_gen2_cpg { struct cpg_z_clk { struct clk_hw hw; void __iomem *reg; + void __iomem *kick_reg; }; #define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw) @@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate, { struct cpg_z_clk *zclk = to_z_clk(hw); unsigned int mult; - u32 val; + u32 val, kick; + unsigned int i; mult = div_u64((u64)rate * 32, parent_rate); mult = clamp(mult, 1U, 32U); + if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK) + return -EBUSY; + val = clk_readl(zclk->reg); val &= ~CPG_FRQCRC_ZFC_MASK; val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT; clk_writel(val, zclk->reg); - return 0; + /* + * Set KICK bit in FRQCRB to update hardware setting and wait for + * clock change completion. + */ + kick = clk_readl(zclk->kick_reg); + kick |= CPG_FRQCRB_KICK; + clk_writel(kick, zclk->kick_reg); + + /* + * Note: There is no HW information about the worst case latency. + * + * Using experimental measurements, it seems that no more than + * ~10 iterations are needed, independently of the CPU rate. 
+ * Since the required count might depend on the external xtal rate, the + * pll1 rate or even the other emulation clock rates, use 1000 as a + * "super" safe value. + */ + for (i = 1000; i; i--) { + if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)) + return 0; + + cpu_relax(); + } + + return -ETIMEDOUT; } static const struct clk_ops cpg_z_clk_ops = { @@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg) init.num_parents = 1; zclk->reg = cpg->reg + CPG_FRQCRC; + zclk->kick_reg = cpg->reg + CPG_FRQCRB; zclk->hw.init = &init; clk = clk_register(NULL, &zclk->hw); @@ -186,7 +218,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, const char *name) { const struct clk_div_table *table = NULL; - const char *parent_name = "main"; + const char *parent_name; unsigned int shift; unsigned int mult = 1; unsigned int div = 1; @@ -201,23 +233,31 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, * the multiplier value. */ u32 value = clk_readl(cpg->reg + CPG_PLL0CR); + parent_name = "main"; mult = ((value >> 24) & ((1 << 7) - 1)) + 1; } else if (!strcmp(name, "pll1")) { + parent_name = "main"; mult = config->pll1_mult / 2; } else if (!strcmp(name, "pll3")) { + parent_name = "main"; mult = config->pll3_mult; } else if (!strcmp(name, "lb")) { + parent_name = "pll1_div2"; div = cpg_mode & BIT(18) ? 36 : 24; } else if (!strcmp(name, "qspi")) { + parent_name = "pll1_div2"; div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2) - ? 16 : 20; + ? 8 : 10; } else if (!strcmp(name, "sdh")) { + parent_name = "pll1_div2"; table = cpg_sdh_div_table; shift = 8; } else if (!strcmp(name, "sd0")) { + parent_name = "pll1_div2"; table = cpg_sd01_div_table; shift = 4; } else if (!strcmp(name, "sd1")) { + parent_name = "pll1_div2"; table = cpg_sd01_div_table; shift = 0; } else if (!strcmp(name, "z")) { diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c index 4d75b1f37e3a..290f9c1a3749 100644 --- a/drivers/clk/tegra/clk-divider.c +++ b/drivers/clk/tegra/clk-divider.c @@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate, return 0; if (divider_ux1 > get_max_div(divider)) - return -EINVAL; + return get_max_div(divider); return divider_ux1; } diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index cf0c323f2c36..c39613c519af 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -180,9 +180,13 @@ enum clk_id { tegra_clk_sbc6_8, tegra_clk_sclk, tegra_clk_sdmmc1, + tegra_clk_sdmmc1_8, tegra_clk_sdmmc2, + tegra_clk_sdmmc2_8, tegra_clk_sdmmc3, + tegra_clk_sdmmc3_8, tegra_clk_sdmmc4, + tegra_clk_sdmmc4_8, tegra_clk_se, tegra_clk_soc_therm, tegra_clk_sor0, diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 5c35885f4a7c..1fa5c3f33b20 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -371,9 +371,7 @@ static const char *mux_pllp3_pllc_clkm[] = { static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = { "pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m" }; -static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = { - [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6, -}; +#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = { "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4", @@ -465,6 +463,10 @@ static struct tegra_periph_init_data periph_clks[] = { 
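/* Each entry maps a clock name to its parent mux, source register, peripheral clock number, gate flags and clk id; see the MUX()/MUX8() macro definitions for the exact fields. */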
MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1), MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1), MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2), + MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8), + MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8), + MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8), + MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8), MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8), MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8), MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8), @@ -492,7 +494,7 @@ static struct tegra_periph_init_data periph_clks[] = { UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb), UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc), UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd), - UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte), + UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte), XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src), XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src), XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src), diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c index 05dce4aa2c11..feb3201c85ce 100644 --- a/drivers/clk/tegra/clk-tegra-super-gen4.c +++ b/drivers/clk/tegra/clk-tegra-super-gen4.c @@ -120,7 +120,7 @@ void __init tegra_super_clk_gen4_init(void __iomem *clk_base, ARRAY_SIZE(cclk_lp_parents), CLK_SET_RATE_PARENT, clk_base + CCLKLP_BURST_POLICY, - 0, 4, 8, 9, NULL); + TEGRA_DIVIDER_2, 4, 8, 9, NULL); *dt_clk = clk; } diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 90d9d25f2228..80431f0fb268 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -682,12 +682,12 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true }, [tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true }, [tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true }, - [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true }, + [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true }, [tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true }, [tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true }, [tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true }, - [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true }, - [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true }, + [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true }, + [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true }, [tegra_clk_pwm] = { .dt_id = 
TEGRA114_CLK_PWM, .present = true }, [tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true }, [tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true }, @@ -723,7 +723,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true }, [tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true }, [tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true }, - [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true }, + [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true }, [tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true }, [tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true }, [tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true }, diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index aff86b5bc745..166e02f16c8a 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -516,11 +516,11 @@ static struct div_nmp pllp_nmp = { }; static struct tegra_clk_pll_freq_table pll_p_freq_table[] = { - {12000000, 216000000, 432, 12, 1, 8}, - {13000000, 216000000, 432, 13, 1, 8}, - {16800000, 216000000, 360, 14, 1, 8}, - {19200000, 216000000, 360, 16, 1, 8}, - {26000000, 216000000, 432, 26, 1, 8}, + {12000000, 408000000, 408, 12, 0, 8}, + {13000000, 408000000, 408, 13, 0, 8}, + {16800000, 408000000, 340, 14, 0, 8}, + {19200000, 408000000, 340, 16, 0, 8}, + {26000000, 408000000, 408, 26, 0, 8}, {0, 0, 0, 0, 0, 0}, }; @@ -570,6 +570,15 @@ static struct tegra_clk_pll_params pll_a_params = { .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK, }; +static struct div_nmp plld_nmp = { + .divm_shift = 0, + .divm_width = 5, + .divn_shift = 8, + .divn_width = 11, + .divp_shift = 20, + .divp_width = 3, +}; + static struct tegra_clk_pll_freq_table pll_d_freq_table[] = { {12000000, 216000000, 864, 12, 4, 12}, {13000000, 216000000, 864, 13, 4, 12}, @@ -603,19 +612,18 @@ static struct tegra_clk_pll_params pll_d_params = { .lock_mask = PLL_BASE_LOCK, .lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE, .lock_delay = 1000, - .div_nmp = &pllp_nmp, + .div_nmp = &plld_nmp, .freq_table = pll_d_freq_table, .flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON | TEGRA_PLL_USE_LOCK, }; static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = { - { 12000000, 148500000, 99, 1, 8}, - { 12000000, 594000000, 99, 1, 1}, - { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */ - { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */ - { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */ - { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */ + { 12000000, 594000000, 99, 1, 2}, + { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */ + { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */ + { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */ + { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */ { 0, 0, 0, 0, 0, 0 }, }; @@ -753,21 +761,19 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true }, [tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true }, [tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true }, - [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, + [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true }, [tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true }, [tegra_clk_i2c1] = { .dt_id = 
TEGRA124_CLK_I2C1, .present = true }, [tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true }, - [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, - [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, + [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true }, + [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true }, [tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true }, [tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true }, - [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true }, [tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true }, [tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true }, - [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true }, [tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true }, [tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true }, - [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, + [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true }, [tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true }, [tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true }, [tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true }, @@ -794,7 +800,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true }, [tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true }, [tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true }, - [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, + [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true }, [tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true }, [tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true }, [tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true }, @@ -1286,9 +1292,9 @@ static void __init tegra124_pll_init(void __iomem *clk_base, clk_register_clkdev(clk, "pll_d2", NULL); clks[TEGRA124_CLK_PLL_D2] = clk; - /* PLLD2_OUT0 ?? 
*/ + /* PLLD2_OUT0 */ clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2", - CLK_SET_RATE_PARENT, 1, 2); + CLK_SET_RATE_PARENT, 1, 1); clk_register_clkdev(clk, "pll_d2_out0", NULL); clks[TEGRA124_CLK_PLL_D2_OUT0] = clk; diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index dbace152b2fa..dace2b1b5ae6 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -574,6 +574,8 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = { [tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true }, [tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true }, [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true }, + [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true }, + [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true }, }; static unsigned long tegra20_clk_measure_input_freq(void) diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cb003a6b72c8..cf485d928903 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1109,6 +1109,21 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, goto err_set_policy_cpu; } + /* related cpus should at least have policy->cpus */ + cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); + + /* + * affected cpus must always be the ones that are online. We aren't + * managing offline cpus here. + */ + cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); + + if (!frozen) { + policy->user_policy.min = policy->min; + policy->user_policy.max = policy->max; + } + + down_write(&policy->rwsem); write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = policy; @@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, } } - /* related cpus should atleast have policy->cpus */ - cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); - - /* - * affected cpus must always be the one, which are online. We aren't - * managing offline cpus here. 
- */ - cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); - - if (!frozen) { - policy->user_policy.min = policy->min; - policy->user_policy.max = policy->max; - } - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); @@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; } + up_write(&policy->rwsem); kobject_uevent(&policy->kobj, KOBJ_ADD); up_read(&cpufreq_rwsem); @@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu) */ unsigned int cpufreq_get(unsigned int cpu) { - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); unsigned int ret_freq = 0; - if (cpufreq_disabled() || !cpufreq_driver) - return -ENOENT; - - BUG_ON(!policy); - - if (!down_read_trylock(&cpufreq_rwsem)) - return 0; - - down_read(&policy->rwsem); - - ret_freq = __cpufreq_get(cpu); + if (policy) { + down_read(&policy->rwsem); + ret_freq = __cpufreq_get(cpu); + up_read(&policy->rwsem); - up_read(&policy->rwsem); - up_read(&cpufreq_rwsem); + cpufreq_cpu_put(policy); + } return ret_freq; } diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index de4aa409abe2..2c6d5e118ac1 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data) old->config_rom_retries = 0; fw_notice(card, "rediscovered device %s\n", dev_name(dev)); - PREPARE_DELAYED_WORK(&old->work, fw_device_update); + old->workfn = fw_device_update; fw_schedule_device_work(old, 0); if (current_node == card->root_node) @@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work) if (atomic_cmpxchg(&device->state, FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); } else { fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", @@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work) dev_name(&device->device), fw_rcode_string(ret)); gone: atomic_set(&device->state, FW_DEVICE_GONE); - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); out: if (node_id == card->root_node->node_id) fw_schedule_bm_work(card, 0); } +static void fw_device_workfn(struct work_struct *work) +{ + struct fw_device *device = container_of(to_delayed_work(work), + struct fw_device, work); + device->workfn(work); +} + void fw_node_event(struct fw_card *card, struct fw_node *node, int event) { struct fw_device *device; @@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) * power-up after getting plugged in. We schedule the * first config rom scan half a second after bus reset. 
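 * The delayed work always runs fw_device_workfn(), which simply
 * invokes whichever handler is currently stored in device->workfn.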
*/ - INIT_DELAYED_WORK(&device->work, fw_device_init); + device->workfn = fw_device_init; + INIT_DELAYED_WORK(&device->work, fw_device_workfn); fw_schedule_device_work(device, INITIAL_DELAY); break; @@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) if (atomic_cmpxchg(&device->state, FW_DEVICE_RUNNING, FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_refresh); + device->workfn = fw_device_refresh; fw_schedule_device_work(device, device->is_local ? 0 : INITIAL_DELAY); } @@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) smp_wmb(); /* update node_id before generation */ device->generation = card->generation; if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_update); + device->workfn = fw_device_update; fw_schedule_device_work(device, 0); } break; @@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) device = node->data; if (atomic_xchg(&device->state, FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); } diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 6b895986dc22..4af0a7bad7f2 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, if (rcode == RCODE_COMPLETE) { fwnet_transmit_packet_done(ptask); } else { - fwnet_transmit_packet_failed(ptask); - if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { dev_err(&ptask->dev->netdev->dev, "fwnet_write_complete failed: %x (skipped %d)\n", @@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, errors_skipped = 0; last_rcode = rcode; - } else + } else { errors_skipped++; + } + fwnet_transmit_packet_failed(ptask); } } diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 6f74d8d3f700..8db663219560 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_NO_MSI 0x10 #define QUIRK_TI_SLLZ059 0x20 #define QUIRK_IR_WAKE 0x40 -#define QUIRK_PHY_LCTRL_TIMEOUT 0x80 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { @@ -303,10 +302,7 @@ static const struct { QUIRK_BE_HEADERS}, {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, - QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI}, - - {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID, - QUIRK_PHY_LCTRL_TIMEOUT}, + QUIRK_NO_MSI}, {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, QUIRK_RESET_PACKET}, @@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", disable MSI = " __stringify(QUIRK_NO_MSI) ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) - ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT) ")"); #define OHCI_PARAM_DEBUG_AT_AR 1 @@ -2299,9 +2294,6 @@ static int ohci_enable(struct fw_card *card, * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but * cannot actually use the phy at that time. These need tens of * millisecods pause between LPS write and first phy access too. - * - * But do not wait for 50msec on Agere/LSI cards. Their phy - * arbitration state machine may time out during such a long wait. 
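+ * All controllers therefore get the same treatment below: poll LPS up
+ * to three times, sleeping 50 ms before each read.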
*/ reg_write(ohci, OHCI1394_HCControlSet, @@ -2309,11 +2301,8 @@ static int ohci_enable(struct fw_card *card, OHCI1394_HCControl_postedWriteEnable); flush_writes(ohci); - if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT)) + for (lps = 0, i = 0; !lps && i < 3; i++) { msleep(50); - - for (lps = 0, i = 0; !lps && i < 150; i++) { - msleep(1); lps = reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS; } diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 281029daf98c..7aef911fdc71 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -146,6 +146,7 @@ struct sbp2_logical_unit { */ int generation; int retries; + work_func_t workfn; struct delayed_work work; bool has_sdev; bool blocked; @@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work) /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ sbp2_set_busy_timeout(lu); - PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); + lu->workfn = sbp2_reconnect; sbp2_agent_reset(lu); /* This was a re-login. */ @@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work) * If a bus reset happened, sbp2_update will have requeued * lu->work already. Reset the work from reconnect to login. */ - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; } static void sbp2_reconnect(struct work_struct *work) @@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work) lu->retries++ >= 5) { dev_err(tgt_dev(tgt), "failed to reconnect\n"); lu->retries = 0; - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; } sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); @@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work) sbp2_conditionally_unblock(lu); } +static void sbp2_lu_workfn(struct work_struct *work) +{ + struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), + struct sbp2_logical_unit, work); + lu->workfn(work); +} + static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) { struct sbp2_logical_unit *lu; @@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) lu->blocked = false; ++tgt->dont_block; INIT_LIST_HEAD(&lu->orb_list); - INIT_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; + INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); list_add_tail(&lu->link, &tgt->lu_list); return 0; diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index acf3a36c9ebc..32982da82694 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev, { struct armada_private *priv = dev->dev_private; - /* - * Yes, we really must jump through these hoops just to store a - * _pointer_ to something into the kfifo. This is utterly insane - * and idiotic, because it kfifo requires the _data_ pointed to by - * the pointer const, not the pointer itself. Not only that, but - * you have to pass a pointer _to_ the pointer you want stored. 
- */ - const struct drm_framebuffer *silly_api_alert = fb; - WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert)); + WARN_ON(!kfifo_put(&priv->fb_unref, fb)); schedule_work(&priv->fb_unref_work); } diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig index c8fcf12019f0..5f8b0c2b9a44 100644 --- a/drivers/gpu/drm/bochs/Kconfig +++ b/drivers/gpu/drm/bochs/Kconfig @@ -2,6 +2,7 @@ config DRM_BOCHS tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" depends on DRM && PCI select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 04f1f02c4019..ec7bb0fc71bc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct pci_dev *pch; + struct pci_dev *pch = NULL; /* In all current cases, num_pipes is equivalent to the PCH_NOP setting * (which really amounts to a PCH but no South Display). @@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev) * all the ISA bridge devices and check for the first match, instead * of only checking the first one. */ - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); - while (pch) { - struct pci_dev *curr = pch; + while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { - unsigned short id; - id = pch->device & INTEL_PCH_DEVICE_ID_MASK; + unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; dev_priv->pch_id = id; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { @@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev) DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_ULT(dev)); - } else { - goto check_next; - } - pci_dev_put(pch); + } else + continue; + break; } -check_next: - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); - pci_dev_put(curr); } if (!pch) - DRM_DEBUG_KMS("No PCH found?\n"); + DRM_DEBUG_KMS("No PCH found.\n"); + + pci_dev_put(pch); } bool i915_semaphore_is_enabled(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 1a24e84f2315..d58b4e287e32 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, "Graphics Stolen Memory"); if (r == NULL) { - DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", - base, base + (uint32_t)dev_priv->gtt.stolen_size); - base = 0; + /* + * Make one more attempt, this time requesting the region + * from base + 1, as we have seen that this resolves the + * region conflict with the PCI bus. + * This is a BIOS w/a: some BIOSes wrap the stolen region + * into the root PCI bus, but with an off-by-one error. + * Hence retry the reservation starting from 1 instead of 0. 
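+ * The size is shrunk by one byte to match, so the retried region
+ * still ends at the same address as before.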
+ */ + r = devm_request_mem_region(dev->dev, base + 1, + dev_priv->gtt.stolen_size - 1, + "Graphics Stolen Memory"); + if (r == NULL) { + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", + base, base + (uint32_t)dev_priv->gtt.stolen_size); + base = 0; + } } return base; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4c1672809493..9b8a7c7ea7fc 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv, struct drm_device *dev = dev_priv->dev; bool cur_state; - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; - else if (IS_845G(dev) || IS_I865G(dev)) + if (IS_845G(dev) || IS_I865G(dev)) cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; - else + else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + else + cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; WARN(cur_state != state, "cursor on pipe %c assertion failure (expected %s, current %s)\n", diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 6db0d9d17f47..ee3181ebcc92 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi) { struct drm_device *dev = intel_hdmi_to_dev(hdmi); - if (IS_G4X(dev)) + if (!hdmi->has_hdmi_sink || IS_G4X(dev)) return 165000; else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) return 300000; @@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, * outputs. We also need to check that the higher clock still fits * within limits. 
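 * 12bpc also requires an HDMI sink: for DVI sinks the port clock is
 * already capped at 165MHz by hdmi_portclock_limit().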
*/ - if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit - && HAS_PCH_SPLIT(dev)) { + if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && + clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 350de359123a..079ea38f14d9 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) freq /= 0xff; ctl = freq << 17; - if (IS_GEN2(dev) && panel->backlight.combination_mode) + if (panel->backlight.combination_mode) ctl |= BLM_LEGACY_MODE; if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) ctl |= BLM_POLARITY_PNV; @@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector) ctl = I915_READ(BLC_PWM_CTL); - if (IS_GEN2(dev)) + if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; if (IS_PINEVIEW(dev)) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d77cc81900f9..e1fc35a72656 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev) u32 pcbr; int pctx_size = 24*1024; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + pcbr = I915_READ(VLV_PCBR); if (pcbr) { /* BIOS set it up already, grab the pre-alloc'd space */ @@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GTFIFODBG, gtfifodbg); } - valleyview_setup_pctx(dev); - /* If VLV, Forcewake all wells, else re-direct to regular path */ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); @@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev) ironlake_enable_rc6(dev); intel_init_emon(dev); } else if (IS_GEN6(dev) || IS_GEN7(dev)) { + if (IS_VALLEYVIEW(dev)) + valleyview_setup_pctx(dev); /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 2cec2ab02f80..607dc14d195e 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1314,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } if (is_dp) args.v5.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v5.ucLaneNum = 8; else args.v5.ucLaneNum = 4; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6419ca7cd37..e22be8458d92 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width) } /** - * cik_select_se_sh - select which SE, SH to address + * cik_get_rb_disabled - computes the mask of disabled RBs * * @rdev: radeon_device pointer * @max_rb_num: max RBs (render backends) for the asic @@ -7902,7 +7902,8 @@ int cik_resume(struct radeon_device *rdev) /* init golden registers */ cik_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cik_startup(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c 
index 8a2c010b7dc5..27b0ff16082e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev) /* init golden registers */ evergreen_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = evergreen_startup(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h index 76ada8cfe902..3a03ba37d043 100644 --- a/drivers/gpu/drm/radeon/evergreen_smc.h +++ b/drivers/gpu/drm/radeon/evergreen_smc.h @@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 -#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0 +#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ea932ac66fc6..bf6300cfd62d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev) /* init golden registers */ ni_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cayman_startup(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index ef024ce3f7cc..3cc78bb66042 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r100_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 7c63ef840e86..0b658b34b33a 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r300_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 3768aab2710b..802b19220a21 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r420_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index e209eb75024f..98d6053c36c6 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r520_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index cdbc4171fe73..647ef4079217 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev) /* post card */ atom_asic_init(rdev->mode_info.atom_context); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = r600_startup(rdev); diff --git 
a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b012cbbc3ed5..044bc98fb459 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (r) DRM_ERROR("ib ring test failed (%d).\n", r); - if (rdev->pm.dpm_enabled) { + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { /* do dpm late init */ r = radeon_pm_late_init(rdev); if (r) { rdev->pm.dpm_enabled = false; DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); } + } else { + /* resume old pm late */ + radeon_pm_resume(rdev); } radeon_restore_bios_scratch_regs(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 77f5b0c3edb8..040a2a10ea17 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + /* Change the size here instead of the init above so only lpfn is affected */ + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->stollen_vga_memory); @@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, while (size) { loff_t p = *pos / PAGE_SIZE; unsigned off = *pos & ~PAGE_MASK; - ssize_t cur_size = min(size, PAGE_SIZE - off); + size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); struct page *page; void *ptr; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index b5c2369cda2f..130d5cc50d43 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs400_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index fdcde7693032..72d3616de08e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs600_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 35950738bd5e..3462b64369bf 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs690_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 98e8138ff779..237dd29d9f1c 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rv515_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4e37a42305d8..fef310773aad 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev) /* init golden registers */ 
rv770_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = rv770_startup(rdev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 83578324e5d1..9a124d0608b3 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6618,7 +6618,8 @@ int si_resume(struct radeon_device *rdev) /* init golden registers */ si_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = si_startup(rdev); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e81c5547e647..f9c12e92fdd6 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -53,8 +53,8 @@ #include "user.h" #define DRV_NAME MLX4_IB_DRV_NAME -#define DRV_VERSION "1.0" -#define DRV_RELDATE "April 4, 2008" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb 2014" #define MLX4_IB_FLOW_MAX_PRIO 0xFFF #define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index aa03e732b6a8..bf900579ac08 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -46,8 +46,8 @@ #include "mlx5_ib.h" #define DRIVER_NAME "mlx5_ib" -#define DRIVER_VERSION "1.0" -#define DRIVER_RELDATE "June 2013" +#define DRIVER_VERSION "2.2-1" +#define DRIVER_RELDATE "Feb 2014" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver"); diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index d18d08a076e8..8ee228e9ab5a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -492,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_conn->state = ISER_CONN_INIT; INIT_LIST_HEAD(&isert_conn->conn_accept_node); init_completion(&isert_conn->conn_login_comp); - init_waitqueue_head(&isert_conn->conn_wait); - init_waitqueue_head(&isert_conn->conn_wait_comp_err); + init_completion(&isert_conn->conn_wait); + init_completion(&isert_conn->conn_wait_comp_err); kref_init(&isert_conn->conn_kref); kref_get(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); - mutex_init(&isert_conn->conn_comp_mutex); spin_lock_init(&isert_conn->conn_lock); cma_id->context = isert_conn; @@ -688,11 +687,11 @@ isert_disconnect_work(struct work_struct *work) pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); mutex_lock(&isert_conn->conn_mutex); - isert_conn->state = ISER_CONN_DOWN; + if (isert_conn->state == ISER_CONN_UP) + isert_conn->state = ISER_CONN_TERMINATING; if (isert_conn->post_recv_buf_count == 0 && atomic_read(&isert_conn->post_send_buf_count) == 0) { - pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); mutex_unlock(&isert_conn->conn_mutex); goto wake_up; } @@ -712,7 +711,7 @@ isert_disconnect_work(struct work_struct *work) mutex_unlock(&isert_conn->conn_mutex); wake_up: - wake_up(&isert_conn->conn_wait); + complete(&isert_conn->conn_wait); isert_put_conn(isert_conn); } @@ -888,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. 
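 * Batching is only attempted while the connection is in ISER_CONN_UP;
 * each queued descriptor is flagged via llnode_active so the TX error
 * path can recognise it later.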
*/ - mutex_lock(&isert_conn->conn_comp_mutex); - if (coalesce && + mutex_lock(&isert_conn->conn_mutex); + if (coalesce && isert_conn->state == ISER_CONN_UP && ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { + tx_desc->llnode_active = true; llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_comp_mutex); + mutex_unlock(&isert_conn->conn_mutex); return; } isert_conn->conn_comp_batch = 0; tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_comp_mutex); + mutex_unlock(&isert_conn->conn_mutex); send_wr->send_flags = IB_SEND_SIGNALED; } @@ -1464,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) case ISCSI_OP_SCSI_CMD: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); if (cmd->data_direction == DMA_TO_DEVICE) @@ -1476,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) case ISCSI_OP_SCSI_TMFUNC: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); transport_generic_free_cmd(&cmd->se_cmd, 0); @@ -1486,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) case ISCSI_OP_TEXT: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); /* @@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, iscsit_stop_dataout_timer(cmd); device->unreg_rdma_mem(isert_cmd, isert_conn); cmd->write_data_done = wr->cur_rdma_length; + wr->send_wr_num = 0; pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); spin_lock_bh(&cmd->istate_lock); @@ -1589,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work) pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); /* * Call atomic_dec(&isert_conn->post_send_buf_count) - * from isert_free_conn() + * from isert_wait_conn() */ isert_conn->logout_posted = true; iscsit_logout_post_handler(cmd, cmd->conn); @@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) { struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || cmd->i_state == ISTATE_SEND_LOGOUTRSP || @@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, queue_work(isert_comp_wq, &isert_cmd->comp_work); return; } - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(tx_desc, isert_cmd, ib_dev); @@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc, case ISER_IB_RDMA_READ: pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); isert_completion_rdma_read(tx_desc, isert_cmd); break; default: @@ -1691,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc, } static void -isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) +isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) +{ + struct llist_node *llnode; + struct isert_rdma_wr *wr; + struct iser_tx_desc *t; + + mutex_lock(&isert_conn->conn_mutex); + llnode = 
llist_del_all(&isert_conn->conn_comp_llist); + isert_conn->conn_comp_batch = 0; + mutex_unlock(&isert_conn->conn_mutex); + + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + wr = &t->isert_cmd->rdma_wr; + + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + isert_completion_put(t, t->isert_cmd, ib_dev); + } +} + +static void +isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + struct llist_node *llnode = tx_desc->comp_llnode_batch; + struct isert_rdma_wr *wr; + struct iser_tx_desc *t; - if (tx_desc) { - struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + wr = &t->isert_cmd->rdma_wr; - if (!isert_cmd) - isert_unmap_tx_desc(tx_desc, ib_dev); - else - isert_completion_put(tx_desc, isert_cmd, ib_dev); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + isert_completion_put(t, t->isert_cmd, ib_dev); } + tx_desc->comp_llnode_batch = NULL; - if (isert_conn->post_recv_buf_count == 0 && - atomic_read(&isert_conn->post_send_buf_count) == 0) { - pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - pr_debug("Calling wake_up from isert_cq_comp_err\n"); + if (!isert_cmd) + isert_unmap_tx_desc(tx_desc, ib_dev); + else + isert_completion_put(tx_desc, isert_cmd, ib_dev); +} - mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->state != ISER_CONN_DOWN) - isert_conn->state = ISER_CONN_TERMINATING; - mutex_unlock(&isert_conn->conn_mutex); +static void +isert_cq_rx_comp_err(struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct iscsi_conn *conn = isert_conn->conn; - wake_up(&isert_conn->conn_wait_comp_err); + if (isert_conn->post_recv_buf_count) + return; + + isert_cq_drain_comp_llist(isert_conn, ib_dev); + + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); } + + while (atomic_read(&isert_conn->post_send_buf_count)) + msleep(3000); + + mutex_lock(&isert_conn->conn_mutex); + isert_conn->state = ISER_CONN_DOWN; + mutex_unlock(&isert_conn->conn_mutex); + + complete(&isert_conn->conn_wait_comp_err); } static void @@ -1740,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work) pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); pr_debug("TX wc.status: 0x%08x\n", wc.status); pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); - atomic_dec(&isert_conn->post_send_buf_count); - isert_cq_comp_err(tx_desc, isert_conn); + + if (wc.wr_id != ISER_FASTREG_LI_WRID) { + if (tx_desc->llnode_active) + continue; + + atomic_dec(&isert_conn->post_send_buf_count); + isert_cq_tx_comp_err(tx_desc, isert_conn); + } } } @@ -1784,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work) wc.vendor_err); } isert_conn->post_recv_buf_count--; - isert_cq_comp_err(NULL, isert_conn); + isert_cq_rx_comp_err(isert_conn); } } @@ -2202,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, if (!fr_desc->valid) { memset(&inv_wr, 0, sizeof(inv_wr)); + inv_wr.wr_id = ISER_FASTREG_LI_WRID; inv_wr.opcode = IB_WR_LOCAL_INV; inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; wr = &inv_wr; @@ -2212,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, /* Prepare FASTREG WR */ memset(&fr_wr, 0, sizeof(fr_wr)); + fr_wr.wr_id = 
ISER_FASTREG_LI_WRID; fr_wr.opcode = IB_WR_FAST_REG_MR; fr_wr.wr.fast_reg.iova_start = fr_desc->data_frpl->page_list[0] + page_off; @@ -2377,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_init_send_wr(isert_conn, isert_cmd, &isert_cmd->tx_desc.send_wr, true); - atomic_inc(&isert_conn->post_send_buf_count); + atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) { pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); } pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", isert_cmd); @@ -2410,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) return rc; } - atomic_inc(&isert_conn->post_send_buf_count); + atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) { pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); } pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", isert_cmd); @@ -2702,22 +2757,11 @@ isert_free_np(struct iscsi_np *np) kfree(isert_np); } -static int isert_check_state(struct isert_conn *isert_conn, int state) -{ - int ret; - - mutex_lock(&isert_conn->conn_mutex); - ret = (isert_conn->state == state); - mutex_unlock(&isert_conn->conn_mutex); - - return ret; -} - -static void isert_free_conn(struct iscsi_conn *conn) +static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; - pr_debug("isert_free_conn: Starting \n"); + pr_debug("isert_wait_conn: Starting \n"); /* * Decrement post_send_buf_count for special case when called * from isert_do_control_comp() -> iscsit_logout_post_handler() @@ -2727,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn) atomic_dec(&isert_conn->post_send_buf_count); if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { - pr_debug("Calling rdma_disconnect from isert_free_conn\n"); + pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); rdma_disconnect(isert_conn->conn_cm_id); } /* * Only wait for conn_wait_comp_err if the isert_conn made it * into full feature phase.. 
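 * Both waits below use completions: conn_wait_comp_err is signalled
 * from isert_cq_rx_comp_err() and conn_wait from isert_disconnect_work().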
*/ - if (isert_conn->state == ISER_CONN_UP) { - pr_debug("isert_free_conn: Before wait_event comp_err %d\n", - isert_conn->state); - mutex_unlock(&isert_conn->conn_mutex); - - wait_event(isert_conn->conn_wait_comp_err, - (isert_check_state(isert_conn, ISER_CONN_TERMINATING))); - - wait_event(isert_conn->conn_wait, - (isert_check_state(isert_conn, ISER_CONN_DOWN))); - - isert_put_conn(isert_conn); - return; - } if (isert_conn->state == ISER_CONN_INIT) { mutex_unlock(&isert_conn->conn_mutex); - isert_put_conn(isert_conn); return; } - pr_debug("isert_free_conn: wait_event conn_wait %d\n", - isert_conn->state); + if (isert_conn->state == ISER_CONN_UP) + isert_conn->state = ISER_CONN_TERMINATING; mutex_unlock(&isert_conn->conn_mutex); - wait_event(isert_conn->conn_wait, - (isert_check_state(isert_conn, ISER_CONN_DOWN))); + wait_for_completion(&isert_conn->conn_wait_comp_err); + + wait_for_completion(&isert_conn->conn_wait); +} + +static void isert_free_conn(struct iscsi_conn *conn) +{ + struct isert_conn *isert_conn = conn->context; isert_put_conn(isert_conn); } @@ -2771,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = { .iscsit_setup_np = isert_setup_np, .iscsit_accept_np = isert_accept_np, .iscsit_free_np = isert_free_np, + .iscsit_wait_conn = isert_wait_conn, .iscsit_free_conn = isert_free_conn, .iscsit_get_login_rx = isert_get_login_rx, .iscsit_put_login_tx = isert_put_login_tx, diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 708a069002f3..f6ae7f5dd408 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -6,6 +6,7 @@ #define ISERT_RDMA_LISTEN_BACKLOG 10 #define ISCSI_ISER_SG_TABLESIZE 256 +#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL enum isert_desc_type { ISCSI_TX_CONTROL, @@ -45,6 +46,7 @@ struct iser_tx_desc { struct isert_cmd *isert_cmd; struct llist_node *comp_llnode_batch; struct llist_node comp_llnode; + bool llnode_active; struct ib_send_wr send_wr; } __packed; @@ -116,8 +118,8 @@ struct isert_conn { struct isert_device *conn_device; struct work_struct conn_logout_work; struct mutex conn_mutex; - wait_queue_head_t conn_wait; - wait_queue_head_t conn_wait_comp_err; + struct completion conn_wait; + struct completion conn_wait_comp_err; struct kref conn_kref; struct list_head conn_fr_pool; int conn_fr_pool_size; @@ -126,7 +128,6 @@ struct isert_conn { #define ISERT_COMP_BATCH_COUNT 8 int conn_comp_batch; struct llist_head conn_comp_llist; - struct mutex conn_comp_mutex; }; #define ISERT_MAX_CQ 64 diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 9a06fe883766..95ad936e6048 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -254,16 +254,6 @@ config DM_THIN_PROVISIONING ---help--- Provides thin provisioning and snapshots that share a data store. -config DM_DEBUG_BLOCK_STACK_TRACING - boolean "Keep stack trace of persistent data block lock holders" - depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA - select STACKTRACE - ---help--- - Enable this for messages that may help debug problems with the - block manager locking used by thin provisioning and caching. - - If unsure, say N. 
- config DM_CACHE tristate "Cache target (EXPERIMENTAL)" depends on BLK_DEV_DM diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 1e018e986610..0e385e40909e 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -872,7 +872,7 @@ static void mq_destroy(struct dm_cache_policy *p) { struct mq_policy *mq = to_mq_policy(p); - kfree(mq->table); + vfree(mq->table); epool_exit(&mq->cache_pool); epool_exit(&mq->pre_cache_pool); kfree(mq); @@ -1245,7 +1245,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); mq->hash_bits = ffs(mq->nr_buckets) - 1; - mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); + mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets); if (!mq->table) goto bad_alloc_table; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index afc3d017de4c..d6e88178d22c 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -546,6 +546,9 @@ static int read_exceptions(struct pstore *ps, r = insert_exceptions(ps, area, callback, callback_context, &full); + if (!full) + memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT); + dm_bufio_release(bp); dm_bufio_forget(client, chunk); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index baa87ff12816..fb9efc829182 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -76,7 +76,7 @@ #define THIN_SUPERBLOCK_MAGIC 27022010 #define THIN_SUPERBLOCK_LOCATION 0 -#define THIN_VERSION 1 +#define THIN_VERSION 2 #define THIN_METADATA_CACHE_SIZE 64 #define SECTOR_TO_BLOCK_SHIFT 3 @@ -1755,3 +1755,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, return r; } + +int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) +{ + int r; + struct dm_block *sblock; + struct thin_disk_superblock *disk_super; + + down_write(&pmd->root_lock); + pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; + + r = superblock_lock(pmd, &sblock); + if (r) { + DMERR("couldn't read superblock"); + goto out; + } + + disk_super = dm_block_data(sblock); + disk_super->flags = cpu_to_le32(pmd->flags); + + dm_bm_unlock(sblock); +out: + up_write(&pmd->root_lock); + return r; +} + +bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd) +{ + bool needs_check; + + down_read(&pmd->root_lock); + needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG; + up_read(&pmd->root_lock); + + return needs_check; +} diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 82ea384d36ff..e3c857db195a 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -25,6 +25,11 @@ /*----------------------------------------------------------------*/ +/* + * Thin metadata superblock flags. + */ +#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0) + struct dm_pool_metadata; struct dm_thin_device; @@ -202,6 +207,12 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_sm_threshold_fn fn, void *context); +/* + * Updates the superblock immediately. 
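+ * dm_pool_metadata_set_needs_check() persists THIN_METADATA_NEEDS_CHECK_FLAG
+ * in the superblock; dm_pool_metadata_needs_check() reports whether it is set.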
+ */ +int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd); +bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 7e84baccf0ad..be70d38745f7 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -130,10 +130,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, struct dm_thin_new_mapping; /* - * The pool runs in 3 modes. Ordered in degraded order for comparisons. + * The pool runs in 4 modes. Ordered in degraded order for comparisons. */ enum pool_mode { PM_WRITE, /* metadata may be changed */ + PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ PM_READ_ONLY, /* metadata may not be changed */ PM_FAIL, /* all I/O fails */ }; @@ -198,7 +199,6 @@ struct pool { }; static enum pool_mode get_pool_mode(struct pool *pool); -static void out_of_data_space(struct pool *pool); static void metadata_operation_failed(struct pool *pool, const char *op, int r); /* @@ -226,6 +226,7 @@ struct thin_c { struct pool *pool; struct dm_thin_device *td; + bool requeue_mode:1; }; /*----------------------------------------------------------------*/ @@ -369,14 +370,18 @@ struct dm_thin_endio_hook { struct dm_thin_new_mapping *overwrite_mapping; }; -static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) +static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) { struct bio *bio; struct bio_list bios; + unsigned long flags; bio_list_init(&bios); + + spin_lock_irqsave(&tc->pool->lock, flags); bio_list_merge(&bios, master); bio_list_init(master); + spin_unlock_irqrestore(&tc->pool->lock, flags); while ((bio = bio_list_pop(&bios))) { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); @@ -391,12 +396,26 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) static void requeue_io(struct thin_c *tc) { struct pool *pool = tc->pool; + + requeue_bio_list(tc, &pool->deferred_bios); + requeue_bio_list(tc, &pool->retry_on_resume_list); +} + +static void error_retry_list(struct pool *pool) +{ + struct bio *bio; unsigned long flags; + struct bio_list bios; + + bio_list_init(&bios); spin_lock_irqsave(&pool->lock, flags); - __requeue_bio_list(tc, &pool->deferred_bios); - __requeue_bio_list(tc, &pool->retry_on_resume_list); + bio_list_merge(&bios, &pool->retry_on_resume_list); + bio_list_init(&pool->retry_on_resume_list); spin_unlock_irqrestore(&pool->lock, flags); + + while ((bio = bio_list_pop(&bios))) + bio_io_error(bio); } /* @@ -925,13 +944,15 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) } } +static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); + static int alloc_data_block(struct thin_c *tc, dm_block_t *result) { int r; dm_block_t free_blocks; struct pool *pool = tc->pool; - if (get_pool_mode(pool) != PM_WRITE) + if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) return -EINVAL; r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); @@ -958,7 +979,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) } if (!free_blocks) { - out_of_data_space(pool); + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); return -ENOSPC; } } @@ -988,15 +1009,32 @@ static void retry_on_resume(struct bio *bio) spin_unlock_irqrestore(&pool->lock, flags); } -static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) +static bool 
should_error_unserviceable_bio(struct pool *pool) { - /* - * When pool is read-only, no cell locking is needed because - * nothing is changing. - */ - WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY); + enum pool_mode m = get_pool_mode(pool); + + switch (m) { + case PM_WRITE: + /* Shouldn't get here */ + DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); + return true; + + case PM_OUT_OF_DATA_SPACE: + return pool->pf.error_if_no_space; - if (pool->pf.error_if_no_space) + case PM_READ_ONLY: + case PM_FAIL: + return true; + default: + /* Shouldn't get here */ + DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); + return true; + } +} + +static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) +{ + if (should_error_unserviceable_bio(pool)) bio_io_error(bio); else retry_on_resume(bio); @@ -1007,11 +1045,20 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c struct bio *bio; struct bio_list bios; + if (should_error_unserviceable_bio(pool)) { + cell_error(pool, cell); + return; + } + bio_list_init(&bios); cell_release(pool, cell, &bios); - while ((bio = bio_list_pop(&bios))) - handle_unserviceable_bio(pool, bio); + if (should_error_unserviceable_bio(pool)) + while ((bio = bio_list_pop(&bios))) + bio_io_error(bio); + else + while ((bio = bio_list_pop(&bios))) + retry_on_resume(bio); } static void process_discard(struct thin_c *tc, struct bio *bio) @@ -1296,6 +1343,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio) } } +static void process_bio_success(struct thin_c *tc, struct bio *bio) +{ + bio_endio(bio, 0); +} + static void process_bio_fail(struct thin_c *tc, struct bio *bio) { bio_io_error(bio); @@ -1328,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool) struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); struct thin_c *tc = h->tc; + if (tc->requeue_mode) { + bio_endio(bio, DM_ENDIO_REQUEUE); + continue; + } + /* * If we've got no free new_mapping structs, and processing * this bio might require one, we pause until there are some @@ -1394,51 +1451,134 @@ static void do_waker(struct work_struct *ws) /*----------------------------------------------------------------*/ +struct noflush_work { + struct work_struct worker; + struct thin_c *tc; + + atomic_t complete; + wait_queue_head_t wait; +}; + +static void complete_noflush_work(struct noflush_work *w) +{ + atomic_set(&w->complete, 1); + wake_up(&w->wait); +} + +static void do_noflush_start(struct work_struct *ws) +{ + struct noflush_work *w = container_of(ws, struct noflush_work, worker); + w->tc->requeue_mode = true; + requeue_io(w->tc); + complete_noflush_work(w); +} + +static void do_noflush_stop(struct work_struct *ws) +{ + struct noflush_work *w = container_of(ws, struct noflush_work, worker); + w->tc->requeue_mode = false; + complete_noflush_work(w); +} + +static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) +{ + struct noflush_work w; + + INIT_WORK(&w.worker, fn); + w.tc = tc; + atomic_set(&w.complete, 0); + init_waitqueue_head(&w.wait); + + queue_work(tc->pool->wq, &w.worker); + + wait_event(w.wait, atomic_read(&w.complete)); +} + +/*----------------------------------------------------------------*/ + static enum pool_mode get_pool_mode(struct pool *pool) { return pool->pf.mode; } +static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) +{ + dm_table_event(pool->ti->table); + DMINFO("%s: switching pool to %s mode", + 
dm_device_name(pool->pool_md), new_mode); +} + static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) { - int r; - enum pool_mode old_mode = pool->pf.mode; + struct pool_c *pt = pool->ti->private; + bool needs_check = dm_pool_metadata_needs_check(pool->pmd); + enum pool_mode old_mode = get_pool_mode(pool); + + /* + * Never allow the pool to transition to PM_WRITE mode if user + * intervention is required to verify metadata and data consistency. + */ + if (new_mode == PM_WRITE && needs_check) { + DMERR("%s: unable to switch pool to write mode until repaired.", + dm_device_name(pool->pool_md)); + if (old_mode != new_mode) + new_mode = old_mode; + else + new_mode = PM_READ_ONLY; + } + /* + * If we were in PM_FAIL mode, rollback of metadata failed. We're + * not going to recover without a thin_repair. So we never let the + * pool move out of the old mode. + */ + if (old_mode == PM_FAIL) + new_mode = old_mode; switch (new_mode) { case PM_FAIL: if (old_mode != new_mode) - DMERR("%s: switching pool to failure mode", - dm_device_name(pool->pool_md)); + notify_of_pool_mode_change(pool, "failure"); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; pool->process_prepared_mapping = process_prepared_mapping_fail; pool->process_prepared_discard = process_prepared_discard_fail; + + error_retry_list(pool); break; case PM_READ_ONLY: if (old_mode != new_mode) - DMERR("%s: switching pool to read-only mode", - dm_device_name(pool->pool_md)); - r = dm_pool_abort_metadata(pool->pmd); - if (r) { - DMERR("%s: aborting transaction failed", - dm_device_name(pool->pool_md)); - new_mode = PM_FAIL; - set_pool_mode(pool, new_mode); - } else { - dm_pool_metadata_read_only(pool->pmd); - pool->process_bio = process_bio_read_only; - pool->process_discard = process_discard; - pool->process_prepared_mapping = process_prepared_mapping_fail; - pool->process_prepared_discard = process_prepared_discard_passdown; - } + notify_of_pool_mode_change(pool, "read-only"); + dm_pool_metadata_read_only(pool->pmd); + pool->process_bio = process_bio_read_only; + pool->process_discard = process_bio_success; + pool->process_prepared_mapping = process_prepared_mapping_fail; + pool->process_prepared_discard = process_prepared_discard_passdown; + + error_retry_list(pool); + break; + + case PM_OUT_OF_DATA_SPACE: + /* + * Ideally we'd never hit this state; the low water mark + * would trigger userland to extend the pool before we + * completely run out of data space. However, many small + * IOs to unprovisioned space can consume data space at an + * alarming rate. Adjust your low water mark if you're + * frequently seeing this mode. 
+ */ + if (old_mode != new_mode) + notify_of_pool_mode_change(pool, "out-of-data-space"); + pool->process_bio = process_bio_read_only; + pool->process_discard = process_discard; + pool->process_prepared_mapping = process_prepared_mapping; + pool->process_prepared_discard = process_prepared_discard_passdown; break; case PM_WRITE: if (old_mode != new_mode) - DMINFO("%s: switching pool to write mode", - dm_device_name(pool->pool_md)); + notify_of_pool_mode_change(pool, "write"); dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard; @@ -1448,32 +1588,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) } pool->pf.mode = new_mode; + /* + * The pool mode may have changed, sync it so bind_control_target() + * doesn't cause an unexpected mode transition on resume. + */ + pt->adjusted_pf.mode = new_mode; } -/* - * Rather than calling set_pool_mode directly, use these which describe the - * reason for mode degradation. - */ -static void out_of_data_space(struct pool *pool) +static void abort_transaction(struct pool *pool) { - DMERR_LIMIT("%s: no free data space available.", - dm_device_name(pool->pool_md)); - set_pool_mode(pool, PM_READ_ONLY); + const char *dev_name = dm_device_name(pool->pool_md); + + DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); + if (dm_pool_abort_metadata(pool->pmd)) { + DMERR("%s: failed to abort metadata transaction", dev_name); + set_pool_mode(pool, PM_FAIL); + } + + if (dm_pool_metadata_set_needs_check(pool->pmd)) { + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); + set_pool_mode(pool, PM_FAIL); + } } static void metadata_operation_failed(struct pool *pool, const char *op, int r) { - dm_block_t free_blocks; - DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", dm_device_name(pool->pool_md), op, r); - if (r == -ENOSPC && - !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) && - !free_blocks) - DMERR_LIMIT("%s: no free metadata space available.", - dm_device_name(pool->pool_md)); - + abort_transaction(pool); set_pool_mode(pool, PM_READ_ONLY); } @@ -1524,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) thin_hook_bio(tc, bio); + if (tc->requeue_mode) { + bio_endio(bio, DM_ENDIO_REQUEUE); + return DM_MAPIO_SUBMITTED; + } + if (get_pool_mode(tc->pool) == PM_FAIL) { bio_io_error(bio); return DM_MAPIO_SUBMITTED; @@ -1687,7 +1835,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) /* * We want to make sure that a pool in PM_FAIL mode is never upgraded. */ - enum pool_mode old_mode = pool->pf.mode; + enum pool_mode old_mode = get_pool_mode(pool); enum pool_mode new_mode = pt->adjusted_pf.mode; /* @@ -1701,16 +1849,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) pool->pf = pt->adjusted_pf; pool->low_water_blocks = pt->low_water_blocks; - /* - * If we were in PM_FAIL mode, rollback of metadata failed. We're - * not going to recover without a thin_repair. So we never let the - * pool move out of the old mode. On the other hand a PM_READ_ONLY - * may have been due to a lack of metadata or data space, and may - * now work (ie. if the underlying devices have been resized). 
- */ - if (old_mode == PM_FAIL) - new_mode = old_mode; - set_pool_mode(pool, new_mode); return 0; @@ -2253,6 +2391,12 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) return -EINVAL; } else if (data_size > sb_data_size) { + if (dm_pool_metadata_needs_check(pool->pmd)) { + DMERR("%s: unable to grow the data device until repaired.", + dm_device_name(pool->pool_md)); + return 0; + } + if (sb_data_size) DMINFO("%s: growing the data device from %llu to %llu blocks", dm_device_name(pool->pool_md), @@ -2294,6 +2438,12 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) return -EINVAL; } else if (metadata_dev_size > sb_metadata_dev_size) { + if (dm_pool_metadata_needs_check(pool->pmd)) { + DMERR("%s: unable to grow the metadata device until repaired.", + dm_device_name(pool->pool_md)); + return 0; + } + warn_if_metadata_device_too_big(pool->md_dev); DMINFO("%s: growing the metadata device from %llu to %llu blocks", dm_device_name(pool->pool_md), @@ -2681,7 +2831,9 @@ static void pool_status(struct dm_target *ti, status_type_t type, else DMEMIT("- "); - if (pool->pf.mode == PM_READ_ONLY) + if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) + DMEMIT("out_of_data_space "); + else if (pool->pf.mode == PM_READ_ONLY) DMEMIT("ro "); else DMEMIT("rw "); @@ -2795,7 +2947,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 10, 0}, + .version = {1, 11, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2997,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err) return 0; } -static void thin_postsuspend(struct dm_target *ti) +static void thin_presuspend(struct dm_target *ti) { + struct thin_c *tc = ti->private; + if (dm_noflush_suspending(ti)) - requeue_io((struct thin_c *)ti->private); + noflush_work(tc, do_noflush_start); +} + +static void thin_postsuspend(struct dm_target *ti) +{ + struct thin_c *tc = ti->private; + + /* + * The dm_noflush_suspending flag has been cleared by now, so + * unfortunately we must always run this. + */ + noflush_work(tc, do_noflush_stop); } /* @@ -3085,12 +3250,13 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 10, 0}, + .version = {1, 11, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, .map = thin_map, .end_io = thin_endio, + .presuspend = thin_presuspend, .postsuspend = thin_postsuspend, .status = thin_status, .iterate_devices = thin_iterate_devices, diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig index 19b268795415..0c2dec7aec20 100644 --- a/drivers/md/persistent-data/Kconfig +++ b/drivers/md/persistent-data/Kconfig @@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA ---help--- Library providing immutable on-disk data structure support for device-mapper targets such as the thin provisioning target. + +config DM_DEBUG_BLOCK_STACK_TRACING + boolean "Keep stack trace of persistent data block lock holders" + depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA + select STACKTRACE + ---help--- + Enable this for messages that may help debug problems with the + block manager locking used by thin provisioning and caching. + + If unsure, say N. 
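/*
 * [Editor's note -- an illustrative sketch, not part of the patch series.
 *  The dm-thin hunks above add noflush_work(), which runs a function
 *  synchronously on the pool's workqueue during a noflush suspend: an
 *  on-stack work item carries an atomic "complete" flag plus a waitqueue,
 *  and the worker wakes the submitter when it is done.  The same pattern
 *  in isolation; every name below is invented for the example:]
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

struct demo_sync_work {
	struct work_struct worker;
	atomic_t complete;
	wait_queue_head_t wait;
};

static void demo_work_fn(struct work_struct *ws)
{
	struct demo_sync_work *w =
		container_of(ws, struct demo_sync_work, worker);

	/* ... the real work happens here, on the workqueue thread ... */

	atomic_set(&w->complete, 1);
	wake_up(&w->wait);
}

/* Queue the work item and block until the worker flags it complete. */
static void demo_run_sync(struct workqueue_struct *wq)
{
	struct demo_sync_work w;

	INIT_WORK(&w.worker, demo_work_fn);
	atomic_set(&w.complete, 0);
	init_waitqueue_head(&w.wait);

	queue_work(wq, &w.worker);
	wait_event(w.wait, atomic_read(&w.complete));
}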
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index e9bdd462f4f5..786b689bdfc7 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -91,6 +91,69 @@ struct block_op { dm_block_t block; }; +struct bop_ring_buffer { + unsigned begin; + unsigned end; + struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1]; +}; + +static void brb_init(struct bop_ring_buffer *brb) +{ + brb->begin = 0; + brb->end = 0; +} + +static bool brb_empty(struct bop_ring_buffer *brb) +{ + return brb->begin == brb->end; +} + +static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old) +{ + unsigned r = old + 1; + return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r; +} + +static int brb_push(struct bop_ring_buffer *brb, + enum block_op_type type, dm_block_t b) +{ + struct block_op *bop; + unsigned next = brb_next(brb, brb->end); + + /* + * We don't allow the last bop to be filled, this way we can + * differentiate between full and empty. + */ + if (next == brb->begin) + return -ENOMEM; + + bop = brb->bops + brb->end; + bop->type = type; + bop->block = b; + + brb->end = next; + + return 0; +} + +static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) +{ + struct block_op *bop; + + if (brb_empty(brb)) + return -ENODATA; + + bop = brb->bops + brb->begin; + result->type = bop->type; + result->block = bop->block; + + brb->begin = brb_next(brb, brb->begin); + + return 0; +} + +/*----------------------------------------------------------------*/ + struct sm_metadata { struct dm_space_map sm; @@ -101,25 +164,20 @@ struct sm_metadata { unsigned recursion_count; unsigned allocated_this_transaction; - unsigned nr_uncommitted; - struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; + struct bop_ring_buffer uncommitted; struct threshold threshold; }; static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) { - struct block_op *op; + int r = brb_push(&smm->uncommitted, type, b); - if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) { + if (r) { DMERR("too many recursive allocations"); return -ENOMEM; } - op = smm->uncommitted + smm->nr_uncommitted++; - op->type = type; - op->block = b; - return 0; } @@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm) return -ENOMEM; } - if (smm->recursion_count == 1 && smm->nr_uncommitted) { - while (smm->nr_uncommitted && !r) { - smm->nr_uncommitted--; - r = commit_bop(smm, smm->uncommitted + - smm->nr_uncommitted); + if (smm->recursion_count == 1) { + while (!brb_empty(&smm->uncommitted)) { + struct block_op bop; + + r = brb_pop(&smm->uncommitted, &bop); + if (r) { + DMERR("bug in bop ring buffer"); + break; + } + + r = commit_bop(smm, &bop); if (r) break; } @@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count) static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, uint32_t *result) { - int r, i; + int r; + unsigned i; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); unsigned adjustment = 0; @@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, * We may have some uncommitted adjustments to add. This list * should always be really short. 
*/ - for (i = 0; i < smm->nr_uncommitted; i++) { - struct block_op *op = smm->uncommitted + i; + for (i = smm->uncommitted.begin; + i != smm->uncommitted.end; + i = brb_next(&smm->uncommitted, i)) { + struct block_op *op = smm->uncommitted.bops + i; if (op->block != b) continue; @@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b, int *result) { - int r, i, adjustment = 0; + int r, adjustment = 0; + unsigned i; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); uint32_t rc; @@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, * We may have some uncommitted adjustments to add. This list * should always be really short. */ - for (i = 0; i < smm->nr_uncommitted; i++) { - struct block_op *op = smm->uncommitted + i; + for (i = smm->uncommitted.begin; + i != smm->uncommitted.end; + i = brb_next(&smm->uncommitted, i)) { + + struct block_op *op = smm->uncommitted.bops + i; if (op->block != b) continue; @@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, smm->begin = superblock + 1; smm->recursion_count = 0; smm->allocated_this_transaction = 0; - smm->nr_uncommitted = 0; + brb_init(&smm->uncommitted); threshold_init(&smm->threshold); memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -715,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm, smm->begin = 0; smm->recursion_count = 0; smm->allocated_this_transaction = 0; - smm->nr_uncommitted = 0; + brb_init(&smm->uncommitted); threshold_init(&smm->threshold); memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 6d20fbde8d43..dcde56057fe1 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -181,7 +181,7 @@ static inline int __agg_has_partner(struct aggregator *agg) */ static inline void __disable_port(struct port *port) { - bond_set_slave_inactive_flags(port->slave); + bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER); } /** @@ -193,7 +193,7 @@ static inline void __enable_port(struct port *port) struct slave *slave = port->slave; if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev)) - bond_set_slave_active_flags(slave); + bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER); } /** @@ -2062,6 +2062,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work) struct list_head *iter; struct slave *slave; struct port *port; + bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER; read_lock(&bond->lock); rcu_read_lock(); @@ -2119,8 +2120,19 @@ void bond_3ad_state_machine_handler(struct work_struct *work) } re_arm: + bond_for_each_slave_rcu(bond, slave, iter) { + if (slave->should_notify) { + should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW; + break; + } + } rcu_read_unlock(); read_unlock(&bond->lock); + + if (should_notify_rtnl && rtnl_trylock()) { + bond_slave_state_notify(bond); + rtnl_unlock(); + } queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 1c6104d3501d..e5628fc725c3 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -829,21 +829,25 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) if (bond_is_lb(bond)) { bond_alb_handle_active_change(bond, new_active); if (old_active) - bond_set_slave_inactive_flags(old_active); + 
bond_set_slave_inactive_flags(old_active, + BOND_SLAVE_NOTIFY_NOW); if (new_active) - bond_set_slave_active_flags(new_active); + bond_set_slave_active_flags(new_active, + BOND_SLAVE_NOTIFY_NOW); } else { rcu_assign_pointer(bond->curr_active_slave, new_active); } if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) { if (old_active) - bond_set_slave_inactive_flags(old_active); + bond_set_slave_inactive_flags(old_active, + BOND_SLAVE_NOTIFY_NOW); if (new_active) { bool should_notify_peers = false; - bond_set_slave_active_flags(new_active); + bond_set_slave_active_flags(new_active, + BOND_SLAVE_NOTIFY_NOW); if (bond->params.fail_over_mac) bond_do_fail_over_mac(bond, new_active, @@ -1193,6 +1197,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return -EBUSY; } + if (bond_dev == slave_dev) { + pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name); + return -EPERM; + } + /* vlan challenged mutual exclusion */ /* no need to lock since we're protected by rtnl_lock */ if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) { @@ -1463,14 +1472,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) switch (bond->params.mode) { case BOND_MODE_ACTIVEBACKUP: - bond_set_slave_inactive_flags(new_slave); + bond_set_slave_inactive_flags(new_slave, + BOND_SLAVE_NOTIFY_NOW); break; case BOND_MODE_8023AD: /* in 802.3ad mode, the internal mechanism * will activate the slaves in the selected * aggregator */ - bond_set_slave_inactive_flags(new_slave); + bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); /* if this is the first slave */ if (!prev_slave) { SLAVE_AD_INFO(new_slave).id = 1; @@ -1488,7 +1498,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) case BOND_MODE_TLB: case BOND_MODE_ALB: bond_set_active_slave(new_slave); - bond_set_slave_inactive_flags(new_slave); + bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW); break; default: pr_debug("This slave is always active in trunk mode\n"); @@ -1654,9 +1664,6 @@ static int __bond_release_one(struct net_device *bond_dev, return -EINVAL; } - /* release the slave from its bond */ - bond->slave_cnt--; - bond_sysfs_slave_del(slave); bond_upper_dev_unlink(bond_dev, slave_dev); @@ -1738,6 +1745,7 @@ static int __bond_release_one(struct net_device *bond_dev, unblock_netpoll_tx(); synchronize_rcu(); + bond->slave_cnt--; if (!bond_has_slaves(bond)) { call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev); @@ -2015,7 +2023,8 @@ static void bond_miimon_commit(struct bonding *bond) if (bond->params.mode == BOND_MODE_ACTIVEBACKUP || bond->params.mode == BOND_MODE_8023AD) - bond_set_slave_inactive_flags(slave); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); pr_info("%s: link status definitely down for interface %s, disabling it\n", bond->dev->name, slave->dev->name); @@ -2562,7 +2571,8 @@ static void bond_ab_arp_commit(struct bonding *bond) slave->link = BOND_LINK_UP; if (bond->current_arp_slave) { bond_set_slave_inactive_flags( - bond->current_arp_slave); + bond->current_arp_slave, + BOND_SLAVE_NOTIFY_NOW); bond->current_arp_slave = NULL; } @@ -2582,7 +2592,8 @@ static void bond_ab_arp_commit(struct bonding *bond) slave->link_failure_count++; slave->link = BOND_LINK_DOWN; - bond_set_slave_inactive_flags(slave); + bond_set_slave_inactive_flags(slave, + BOND_SLAVE_NOTIFY_NOW); pr_info("%s: link status definitely down for interface %s, disabling it\n", bond->dev->name, slave->dev->name); @@ -2615,17 +2626,17 @@ do_failover: /* * Send ARP probes 
for active-backup mode ARP monitor.
+ *
+ * Called with rcu_read_lock held.
  */
 static bool bond_ab_arp_probe(struct bonding *bond)
 {
 	struct slave *slave, *before = NULL, *new_slave = NULL,
-		     *curr_arp_slave, *curr_active_slave;
+		     *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
+		     *curr_active_slave = rcu_dereference(bond->curr_active_slave);
 	struct list_head *iter;
 	bool found = false;
-
-	rcu_read_lock();
-	curr_arp_slave = rcu_dereference(bond->current_arp_slave);
-	curr_active_slave = rcu_dereference(bond->curr_active_slave);
+	bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
 
 	if (curr_arp_slave && curr_active_slave)
 		pr_info("PROBE: c_arp %s && cas %s BAD\n",
@@ -2634,32 +2645,23 @@ static bool bond_ab_arp_probe(struct bonding *bond)
 
 	if (curr_active_slave) {
 		bond_arp_send_all(bond, curr_active_slave);
-		rcu_read_unlock();
-		return true;
+		return should_notify_rtnl;
 	}
-	rcu_read_unlock();
 
 	/* if we don't have a curr_active_slave, search for the next available
 	 * backup slave from the current_arp_slave and make it the candidate
 	 * for becoming the curr_active_slave
 	 */
 
-	if (!rtnl_trylock())
-		return false;
-	/* curr_arp_slave might have gone away */
-	curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
-
 	if (!curr_arp_slave) {
-		curr_arp_slave = bond_first_slave(bond);
-		if (!curr_arp_slave) {
-			rtnl_unlock();
-			return true;
-		}
+		curr_arp_slave = bond_first_slave_rcu(bond);
+		if (!curr_arp_slave)
+			return should_notify_rtnl;
 	}
 
-	bond_set_slave_inactive_flags(curr_arp_slave);
+	bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
 
-	bond_for_each_slave(bond, slave, iter) {
+	bond_for_each_slave_rcu(bond, slave, iter) {
 		if (!found && !before && IS_UP(slave->dev))
 			before = slave;
 
@@ -2677,7 +2679,8 @@
 			if (slave->link_failure_count < UINT_MAX)
 				slave->link_failure_count++;
 
-			bond_set_slave_inactive_flags(slave);
+			bond_set_slave_inactive_flags(slave,
+						      BOND_SLAVE_NOTIFY_LATER);
 
 			pr_info("%s: backup interface %s is now down.\n",
 				bond->dev->name, slave->dev->name);
@@ -2689,26 +2692,31 @@
 	if (!new_slave && before)
 		new_slave = before;
 
-	if (!new_slave) {
-		rtnl_unlock();
-		return true;
-	}
+	if (!new_slave)
+		goto check_state;
 
 	new_slave->link = BOND_LINK_BACK;
-	bond_set_slave_active_flags(new_slave);
+	bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
 	bond_arp_send_all(bond, new_slave);
 	new_slave->jiffies = jiffies;
 	rcu_assign_pointer(bond->current_arp_slave, new_slave);
-	rtnl_unlock();
 
-	return true;
+check_state:
+	bond_for_each_slave_rcu(bond, slave, iter) {
+		if (slave->should_notify) {
+			should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+			break;
+		}
+	}
+	return should_notify_rtnl;
 }
 
 static void bond_activebackup_arp_mon(struct work_struct *work)
 {
 	struct bonding *bond = container_of(work, struct bonding,
 					    arp_work.work);
-	bool should_notify_peers = false, should_commit = false;
+	bool should_notify_peers = false;
+	bool should_notify_rtnl = false;
 	int delta_in_ticks;
 
 	delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2717,11 +2725,12 @@
 		goto re_arm;
 
 	rcu_read_lock();
+
 	should_notify_peers = bond_should_notify_peers(bond);
 
-	should_commit = bond_ab_arp_inspect(bond);
-	rcu_read_unlock();
-
-	if (should_commit) {
+	if (bond_ab_arp_inspect(bond)) {
+		rcu_read_unlock();
+
 		/* Race avoidance with bond_close flush of workqueue */
 		if (!rtnl_trylock()) {
 			delta_in_ticks = 1;
@@ -2730,23 +2739,28 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
 		}
 
 		bond_ab_arp_commit(bond);
+
 		rtnl_unlock();
+		rcu_read_lock();
 	}
 
-	if (!bond_ab_arp_probe(bond)) {
-		/* rtnl locking failed, re-arm */
-		delta_in_ticks = 1;
-		should_notify_peers = false;
-	}
+	should_notify_rtnl = bond_ab_arp_probe(bond);
+	rcu_read_unlock();
 
 re_arm:
 	if (bond->params.arp_interval)
 		queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
 
-	if (should_notify_peers) {
+	if (should_notify_peers || should_notify_rtnl) {
 		if (!rtnl_trylock())
 			return;
-		call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+
+		if (should_notify_peers)
+			call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+						 bond->dev);
+		if (should_notify_rtnl)
+			bond_slave_state_notify(bond);
+
 		rtnl_unlock();
 	}
 }
@@ -3046,9 +3060,11 @@ static int bond_open(struct net_device *bond_dev)
 	bond_for_each_slave(bond, slave, iter) {
 		if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP) &&
 			(slave != bond->curr_active_slave)) {
-			bond_set_slave_inactive_flags(slave);
+			bond_set_slave_inactive_flags(slave,
+						      BOND_SLAVE_NOTIFY_NOW);
 		} else {
-			bond_set_slave_active_flags(slave);
+			bond_set_slave_active_flags(slave,
+						    BOND_SLAVE_NOTIFY_NOW);
 		}
 	}
 	read_unlock(&bond->curr_slave_lock);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 86ccfb9f71cc..2b0fdec695f7 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -195,7 +195,8 @@ struct slave {
 	s8     new_link;
 	u8     backup:1,   /* indicates backup slave. Value corresponds with
 			      BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
-	       inactive:1; /* indicates inactive slave */
+	       inactive:1, /* indicates inactive slave */
+	       should_notify:1; /* indicates whether the state changed */
 	u8     duplex;
 	u32    original_mtu;
 	u32    link_failure_count;
@@ -303,6 +304,24 @@ static inline void bond_set_backup_slave(struct slave *slave)
 	}
 }
 
+static inline void bond_set_slave_state(struct slave *slave,
+					int slave_state, bool notify)
+{
+	if (slave->backup == slave_state)
+		return;
+
+	slave->backup = slave_state;
+	if (notify) {
+		rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+		slave->should_notify = 0;
+	} else {
+		if (slave->should_notify)
+			slave->should_notify = 0;
+		else
+			slave->should_notify = 1;
+	}
+}
+
 static inline void bond_slave_state_change(struct bonding *bond)
 {
 	struct list_head *iter;
@@ -316,6 +335,19 @@ static inline void bond_slave_state_change(struct bonding *bond)
 	}
 }
 
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+	struct list_head *iter;
+	struct slave *tmp;
+
+	bond_for_each_slave(bond, tmp, iter) {
+		if (tmp->should_notify) {
+			rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+			tmp->should_notify = 0;
+		}
+	}
+}
+
 static inline int bond_slave_state(struct slave *slave)
 {
 	return slave->backup;
@@ -343,6 +375,9 @@ static inline bool bond_is_active_slave(struct slave *slave)
 #define BOND_ARP_VALIDATE_ALL		(BOND_ARP_VALIDATE_ACTIVE | \
 					 BOND_ARP_VALIDATE_BACKUP)
 
+#define BOND_SLAVE_NOTIFY_NOW		true
+#define BOND_SLAVE_NOTIFY_LATER		false
+
 static inline int slave_do_arp_validate(struct bonding *bond,
 					struct slave *slave)
 {
@@ -394,17 +429,19 @@ static inline void bond_netpoll_send_skb(const struct slave *slave,
 }
 #endif
 
-static inline void bond_set_slave_inactive_flags(struct slave *slave)
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+						 bool notify)
 {
 	if (!bond_is_lb(slave->bond))
-		bond_set_backup_slave(slave);
+		bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
 	if
(!slave->bond->params.all_slaves_active) slave->inactive = 1; } -static inline void bond_set_slave_active_flags(struct slave *slave) +static inline void bond_set_slave_active_flags(struct slave *slave, + bool notify) { - bond_set_active_slave(slave); + bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify); slave->inactive = 0; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 320bef2dba42..61376abdab39 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -144,6 +144,8 @@ #define FLEXCAN_MB_CODE_MASK (0xf0ffffff) +#define FLEXCAN_TIMEOUT_US (50) + /* * FLEXCAN hardware feature flags * @@ -262,6 +264,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr) } #endif +static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_enable(priv->reg_xceiver); +} + +static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) +{ + if (!priv->reg_xceiver) + return 0; + + return regulator_disable(priv->reg_xceiver); +} + static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, u32 reg_esr) { @@ -269,26 +287,95 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv, (reg_esr & FLEXCAN_ESR_ERR_BUS); } -static inline void flexcan_chip_enable(struct flexcan_priv *priv) +static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->base; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = flexcan_read(®s->mcr); reg &= ~FLEXCAN_MCR_MDIS; flexcan_write(reg, ®s->mcr); - udelay(10); + while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + usleep_range(10, 20); + + if (flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK) + return -ETIMEDOUT; + + return 0; } -static inline void flexcan_chip_disable(struct flexcan_priv *priv) +static int flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->base; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = flexcan_read(®s->mcr); reg |= FLEXCAN_MCR_MDIS; flexcan_write(reg, ®s->mcr); + + while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + usleep_range(10, 20); + + if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_LPM_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_chip_freeze(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->base; + unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate; + u32 reg; + + reg = flexcan_read(®s->mcr); + reg |= FLEXCAN_MCR_HALT; + flexcan_write(reg, ®s->mcr); + + while (timeout-- && !(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) + usleep_range(100, 200); + + if (!(flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_chip_unfreeze(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->base; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + u32 reg; + + reg = flexcan_read(®s->mcr); + reg &= ~FLEXCAN_MCR_HALT; + flexcan_write(reg, ®s->mcr); + + while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK)) + usleep_range(10, 20); + + if (flexcan_read(®s->mcr) & FLEXCAN_MCR_FRZ_ACK) + return -ETIMEDOUT; + + return 0; +} + +static int flexcan_chip_softreset(struct flexcan_priv *priv) +{ + struct flexcan_regs __iomem *regs = priv->base; + unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; + + flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); + while (timeout-- && (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST)) 
+ usleep_range(10, 20); + + if (flexcan_read(®s->mcr) & FLEXCAN_MCR_SOFTRST) + return -ETIMEDOUT; + + return 0; } static int flexcan_get_berr_counter(const struct net_device *dev, @@ -709,19 +796,14 @@ static int flexcan_chip_start(struct net_device *dev) u32 reg_mcr, reg_ctrl; /* enable module */ - flexcan_chip_enable(priv); + err = flexcan_chip_enable(priv); + if (err) + return err; /* soft reset */ - flexcan_write(FLEXCAN_MCR_SOFTRST, ®s->mcr); - udelay(10); - - reg_mcr = flexcan_read(®s->mcr); - if (reg_mcr & FLEXCAN_MCR_SOFTRST) { - netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n", - reg_mcr); - err = -ENODEV; - goto out; - } + err = flexcan_chip_softreset(priv); + if (err) + goto out_chip_disable; flexcan_set_bittiming(dev); @@ -788,16 +870,14 @@ static int flexcan_chip_start(struct net_device *dev) if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES) flexcan_write(0x0, ®s->rxfgmask); - if (priv->reg_xceiver) { - err = regulator_enable(priv->reg_xceiver); - if (err) - goto out; - } + err = flexcan_transceiver_enable(priv); + if (err) + goto out_chip_disable; /* synchronize with the can bus */ - reg_mcr = flexcan_read(®s->mcr); - reg_mcr &= ~FLEXCAN_MCR_HALT; - flexcan_write(reg_mcr, ®s->mcr); + err = flexcan_chip_unfreeze(priv); + if (err) + goto out_transceiver_disable; priv->can.state = CAN_STATE_ERROR_ACTIVE; @@ -810,7 +890,9 @@ static int flexcan_chip_start(struct net_device *dev) return 0; - out: + out_transceiver_disable: + flexcan_transceiver_disable(priv); + out_chip_disable: flexcan_chip_disable(priv); return err; } @@ -825,18 +907,17 @@ static void flexcan_chip_stop(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->base; - u32 reg; + + /* freeze + disable module */ + flexcan_chip_freeze(priv); + flexcan_chip_disable(priv); /* Disable all interrupts */ flexcan_write(0, ®s->imask1); + flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, + ®s->ctrl); - /* Disable + halt module */ - reg = flexcan_read(®s->mcr); - reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT; - flexcan_write(reg, ®s->mcr); - - if (priv->reg_xceiver) - regulator_disable(priv->reg_xceiver); + flexcan_transceiver_disable(priv); priv->can.state = CAN_STATE_STOPPED; return; @@ -866,7 +947,7 @@ static int flexcan_open(struct net_device *dev) /* start chip and queuing */ err = flexcan_chip_start(dev); if (err) - goto out_close; + goto out_free_irq; can_led_event(dev, CAN_LED_EVENT_OPEN); @@ -875,6 +956,8 @@ static int flexcan_open(struct net_device *dev) return 0; + out_free_irq: + free_irq(dev->irq, dev); out_close: close_candev(dev); out_disable_per: @@ -945,12 +1028,16 @@ static int register_flexcandev(struct net_device *dev) goto out_disable_ipg; /* select "bus clock", chip must be disabled */ - flexcan_chip_disable(priv); + err = flexcan_chip_disable(priv); + if (err) + goto out_disable_per; reg = flexcan_read(®s->ctrl); reg |= FLEXCAN_CTRL_CLK_SRC; flexcan_write(reg, ®s->ctrl); - flexcan_chip_enable(priv); + err = flexcan_chip_enable(priv); + if (err) + goto out_chip_disable; /* set freeze, halt and activate FIFO, restrict register access */ reg = flexcan_read(®s->mcr); @@ -967,14 +1054,15 @@ static int register_flexcandev(struct net_device *dev) if (!(reg & FLEXCAN_MCR_FEN)) { netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); err = -ENODEV; - goto out_disable_per; + goto out_chip_disable; } err = register_candev(dev); - out_disable_per: /* disable core and turn off clocks */ + out_chip_disable: 
flexcan_chip_disable(priv); + out_disable_per: clk_disable_unprepare(priv->clk_per); out_disable_ipg: clk_disable_unprepare(priv->clk_ipg); @@ -1104,9 +1192,10 @@ static int flexcan_probe(struct platform_device *pdev) static int flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + struct flexcan_priv *priv = netdev_priv(dev); unregister_flexcandev(dev); - + netif_napi_del(&priv->napi); free_candev(dev); return 0; @@ -1117,8 +1206,11 @@ static int flexcan_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); + int err; - flexcan_chip_disable(priv); + err = flexcan_chip_disable(priv); + if (err) + return err; if (netif_running(dev)) { netif_stop_queue(dev); @@ -1139,9 +1231,7 @@ static int flexcan_resume(struct device *device) netif_device_attach(dev); netif_start_queue(dev); } - flexcan_chip_enable(priv); - - return 0; + return flexcan_chip_enable(priv); } #endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index 1f7b5aa114fa..8a7bf7dad898 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -1484,6 +1484,10 @@ static int b44_open(struct net_device *dev) add_timer(&bp->timer); b44_enable_ints(bp); + + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + phy_start(bp->phydev); + netif_start_queue(dev); out: return err; @@ -1646,6 +1650,9 @@ static int b44_close(struct net_device *dev) netif_stop_queue(dev); + if (bp->flags & B44_FLAG_EXTERNAL_PHY) + phy_stop(bp->phydev); + napi_disable(&bp->napi); del_timer_sync(&bp->timer); @@ -2222,7 +2229,12 @@ static void b44_adjust_link(struct net_device *dev) } if (status_changed) { - b44_check_phy(bp); + u32 val = br32(bp, B44_TX_CTRL); + if (bp->flags & B44_FLAG_FULL_DUPLEX) + val |= TX_CTRL_DUPLEX; + else + val &= ~TX_CTRL_DUPLEX; + bw32(bp, B44_TX_CTRL, val); phy_print_status(phydev); } } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 66c0df78c3ff..dbcff509dc3f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3875,7 +3875,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) xmit_type); } - /* Add the macs to the parsing BD this is a vf */ + /* Add the macs to the parsing BD if this is a vf or if + * Tx Switching is enabled. 
+ */ if (IS_VF(bp)) { /* override GRE parameters in BD */ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, @@ -3887,6 +3889,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) &pbd_e2->data.mac_addr.dst_mid, &pbd_e2->data.mac_addr.dst_lo, eth->h_dest); + } else if (bp->flags & TX_SWITCHING) { + bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, + &pbd_e2->data.mac_addr.dst_mid, + &pbd_e2->data.mac_addr.dst_lo, + eth->h_dest); } SET_FLAG(pbd_e2_parsing_data, diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3167ed6593b0..3b6d0ba86c71 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6843,8 +6843,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) work_mask |= opaque_key; - if ((desc->err_vlan & RXD_ERR_MASK) != 0 && - (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { + if (desc->err_vlan & RXD_ERR_MASK) { drop_it: tg3_recycle_rx(tnapi, tpr, opaque_key, desc_idx, *post_ptr); diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index ef472385bce4..04321e5a356e 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h @@ -2608,7 +2608,11 @@ struct tg3_rx_buffer_desc { #define RXD_ERR_TOO_SMALL 0x00400000 #define RXD_ERR_NO_RESOURCES 0x00800000 #define RXD_ERR_HUGE_FRAME 0x01000000 -#define RXD_ERR_MASK 0xffff0000 + +#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \ + RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \ + RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \ + RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME) u32 reserved; u32 opaque; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index cf64f3d0b60d..4ad1187e82fb 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -707,7 +707,8 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) else skb_checksum_none_assert(skb); - if (flags & BNA_CQ_EF_VLAN) + if ((flags & BNA_CQ_EF_VLAN) && + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag)); if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) @@ -2094,7 +2095,9 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config) rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE; } - rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED; + rx_config->vlan_strip_status = + (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ? 
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED; } static void @@ -3245,11 +3248,6 @@ bnad_set_rx_mode(struct net_device *netdev) BNA_RXMODE_ALLMULTI; bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL); - if (bnad->cfg_flags & BNAD_CF_PROMISC) - bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); - else - bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); - spin_unlock_irqrestore(&bnad->bna_lock, flags); } @@ -3374,6 +3372,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) return 0; } +static int bnad_set_features(struct net_device *dev, netdev_features_t features) +{ + struct bnad *bnad = netdev_priv(dev); + netdev_features_t changed = features ^ dev->features; + + if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) { + unsigned long flags; + + spin_lock_irqsave(&bnad->bna_lock, flags); + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + bna_rx_vlan_strip_enable(bnad->rx_info[0].rx); + else + bna_rx_vlan_strip_disable(bnad->rx_info[0].rx); + + spin_unlock_irqrestore(&bnad->bna_lock, flags); + } + + return 0; +} + #ifdef CONFIG_NET_POLL_CONTROLLER static void bnad_netpoll(struct net_device *netdev) @@ -3421,6 +3440,7 @@ static const struct net_device_ops bnad_netdev_ops = { .ndo_change_mtu = bnad_change_mtu, .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid, + .ndo_set_features = bnad_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bnad_netpoll #endif @@ -3433,14 +3453,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac) netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX; + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; - netdev->features |= netdev->hw_features | - NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER; if (using_dac) netdev->features |= NETIF_F_HIGHDMA; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 43ab35fea48d..34e2488767d9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -6179,6 +6179,7 @@ static struct pci_driver cxgb4_driver = { .id_table = cxgb4_pci_tbl, .probe = init_one, .remove = remove_one, + .shutdown = remove_one, .err_handler = &cxgb4_eeh, }; diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h index 8d09615da585..05529e273050 100644 --- a/drivers/net/ethernet/emulex/benet/be.h +++ b/drivers/net/ethernet/emulex/benet/be.h @@ -350,11 +350,13 @@ struct be_drv_stats { u32 roce_drops_crc; }; +/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */ +#define BE_RESET_VLAN_TAG_ID 0xFFFF + struct be_vf_cfg { unsigned char mac_addr[ETH_ALEN]; int if_handle; int pmac_id; - u16 def_vid; u16 vlan_tag; u32 tx_rate; }; diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 04ac9c6a0d39..36c80612e21a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -913,24 +913,14 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, return BE3_chip(adapter) && be_ipv6_exthdr_check(skb); } -static struct sk_buff *be_xmit_workarounds(struct be_adapter 
*adapter, - struct sk_buff *skb, - bool *skip_hw_vlan) +static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + bool *skip_hw_vlan) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; unsigned int eth_hdr_len; struct iphdr *ip; - /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less - * may cause a transmit stall on that port. So the work-around is to - * pad short packets (<= 32 bytes) to a 36-byte length. - */ - if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { - if (skb_padto(skb, 36)) - goto tx_drop; - skb->len = 36; - } - /* For padded packets, BE HW modifies tot_len field in IP header * incorrecly when VLAN tag is inserted by HW. * For padded packets, Lancer computes incorrect checksum. @@ -959,7 +949,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, vlan_tx_tag_present(skb)) { skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) - goto tx_drop; + goto err; } /* HW may lockup when VLAN HW tagging is requested on @@ -981,15 +971,39 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, be_vlan_tag_tx_chk(adapter, skb)) { skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan); if (unlikely(!skb)) - goto tx_drop; + goto err; } return skb; tx_drop: dev_kfree_skb_any(skb); +err: return NULL; } +static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter, + struct sk_buff *skb, + bool *skip_hw_vlan) +{ + /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or + * less may cause a transmit stall on that port. So the work-around is + * to pad short packets (<= 32 bytes) to a 36-byte length. + */ + if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) { + if (skb_padto(skb, 36)) + return NULL; + skb->len = 36; + } + + if (BEx_chip(adapter) || lancer_chip(adapter)) { + skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan); + if (!skb) + return NULL; + } + + return skb; +} + static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -1157,6 +1171,14 @@ ret: return status; } +static void be_clear_promisc(struct be_adapter *adapter) +{ + adapter->promiscuous = false; + adapter->flags &= ~BE_FLAGS_VLAN_PROMISC; + + be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); +} + static void be_set_rx_mode(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); @@ -1170,9 +1192,7 @@ static void be_set_rx_mode(struct net_device *netdev) /* BE was previously in promiscuous mode; disable it */ if (adapter->promiscuous) { - adapter->promiscuous = false; - be_cmd_rx_filter(adapter, IFF_PROMISC, OFF); - + be_clear_promisc(adapter); if (adapter->vlans_added) be_vid_config(adapter); } @@ -1287,24 +1307,20 @@ static int be_set_vf_vlan(struct net_device *netdev, if (vlan || qos) { vlan |= qos << VLAN_PRIO_SHIFT; - if (vf_cfg->vlan_tag != vlan) { - /* If this is new value, program it. Else skip. */ - vf_cfg->vlan_tag = vlan; + if (vf_cfg->vlan_tag != vlan) status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_cfg->if_handle, 0); - } } else { /* Reset Transparent Vlan Tagging. 
*/ - vf_cfg->vlan_tag = 0; - vlan = vf_cfg->def_vid; - status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, - vf_cfg->if_handle, 0); + status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, + vf + 1, vf_cfg->if_handle, 0); } - - if (status) + if (!status) + vf_cfg->vlan_tag = vlan; + else dev_info(&adapter->pdev->dev, - "VLAN %d config on VF %d failed\n", vlan, vf); + "VLAN %d config on VF %d failed\n", vlan, vf); return status; } @@ -3013,11 +3029,11 @@ static int be_vf_setup_init(struct be_adapter *adapter) static int be_vf_setup(struct be_adapter *adapter) { + struct device *dev = &adapter->pdev->dev; struct be_vf_cfg *vf_cfg; - u16 def_vlan, lnk_speed; int status, old_vfs, vf; - struct device *dev = &adapter->pdev->dev; u32 privileges; + u16 lnk_speed; old_vfs = pci_num_vf(adapter->pdev); if (old_vfs) { @@ -3084,12 +3100,6 @@ static int be_vf_setup(struct be_adapter *adapter) if (!status) vf_cfg->tx_rate = lnk_speed; - status = be_cmd_get_hsw_config(adapter, &def_vlan, - vf + 1, vf_cfg->if_handle, NULL); - if (status) - goto err; - vf_cfg->def_vid = def_vlan; - if (!old_vfs) be_cmd_enable_vf(adapter, vf + 1); } diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 903362a7b584..479a7cba45c0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -389,12 +389,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) netdev_err(ndev, "Tx DMA memory map failed\n"); return NETDEV_TX_OK; } - /* Send it on its way. Tell FEC it's ready, interrupt when done, - * it's the last BD of the frame, and to put the CRC on the end. - */ - status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR - | BD_ENET_TX_LAST | BD_ENET_TX_TC); - bdp->cbd_sc = status; if (fep->bufdesc_ex) { @@ -416,6 +410,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) } } + /* Send it on its way. Tell FEC it's ready, interrupt when done, + * it's the last BD of the frame, and to put the CRC on the end. 
+ */ + status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR + | BD_ENET_TX_LAST | BD_ENET_TX_TC); + bdp->cbd_sc = status; + bdp_pre = fec_enet_get_prevdesc(bdp, fep); if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 6b65f7795215..7aec6c833973 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -51,8 +51,8 @@ #define DRV_NAME "mlx4_core" #define PFX DRV_NAME ": " -#define DRV_VERSION "1.1" -#define DRV_RELDATE "Dec, 2011" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb, 2014" #define MLX4_FS_UDP_UC_EN (1 << 1) #define MLX4_FS_TCP_UC_EN (1 << 2) diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 9ca223bc90fc..b57e8c87a34e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -57,8 +57,8 @@ #include "en_port.h" #define DRV_NAME "mlx4_en" -#define DRV_VERSION "2.0" -#define DRV_RELDATE "Dec 2011" +#define DRV_VERSION "2.2-1" +#define DRV_RELDATE "Feb 2014" #define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a064f06e0cb8..23b7e2d35a93 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -46,8 +46,8 @@ #include "mlx5_core.h" #define DRIVER_NAME "mlx5_core" -#define DRIVER_VERSION "1.0" -#define DRIVER_RELDATE "June 2013" +#define DRIVER_VERSION "2.2-1" +#define DRIVER_RELDATE "Feb 2014" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library"); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 4146664d4d6a..27c4f131863b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -340,6 +340,7 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter) if (qlcnic_sriov_vf_check(adapter)) return -EINVAL; num_msix = 1; + adapter->drv_sds_rings = QLCNIC_SINGLE_RING; adapter->drv_tx_rings = QLCNIC_SINGLE_RING; } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c index 77f1bce432d2..7d4f54912bad 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c @@ -807,7 +807,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, !type->tc_param_valid) return; - if (tc < 0 || (tc > QLC_DCB_MAX_TC)) + if (tc < 0 || (tc >= QLC_DCB_MAX_TC)) return; tc_cfg = &type->tc_cfg[tc]; @@ -843,7 +843,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, !type->tc_param_valid) return; - if (pgid < 0 || pgid > QLC_DCB_MAX_PG) + if (pgid < 0 || pgid >= QLC_DCB_MAX_PG) return; pgcfg = &type->pg_cfg[pgid]; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ba78c7481fa3..1222865cfb73 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -816,9 +816,10 @@ static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter) if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) { qlcnic_disable_multi_tx(adapter); + adapter->drv_sds_rings = 
QLCNIC_SINGLE_RING; err = qlcnic_enable_msi_legacy(adapter); - if (!err) + if (err) return err; } } @@ -3863,7 +3864,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, strcpy(buf, "Tx"); } - if (!qlcnic_use_msi_x && !qlcnic_use_msi) { + if (!QLCNIC_IS_MSI_FAMILY(adapter)) { netdev_err(netdev, "No RSS/TSS support in INT-x mode\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 09acf15c3a56..e5277a632671 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -13,8 +13,6 @@ #define QLC_VF_MIN_TX_RATE 100 #define QLC_VF_MAX_TX_RATE 9999 #define QLC_MAC_OPCODE_MASK 0x7 -#define QLC_MAC_STAR_ADD 6 -#define QLC_MAC_STAR_DEL 7 #define QLC_VF_FLOOD_BIT BIT_16 #define QLC_FLOOD_MODE 0x5 @@ -1206,13 +1204,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter, struct qlcnic_vport *vp = vf->vp; u8 op, new_op; - if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) || - ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) { - netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n", - vf->pci_func); - return -EINVAL; - } - if (!(cmd->req.arg[1] & BIT_8)) return -EINVAL; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 91a67ae8f17b..e9779653cd4c 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -7118,6 +7118,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) } mutex_init(&tp->wk.mutex); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); /* Get MAC address */ for (i = 0; i < ETH_ALEN; i++) diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index eb75fbd11a01..d7a36829649a 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1668,6 +1668,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev) struct efx_ptp_data *ptp = efx->ptp_data; int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE); + if (!ptp) { + if (net_ratelimit()) + netif_warn(efx, drv, efx->net_dev, + "Received PTP event but PTP not set up\n"); + return; + } + if (!ptp->enabled) return; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index a2e7d2c96e36..078ad0ec8593 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1705,7 +1705,7 @@ static int stmmac_open(struct net_device *dev) priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); - alloc_dma_desc_resources(priv); + ret = alloc_dma_desc_resources(priv); if (ret < 0) { pr_err("%s: DMA descriptors allocation failed\n", __func__); goto dma_desc_error; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 651087b5c8da..ffd4d12acf6d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1164,11 +1164,17 @@ static void cpsw_init_host_port(struct cpsw_priv *priv) static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv) { + u32 slave_port; + + slave_port = cpsw_get_slave_port(priv, slave->slave_num); + if (!slave->phy) return; phy_stop(slave->phy); phy_disconnect(slave->phy); slave->phy = NULL; + cpsw_ale_control_set(priv->ale, slave_port, + ALE_PORT_STATE, ALE_PORT_STATE_DISABLE); } static 
int cpsw_ndo_open(struct net_device *ndev) diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index a5d21893670d..1831fb7cd017 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -506,6 +506,9 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu) static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; +#define ALWAYS_ON_FEATURES \ + (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX) + #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ @@ -539,7 +542,7 @@ static int macvlan_init(struct net_device *dev) dev->state = (dev->state & ~MACVLAN_STATE_MASK) | (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; - dev->features |= NETIF_F_LLTX; + dev->features |= ALWAYS_ON_FEATURES; dev->gso_max_size = lowerdev->gso_max_size; dev->iflink = lowerdev->ifindex; dev->hard_header_len = lowerdev->hard_header_len; @@ -699,7 +702,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev, features = netdev_increment_features(vlan->lowerdev->features, features, mask); - features |= NETIF_F_LLTX; + features |= ALWAYS_ON_FEATURES; return features; } diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 82514e72b3d8..4b970f7624c0 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -916,6 +916,8 @@ int genphy_read_status(struct phy_device *phydev) int err; int lpa; int lpagb = 0; + int common_adv; + int common_adv_gb = 0; /* Update the link, but return if there was an error */ err = genphy_update_link(phydev); @@ -937,7 +939,7 @@ int genphy_read_status(struct phy_device *phydev) phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb); - lpagb &= adv << 2; + common_adv_gb = lpagb & adv << 2; } lpa = phy_read(phydev, MII_LPA); @@ -950,25 +952,25 @@ int genphy_read_status(struct phy_device *phydev) if (adv < 0) return adv; - lpa &= adv; + common_adv = lpa & adv; phydev->speed = SPEED_10; phydev->duplex = DUPLEX_HALF; phydev->pause = 0; phydev->asym_pause = 0; - if (lpagb & (LPA_1000FULL | LPA_1000HALF)) { + if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) { phydev->speed = SPEED_1000; - if (lpagb & LPA_1000FULL) + if (common_adv_gb & LPA_1000FULL) phydev->duplex = DUPLEX_FULL; - } else if (lpa & (LPA_100FULL | LPA_100HALF)) { + } else if (common_adv & (LPA_100FULL | LPA_100HALF)) { phydev->speed = SPEED_100; - if (lpa & LPA_100FULL) + if (common_adv & LPA_100FULL) phydev->duplex = DUPLEX_FULL; } else - if (lpa & LPA_10FULL) + if (common_adv & LPA_10FULL) phydev->duplex = DUPLEX_FULL; if (phydev->duplex == DUPLEX_FULL) { diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 8fe9cb7d0f72..26f8635b027d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1686,7 +1686,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; dev->features = dev->hw_features; - dev->vlan_features = dev->features; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); INIT_LIST_HEAD(&tun->disabled); err = tun_attach(tun, file, false); diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 955df81a4358..d2e6fdb25e28 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1395,6 +1395,19 @@ 
static const struct driver_info ax88178a_info = { .tx_fixup = ax88179_tx_fixup, }; +static const struct driver_info dlink_dub1312_info = { + .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .stop = ax88179_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + static const struct driver_info sitecom_info = { .description = "Sitecom USB 3.0 to Gigabit Adapter", .bind = ax88179_bind, @@ -1421,6 +1434,19 @@ static const struct driver_info samsung_info = { .tx_fixup = ax88179_tx_fixup, }; +static const struct driver_info lenovo_info = { + .description = "Lenovo OneLinkDock Gigabit LAN", + .bind = ax88179_bind, + .unbind = ax88179_unbind, + .status = ax88179_status, + .link_reset = ax88179_link_reset, + .reset = ax88179_reset, + .stop = ax88179_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .rx_fixup = ax88179_rx_fixup, + .tx_fixup = ax88179_tx_fixup, +}; + static const struct usb_device_id products[] = { { /* ASIX AX88179 10/100/1000 */ @@ -1431,6 +1457,10 @@ static const struct usb_device_id products[] = { USB_DEVICE(0x0b95, 0x178a), .driver_info = (unsigned long)&ax88178a_info, }, { + /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ + USB_DEVICE(0x2001, 0x4a00), + .driver_info = (unsigned long)&dlink_dub1312_info, +}, { /* Sitecom USB 3.0 to Gigabit Adapter */ USB_DEVICE(0x0df6, 0x0072), .driver_info = (unsigned long)&sitecom_info, @@ -1438,6 +1468,10 @@ static const struct usb_device_id products[] = { /* Samsung USB Ethernet Adapter */ USB_DEVICE(0x04e8, 0xa100), .driver_info = (unsigned long)&samsung_info, +}, { + /* Lenovo OneLinkDock Gigabit LAN */ + USB_DEVICE(0x17ef, 0x304b), + .driver_info = (unsigned long)&lenovo_info, }, { }, }; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 2ec2041b62d4..5b374370f71c 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -285,7 +285,8 @@ static void veth_setup(struct net_device *dev) dev->ethtool_ops = &veth_ethtool_ops; dev->features |= NETIF_F_LLTX; dev->features |= VETH_FEATURES; - dev->vlan_features = dev->features; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX); dev->destructor = veth_dev_free; dev->hw_features = VETH_FEATURES; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index d75f8edf4fb3..5632a99cbbd2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1711,7 +1711,8 @@ static int virtnet_probe(struct virtio_device *vdev) /* If we can receive ANY GSO packets, we must allocate large ones. 
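 * GUEST_UFO is included below for the same reason as the TSO bits: a
 * UFO-capable guest can be handed reassembled UDP datagrams far larger
 * than the MTU, so the receive buffers must be sized for them too.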
*/ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || - virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN)) + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) vi->big_packets = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 1cc13569b17b..1b6b4d0cfa97 100644 --- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -57,7 +57,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, - {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, + {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282}, {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, @@ -96,7 +96,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000}, {0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c}, - {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce}, + {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa}, {0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550}, }; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 11eab9f01fd8..303ce27964c1 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1534,7 +1534,7 @@ EXPORT_SYMBOL(ath9k_hw_check_nav); bool ath9k_hw_check_alive(struct ath_hw *ah) { int count = 50; - u32 reg; + u32 reg, last_val; if (AR_SREV_9300(ah)) return !ath9k_hw_detect_mac_hang(ah); @@ -1542,9 +1542,13 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) if (AR_SREV_9285_12_OR_LATER(ah)) return true; + last_val = REG_READ(ah, AR_OBS_BUS_1); do { reg = REG_READ(ah, AR_OBS_BUS_1); + if (reg != last_val) + return true; + last_val = reg; if ((reg & 0x7E7FFFEF) == 0x00702400) continue; @@ -1556,6 +1560,8 @@ bool ath9k_hw_check_alive(struct ath_hw *ah) default: return true; } + + udelay(1); } while (count-- > 0); return false; diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index a0ebdd000fc2..82e340d3ec60 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c @@ -732,11 +732,18 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc, return NULL; /* - * mark descriptor as zero-length and set the 'more' - * flag to ensure that both buffers get discarded + * Re-check previous descriptor, in case it has been filled + * in the mean time. 
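+ * If the hardware has completed it by now its contents are valid and
+ * must not be thrown away; only while ath9k_hw_rxprocdesc() still
+ * returns -EINPROGRESS is it safe to mark both buffers zero-length
+ * so that they get discarded.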
*/ - rs->rs_datalen = 0; - rs->rs_more = true; + ret = ath9k_hw_rxprocdesc(ah, ds, rs); + if (ret == -EINPROGRESS) { + /* + * mark descriptor as zero-length and set the 'more' + * flag to ensure that both buffers get discarded + */ + rs->rs_datalen = 0; + rs->rs_more = true; + } } list_del(&bf->list); @@ -985,32 +992,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hdr *hdr; bool discard_current = sc->rx.discard_next; - int ret = 0; /* * Discard corrupt descriptors which are marked in * ath_get_next_rx_buf(). */ - sc->rx.discard_next = rx_stats->rs_more; if (discard_current) - return -EINVAL; + goto corrupt; + + sc->rx.discard_next = false; /* * Discard zero-length packets. */ if (!rx_stats->rs_datalen) { RX_STAT_INC(rx_len_err); - return -EINVAL; + goto corrupt; } - /* - * rs_status follows rs_datalen so if rs_datalen is too large - * we can take a hint that hardware corrupted it, so ignore - * those frames. - */ + /* + * rs_status follows rs_datalen so if rs_datalen is too large + * we can take a hint that hardware corrupted it, so ignore + * those frames. + */ if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) { RX_STAT_INC(rx_len_err); - return -EINVAL; + goto corrupt; } /* Only use status info from the last fragment */ @@ -1024,10 +1031,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, * This is different from the other corrupt descriptor * condition handled above. */ - if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) { - ret = -EINVAL; - goto exit; - } + if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) + goto corrupt; hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len); @@ -1043,18 +1048,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime)) RX_STAT_INC(rx_spectral); - ret = -EINVAL; - goto exit; + return -EINVAL; } /* * everything but the rate is checked here, the rate check is done * separately to avoid doing two lookups for a rate for each frame. */ - if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) { - ret = -EINVAL; - goto exit; - } + if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) + return -EINVAL; if (ath_is_mybeacon(common, hdr)) { RX_STAT_INC(rx_beacons); @@ -1064,15 +1066,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, /* * This shouldn't happen, but have a safety check anyway. 
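 * (a NULL ah->curchan at this point would mean a completed RX frame
 * with no operating channel configured, so bail out)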
*/ - if (WARN_ON(!ah->curchan)) { - ret = -EINVAL; - goto exit; - } + if (WARN_ON(!ah->curchan)) + return -EINVAL; - if (ath9k_process_rate(common, hw, rx_stats, rx_status)) { - ret =-EINVAL; - goto exit; - } + if (ath9k_process_rate(common, hw, rx_stats, rx_status)) + return -EINVAL; ath9k_process_rssi(common, hw, rx_stats, rx_status); @@ -1087,9 +1085,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc, sc->rx.num_pkts++; #endif -exit: - sc->rx.discard_next = false; - return ret; + return 0; + +corrupt: + sc->rx.discard_next = rx_stats->rs_more; + return -EINVAL; } static void ath9k_rx_skb_postprocess(struct ath_common *common, diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 0a75e2f68c9d..f042a18c8495 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1444,14 +1444,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, for (tidno = 0, tid = &an->tid[tidno]; tidno < IEEE80211_NUM_TIDS; tidno++, tid++) { - if (!tid->sched) - continue; - ac = tid->ac; txq = ac->txq; ath_txq_lock(sc, txq); + if (!tid->sched) { + ath_txq_unlock(sc, txq); + continue; + } + buffered = ath_tid_has_buffered(tid); tid->sched = false; @@ -2184,14 +2186,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, txq->stopped = true; } + if (txctl->an) + tid = ath_get_skb_tid(sc, txctl->an, skb); + if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) { ath_txq_unlock(sc, txq); txq = sc->tx.uapsdq; ath_txq_lock(sc, txq); } else if (txctl->an && ieee80211_is_data_present(hdr->frame_control)) { - tid = ath_get_skb_tid(sc, txctl->an, skb); - WARN_ON(tid->ac->txq != txctl->txq); if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 3e991897d7ca..119ee6eaf1c3 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -457,7 +457,6 @@ struct brcmf_sdio { u8 tx_hdrlen; /* sdio bus header length for tx packet */ bool txglom; /* host tx glomming enable flag */ - struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */ u16 head_align; /* buffer pointer alignment */ u16 sgentry_align; /* scatter-gather buffer alignment */ }; @@ -1944,9 +1943,8 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, if (lastfrm && chain_pad) tail_pad += blksize - chain_pad; if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) { - pkt_pad = bus->txglom_sgpad; - if (pkt_pad == NULL) - brcmu_pkt_buf_get_skb(tail_pad + tail_chop); + pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop + + bus->head_align); if (pkt_pad == NULL) return -ENOMEM; ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad); @@ -1957,6 +1955,7 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus, tail_chop); *(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop; skb_trim(pkt, pkt->len - tail_chop); + skb_trim(pkt_pad, tail_pad + tail_chop); __skb_queue_after(pktq, pkt, pkt_pad); } else { ntail = pkt->data_len + tail_pad - @@ -2011,7 +2010,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq, return ret; head_pad = (u16)ret; if (head_pad) - memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen); + memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad); total_len += pkt_next->len; @@ -3486,10 +3485,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev) bus->txglom = false; value = 1; pad_size = bus->sdiodev->func[2]->cur_blksize 
<< 1; - bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size); - if (!bus->txglom_sgpad) - brcmf_err("allocating txglom padding skb failed, reduced performance\n"); - err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom", &value, sizeof(u32)); if (err < 0) { @@ -4053,7 +4048,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus) brcmf_sdio_chip_detach(&bus->ci); } - brcmu_pkt_buf_free_skb(bus->txglom_sgpad); kfree(bus->rxbuf); kfree(bus->hdrbuf); kfree(bus); diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c index d36e252d2ccb..596525528f50 100644 --- a/drivers/net/wireless/hostap/hostap_ap.c +++ b/drivers/net/wireless/hostap/hostap_ap.c @@ -147,7 +147,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) if (!sta->ap && sta->u.sta.challenge) kfree(sta->u.sta.challenge); - del_timer(&sta->timer); + del_timer_sync(&sta->timer); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ kfree(sta); diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c index c0d070c5df5e..9cdd91cdf661 100644 --- a/drivers/net/wireless/iwlwifi/dvm/sta.c +++ b/drivers/net/wireless/iwlwifi/dvm/sta.c @@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id, sizeof(priv->tid_data[sta_id][tid])); priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; + priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS; priv->num_stations--; diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c index a6839dfcb82d..398dd096674c 100644 --- a/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data; struct iwl_ht_agg *agg; struct sk_buff_head reclaimed_skbs; - struct ieee80211_tx_info *info; - struct ieee80211_hdr *hdr; struct sk_buff *skb; int sta_id; int tid; @@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv, freed = 0; skb_queue_walk(&reclaimed_skbs, skb) { - hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(1); - info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]); + memset(&info->status, 0, sizeof(info->status)); + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. 
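+ * Every frame reclaimed from the BA window therefore gets
+ * IEEE80211_TX_STAT_ACK here; the AMPDU rate-scaling status below is
+ * still attached only to the first skb of the batch.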
+ */ + info->flags |= IEEE80211_TX_STAT_ACK; + if (freed == 1) { /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ info = IEEE80211_SKB_CB(skb); memset(&info->status, 0, sizeof(info->status)); - info->flags |= IEEE80211_TX_STAT_ACK; info->flags |= IEEE80211_TX_STAT_AMPDU; info->status.ampdu_ack_len = ba_resp->txed_2_done; info->status.ampdu_len = ba_resp->txed; diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h index e4ead86f06d6..2b0ba1fc3c82 100644 --- a/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -152,7 +152,7 @@ enum iwl_power_scheme { IWL_POWER_SCHEME_LP }; -#define IWL_CONN_MAX_LISTEN_INTERVAL 70 +#define IWL_CONN_MAX_LISTEN_INTERVAL 10 #define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\ diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4df12fa9d336..76ee486039d7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -822,16 +822,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data; struct sk_buff_head reclaimed_skbs; struct iwl_mvm_tid_data *tid_data; - struct ieee80211_tx_info *info; struct ieee80211_sta *sta; struct iwl_mvm_sta *mvmsta; - struct ieee80211_hdr *hdr; struct sk_buff *skb; int sta_id, tid, freed; - /* "flow" corresponds to Tx queue */ u16 scd_flow = le16_to_cpu(ba_notif->scd_flow); - /* "ssn" is start of block-ack Tx window, corresponds to index * (in Tx queue's circular buffer) of first TFD/frame in window */ u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn); @@ -888,22 +884,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, freed = 0; skb_queue_walk(&reclaimed_skbs, skb) { - hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_hdr *hdr = (void *)skb->data; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (ieee80211_is_data_qos(hdr->frame_control)) freed++; else WARN_ON_ONCE(1); - info = IEEE80211_SKB_CB(skb); iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]); + memset(&info->status, 0, sizeof(info->status)); + /* Packet was transmitted successfully, failures come as single + * frames because before failing a frame the firmware transmits + * it without aggregation at least once. 
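+ * (Same reasoning as in the dvm reclaim path above: ACK every frame
+ * in the window here, attach the AMPDU status only to the first skb
+ * of the batch below.)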
+ */ + info->flags |= IEEE80211_TX_STAT_ACK; + if (freed == 1) { /* this is the first skb we deliver in this batch */ /* put the rate scaling data there */ - info = IEEE80211_SKB_CB(skb); - memset(&info->status, 0, sizeof(info->status)); - info->flags |= IEEE80211_TX_STAT_ACK; info->flags |= IEEE80211_TX_STAT_AMPDU; info->status.ampdu_ack_len = ba_notif->txed_2_done; info->status.ampdu_len = ba_notif->txed; diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 32f75007a825..cb6d189bc3e6 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -621,7 +621,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy, id = *pos++; elen = *pos++; left -= 2; - if (elen > left || elen == 0) { + if (elen > left) { lbs_deb_scan("scan response: invalid IE fmt\n"); goto done; } diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c index 03688aa14e8a..7fe7b53fb17a 100644 --- a/drivers/net/wireless/mwifiex/pcie.c +++ b/drivers/net/wireless/mwifiex/pcie.c @@ -1211,6 +1211,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter) rd_index = card->rxbd_rdptr & reg->rx_mask; skb_data = card->rx_buf_list[rd_index]; + /* If skb allocation was failed earlier for Rx packet, + * rx_buf_list[rd_index] would have been left with a NULL. + */ + if (!skb_data) + return -ENOMEM; + MWIFIEX_SKB_PACB(skb_data, &buf_pa); pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE, PCI_DMA_FROMDEVICE); @@ -1525,6 +1531,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) if (adapter->ps_state == PS_STATE_SLEEP_CFM) { mwifiex_process_sleep_confirm_resp(adapter, skb->data, skb->len); + mwifiex_pcie_enable_host_int(adapter); + if (mwifiex_write_reg(adapter, + PCIE_CPU_INT_EVENT, + CPU_INTR_SLEEP_CFM_DONE)) { + dev_warn(adapter->dev, + "Write register failed\n"); + return -1; + } while (reg->sleep_cookie && (count++ < 10) && mwifiex_pcie_ok_to_access_hw(adapter)) usleep_range(50, 60); @@ -1993,23 +2007,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter) adapter->int_status |= pcie_ireg; spin_unlock_irqrestore(&adapter->int_lock, flags); - if (pcie_ireg & HOST_INTR_CMD_DONE) { - if ((adapter->ps_state == PS_STATE_SLEEP_CFM) || - (adapter->ps_state == PS_STATE_SLEEP)) { - mwifiex_pcie_enable_host_int(adapter); - if (mwifiex_write_reg(adapter, - PCIE_CPU_INT_EVENT, - CPU_INTR_SLEEP_CFM_DONE) - ) { - dev_warn(adapter->dev, - "Write register failed\n"); - return; - - } - } - } else if (!adapter->pps_uapsd_mode && - adapter->ps_state == PS_STATE_SLEEP && - mwifiex_pcie_ok_to_access_hw(adapter)) { + if (!adapter->pps_uapsd_mode && + adapter->ps_state == PS_STATE_SLEEP && + mwifiex_pcie_ok_to_access_hw(adapter)) { /* Potentially for PCIe we could get other * interrupts like shared. 
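 * (for instance a level-triggered INTx line shared with another
 * device).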
Don't change power * state until cookie is set */ diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c index e8ebbd4bc3cd..208748804a55 100644 --- a/drivers/net/wireless/mwifiex/usb.c +++ b/drivers/net/wireless/mwifiex/usb.c @@ -22,8 +22,6 @@ #define USB_VERSION "1.0" -static const char usbdriver_name[] = "usb8xxx"; - static struct mwifiex_if_ops usb_ops; static struct semaphore add_remove_card_sem; static struct usb_card_rec *usb_card; @@ -527,13 +525,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf) MWIFIEX_BSS_ROLE_ANY), MWIFIEX_ASYNC_CMD); -#ifdef CONFIG_PM - /* Resume handler may be called due to remote wakeup, - * force to exit suspend anyway - */ - usb_disable_autosuspend(card->udev); -#endif /* CONFIG_PM */ - return 0; } @@ -567,13 +558,12 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) } static struct usb_driver mwifiex_usb_driver = { - .name = usbdriver_name, + .name = "mwifiex_usb", .probe = mwifiex_usb_probe, .disconnect = mwifiex_usb_disconnect, .id_table = mwifiex_usb_table, .suspend = mwifiex_usb_suspend, .resume = mwifiex_usb_resume, - .supports_autosuspend = 1, }; static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter) diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c index 13eaeed03898..981cf6e7c73b 100644 --- a/drivers/net/wireless/mwifiex/wmm.c +++ b/drivers/net/wireless/mwifiex/wmm.c @@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) mwifiex_wmm_delete_all_ralist(priv); memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid)); - if (priv->adapter->if_ops.clean_pcie_ring) + if (priv->adapter->if_ops.clean_pcie_ring && + !priv->adapter->surprise_removed) priv->adapter->if_ops.clean_pcie_ring(priv->adapter); spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); } diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index f9daa9e183f2..e30d80033cbc 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -907,6 +907,7 @@ static int handle_incoming_queue(struct net_device *dev, /* Ethernet work: Delayed to here as it peeks the header. 
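 * eth_type_trans() pulls the Ethernet header, so the network header
 * offset is reset right after it: checksum_setup() below locates the
 * IP header via skb_network_header(), and a stale offset would make
 * it read the wrong bytes.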
*/ skb->protocol = eth_type_trans(skb, dev); + skb_reset_network_header(skb); if (checksum_setup(dev, skb)) { kfree_skb(skb); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index be361b7cd30f..1e4e69384baa 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -217,7 +217,7 @@ config PINCTRL_IMX28 select PINCTRL_MXS config PINCTRL_MSM - tristate + bool select PINMUX select PINCONF select GENERIC_PINCONF diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index 9ccf681dad2f..f9fabe9bf47d 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c @@ -14,6 +14,7 @@ #include <linux/clk.h> #include <linux/gpio.h> #include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> @@ -584,7 +585,7 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, spin_lock_irqsave(&pctl->lock, flags); regval = readl(pctl->membase + reg); - regval &= ~IRQ_CFG_IRQ_MASK; + regval &= ~(IRQ_CFG_IRQ_MASK << index); writel(regval | (mode << index), pctl->membase + reg); spin_unlock_irqrestore(&pctl->lock, flags); @@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = { static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_get_chip(irq); struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); @@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) if (reg) { int irqoffset; + chained_irq_enter(chip, desc); for_each_set_bit(irqoffset, ®, SUNXI_IRQ_NUMBER) { int pin_irq = irq_find_mapping(pctl->domain, irqoffset); generic_handle_irq(pin_irq); } + chained_irq_exit(chip, desc); } } diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index 01c494f8a14f..552b0e97077a 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h @@ -511,7 +511,7 @@ static inline u32 sunxi_pull_offset(u16 pin) static inline u32 sunxi_irq_cfg_reg(u16 irq) { - u8 reg = irq / IRQ_CFG_IRQ_PER_REG; + u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04; return reg + IRQ_CFG_REG; } @@ -523,7 +523,7 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq) static inline u32 sunxi_irq_ctrl_reg(u16 irq) { - u8 reg = irq / IRQ_CTRL_IRQ_PER_REG; + u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04; return reg + IRQ_CTRL_REG; } @@ -535,7 +535,7 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq) static inline u32 sunxi_irq_status_reg(u16 irq) { - u8 reg = irq / IRQ_STATUS_IRQ_PER_REG; + u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04; return reg + IRQ_STATUS_REG; } diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 77d103fe39d9..567d6918d50b 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -89,7 +89,8 @@ enum { /* GPSR6 */ FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14, - FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23, + FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, + FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK, FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0, FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7, FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17, @@ -788,6 +789,7 @@ static const u16 pinmux_data[] = { PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN), + 
PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK), /* IPSR0 */ PINMUX_IPSR_DATA(IP0_0, D0), @@ -3825,7 +3827,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_11_FN, FN_IP13_25, GP_6_10_FN, FN_IP13_24_23, GP_6_9_FN, FN_IP13_22, - 0, 0, + GP_6_8_FN, FN_SD1_CLK, GP_6_7_FN, FN_IP13_21_19, GP_6_6_FN, FN_IP13_18_16, GP_6_5_FN, FN_IP13_15, diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index a0d6152701cd..617a4916b50f 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c @@ -598,7 +598,7 @@ static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d) { struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); - if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq)) + if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE)) dev_err(bank->chip.gc.dev, "unable to lock HW IRQ %lu for IRQ\n", d->hwirq); @@ -611,7 +611,7 @@ static void sirfsoc_gpio_irq_shutdown(struct irq_data *d) struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); sirfsoc_gpio_irq_mask(d); - gpio_unlock_as_irq(&bank->chip.gc, d->hwirq); + gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE); } static struct irq_chip sirfsoc_irq_chip = { diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h index b4b0d83f9ef6..7061ac0ad428 100644 --- a/drivers/rapidio/devices/tsi721.h +++ b/drivers/rapidio/devices/tsi721.h @@ -678,6 +678,7 @@ struct tsi721_bdma_chan { struct list_head free_list; dma_cookie_t completed_cookie; struct tasklet_struct tasklet; + bool active; }; #endif /* CONFIG_RAPIDIO_DMA_ENGINE */ diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index 502663f5f7c6..91245f5dbe81 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan) { /* Disable BDMA channel interrupts */ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE); - - tasklet_schedule(&bdma_chan->tasklet); + if (bdma_chan->active) + tasklet_schedule(&bdma_chan->tasklet); } #ifdef CONFIG_PCI_MSI @@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan) } #endif /* CONFIG_PCI_MSI */ - tasklet_enable(&bdma_chan->tasklet); + bdma_chan->active = true; tsi721_bdma_interrupt_enable(bdma_chan, 1); return bdma_chan->bd_num - 1; @@ -576,9 +576,7 @@ err_out: static void tsi721_free_chan_resources(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); -#ifdef CONFIG_PCI_MSI struct tsi721_device *priv = to_tsi721(dchan->device); -#endif LIST_HEAD(list); dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); @@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan) BUG_ON(!list_empty(&bdma_chan->active_list)); BUG_ON(!list_empty(&bdma_chan->queue)); - tasklet_disable(&bdma_chan->tasklet); + tsi721_bdma_interrupt_enable(bdma_chan, 0); + bdma_chan->active = false; + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector); + synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + + bdma_chan->id].vector); + } else +#endif + synchronize_irq(priv->pdev->irq); + + tasklet_kill(&bdma_chan->tasklet); spin_lock_bh(&bdma_chan->lock); list_splice_init(&bdma_chan->free_list, &list); spin_unlock_bh(&bdma_chan->lock); - tsi721_bdma_interrupt_enable(bdma_chan, 0); - #ifdef CONFIG_PCI_MSI if (priv->flags & TSI721_USING_MSIX) { 
free_irq(priv->msix[TSI721_VECT_DMA0_DONE + @@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv) bdma_chan->dchan.cookie = 1; bdma_chan->dchan.chan_id = i; bdma_chan->id = i; + bdma_chan->active = false; spin_lock_init(&bdma_chan->lock); @@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv) tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, (unsigned long)bdma_chan); - tasklet_disable(&bdma_chan->tasklet); list_add_tail(&bdma_chan->dchan.device_node, &mport->dma.channels); } diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index d1ac4caaf1b0..afca1bc24f26 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -953,6 +953,8 @@ static int machine_constraints_current(struct regulator_dev *rdev, return 0; } +static int _regulator_do_enable(struct regulator_dev *rdev); + /** * set_machine_constraints - sets regulator constraints * @rdev: regulator source @@ -1013,10 +1015,9 @@ static int set_machine_constraints(struct regulator_dev *rdev, /* If the constraints say the regulator should be on at this point * and we have control then make sure it is enabled. */ - if ((rdev->constraints->always_on || rdev->constraints->boot_on) && - ops->enable) { - ret = ops->enable(rdev); - if (ret < 0) { + if (rdev->constraints->always_on || rdev->constraints->boot_on) { + ret = _regulator_do_enable(rdev); + if (ret < 0 && ret != -EINVAL) { rdev_err(rdev, "failed to enable\n"); goto out; } @@ -1907,8 +1908,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev) trace_regulator_disable_complete(rdev_get_name(rdev)); - _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, - NULL); return 0; } @@ -1932,6 +1931,8 @@ static int _regulator_disable(struct regulator_dev *rdev) rdev_err(rdev, "failed to disable\n"); return ret; } + _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE, + NULL); } rdev->use_count = 0; @@ -1984,20 +1985,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev) { int ret = 0; - /* force disable */ - if (rdev->desc->ops->disable) { - /* ah well, who wants to live forever... 
*/ - ret = rdev->desc->ops->disable(rdev); - if (ret < 0) { - rdev_err(rdev, "failed to force disable\n"); - return ret; - } - /* notify other consumers that power has been forced off */ - _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | - REGULATOR_EVENT_DISABLE, NULL); + ret = _regulator_do_disable(rdev); + if (ret < 0) { + rdev_err(rdev, "failed to force disable\n"); + return ret; } - return ret; + _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE | + REGULATOR_EVENT_DISABLE, NULL); + + return 0; } /** @@ -3630,23 +3627,18 @@ int regulator_suspend_finish(void) mutex_lock(®ulator_list_mutex); list_for_each_entry(rdev, ®ulator_list, list) { - struct regulator_ops *ops = rdev->desc->ops; - mutex_lock(&rdev->mutex); - if ((rdev->use_count > 0 || rdev->constraints->always_on) && - ops->enable) { - error = ops->enable(rdev); + if (rdev->use_count > 0 || rdev->constraints->always_on) { + error = _regulator_do_enable(rdev); if (error) ret = error; } else { if (!have_full_constraints()) goto unlock; - if (!ops->disable) - goto unlock; if (!_regulator_is_enabled(rdev)) goto unlock; - error = ops->disable(rdev); + error = _regulator_do_disable(rdev); if (error) ret = error; } @@ -3820,7 +3812,7 @@ static int __init regulator_init_complete(void) ops = rdev->desc->ops; c = rdev->constraints; - if (!ops->disable || (c && c->always_on)) + if (c && c->always_on) continue; mutex_lock(&rdev->mutex); @@ -3841,7 +3833,7 @@ static int __init regulator_init_complete(void) /* We log since this may kill the system if it * goes wrong. */ rdev_info(rdev, "disabling\n"); - ret = ops->disable(rdev); + ret = _regulator_do_disable(rdev); if (ret != 0) rdev_err(rdev, "couldn't disable: %d\n", ret); } else { diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 7afd373b9595..c4cde9c08f1f 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev) clk_enable(rtc_clk); /* save TICNT for anyone using periodic interrupts */ - ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); if (s3c_rtc_cpu_type == TYPE_S3C64XX) { ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON); ticnt_en_save &= S3C64XX_RTCCON_TICEN; + ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT); + } else { + ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT); } s3c_rtc_enable(pdev, 0); @@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev) clk_enable(rtc_clk); s3c_rtc_enable(pdev, 1); - writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); - if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) { - tmp = readw(s3c_rtc_base + S3C2410_RTCCON); - writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON); + if (s3c_rtc_cpu_type == TYPE_S3C64XX) { + writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT); + if (ticnt_en_save) { + tmp = readw(s3c_rtc_base + S3C2410_RTCCON); + writew(tmp | ticnt_en_save, + s3c_rtc_base + S3C2410_RTCCON); + } + } else { + writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT); } if (device_may_wakeup(dev) && wake_en) { diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c3a83df07894..795ed61a5496 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1660,7 +1660,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) QDIO_FLAG_CLEANUP_USING_CLEAR); if (rc) QETH_CARD_TEXT_(card, 3, "1err%d", rc); - qdio_free(CARD_DDEV(card)); atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); break; case QETH_QDIO_CLEANING: @@ -2605,6 +2604,7 @@ static int 
qeth_mpc_initialize(struct qeth_card *card) return 0; out_qdio: qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + qdio_free(CARD_DDEV(card)); return rc; } @@ -4906,9 +4906,11 @@ retry: if (retries < 3) QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", dev_name(&card->gdev->dev)); + rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); rc = ccw_device_set_online(CARD_RDEV(card)); if (rc) goto retriable; @@ -4918,7 +4920,6 @@ retry: rc = ccw_device_set_online(CARD_DDEV(card)); if (rc) goto retriable; - rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); retriable: if (rc == -ERESTARTSYS) { QETH_DBF_TEXT(SETUP, 2, "break1"); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 0710550093ce..908d82529ee9 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1091,6 +1091,7 @@ out_remove: ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_RECOVER) card->state = CARD_STATE_RECOVER; else @@ -1132,6 +1133,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, rc = (rc2) ? rc2 : rc3; if (rc) QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_UP) card->state = CARD_STATE_RECOVER; /* let user_space know that device is offline */ @@ -1194,6 +1196,7 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); qeth_clear_qdio_buffers(card); + qdio_free(CARD_DDEV(card)); } static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0f430424c3b8..3524d34ff694 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3447,6 +3447,7 @@ out_remove: ccw_device_set_offline(CARD_DDEV(card)); ccw_device_set_offline(CARD_WDEV(card)); ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_RECOVER) card->state = CARD_STATE_RECOVER; else @@ -3493,6 +3494,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, rc = (rc2) ? 
rc2 : rc3; if (rc) QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); if (recover_flag == CARD_STATE_UP) card->state = CARD_STATE_RECOVER; /* let user_space know that device is offline */ @@ -3545,6 +3547,7 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev) qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); qeth_qdio_clear_card(card, 0); qeth_clear_qdio_buffers(card); + qdio_free(CARD_DDEV(card)); } static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7f1a7ce4b771..b83ec378d04f 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) spin_unlock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); iscsit_free_cmd(cmd, false); } } @@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state break; case ISTATE_REMOVE: spin_lock_bh(&conn->cmd_lock); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, false); @@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) spin_lock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); @@ -4196,6 +4196,10 @@ int iscsit_close_connection( iscsit_stop_timers_for_cmds(conn); iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + iscsit_free_queue_reqs_for_conn(conn); /* diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 33be1fb1df32..4ca8fd2a70db 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery( } cr = cmd->cr; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); return --cr->cmd_count; } @@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) continue; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); @@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) /* * Only perform connection recovery on ISCSI_OP_SCSI_CMD or * ISCSI_OP_NOOP_OUT opcodes. 
For all other opcodes call - * list_del(&cmd->i_conn_node); to release the command to the + * list_del_init(&cmd->i_conn_node); to release the command to the * session pool and remove it from the connection's list. * * Also stop the DataOUT timer, which will be restarted after @@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) */ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd->sess = conn->sess; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_all_datain_reqs(cmd); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 39761837608d..44a5471de00f 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { spin_lock(&tpg->tpg_state_lock); - if (tpg->tpg_state == TPG_STATE_FREE) { + if (tpg->tpg_state != TPG_STATE_ACTIVE) { spin_unlock(&tpg->tpg_state_lock); continue; } diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 42f18fc1067b..77e6531fb0a1 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1079,25 +1079,31 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, left = sectors * dev->prot_length; for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { - - len = min(psg->length, left); - if (offset >= sg->length) { - sg = sg_next(sg); - offset = 0; - } + unsigned int psg_len, copied = 0; paddr = kmap_atomic(sg_page(psg)) + psg->offset; - addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; - - if (read) - memcpy(paddr, addr, len); - else - memcpy(addr, paddr, len); - - left -= len; - offset += len; + psg_len = min(left, psg->length); + while (psg_len) { + len = min(psg_len, sg->length - offset); + addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; + + if (read) + memcpy(paddr + copied, addr, len); + else + memcpy(addr, paddr + copied, len); + + left -= len; + offset += len; + copied += len; + psg_len -= len; + + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + } + kunmap_atomic(addr); + } kunmap_atomic(paddr); - kunmap_atomic(addr); } } diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4fb7a8f83c8a..54af4e933695 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn) if (pfn_valid(pfn)) { bool reserved; struct page *tail = pfn_to_page(pfn); - struct page *head = compound_trans_head(tail); + struct page *head = compound_head(tail); reserved = !!(PageReserved(head)); if (head != tail) { /* * "head" is not a dangling pointer - * (compound_trans_head takes care of that) + * (compound_head takes care of that) 
* but the hugepage may have been split * from under us (and we may not hold a * reference count on the head page so it can diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 0129b78a6908..4f70f383132c 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -458,11 +458,10 @@ static int bio_integrity_verify(struct bio *bio) struct blk_integrity_exchg bix; struct bio_vec *bv; sector_t sector = bio->bi_integrity->bip_iter.bi_sector; - unsigned int sectors, total, ret; + unsigned int sectors, ret = 0; void *prot_buf = bio->bi_integrity->bip_buf; int i; - ret = total = 0; bix.disk_name = bio->bi_bdev->bd_disk->disk_name; bix.sector_size = bi->sector_size; @@ -484,8 +483,6 @@ static int bio_integrity_verify(struct bio *bio) sectors = bv->bv_len / bi->sector_size; sector += sectors; prot_buf += sectors * bi->tuple_size; - total += sectors * bi->tuple_size; - BUG_ON(total > bio->bi_integrity->bip_iter.bi_size); kunmap_atomic(kaddr); } diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c index 968eab5bc1f5..68537e8b7a09 100644 --- a/fs/hfsplus/options.c +++ b/fs/hfsplus/options.c @@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force) int token; if (!input) - return 0; + return 1; while ((p = strsep(&input, ",")) != NULL) { if (!*p) diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index aaa50611ec66..d7b5108789e2 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot) */ if (status < 0) mlog_errno(status); + /* + * Clear dq_off so that we search for the structure in quota file next + * time we acquire it. The structure might be deleted and reallocated + * elsewhere by another node while our dquot structure is on freelist. + */ + dquot->dq_off = 0; clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); out_trans: ocfs2_commit_trans(osb, handle); @@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot) status = ocfs2_lock_global_qf(info, 1); if (status < 0) goto out; - if (!test_bit(DQ_READ_B, &dquot->dq_flags)) { - status = ocfs2_qinfo_lock(info, 0); - if (status < 0) - goto out_dq; - status = qtree_read_dquot(&info->dqi_gi, dquot); - ocfs2_qinfo_unlock(info, 0); - if (status < 0) - goto out_dq; - } - set_bit(DQ_READ_B, &dquot->dq_flags); + status = ocfs2_qinfo_lock(info, 0); + if (status < 0) + goto out_dq; + /* + * We always want to read dquot structure from disk because we don't + * know what happened with it while it was on freelist. 
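+ * Another cluster node may have deleted and reallocated the on-disk
+ * structure in the meantime; this is also why ocfs2_release_dquot()
+ * clears dq_off above, so that a stale offset is never used to look
+ * the structure up again.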
+ */ + status = qtree_read_dquot(&info->dqi_gi, dquot); + ocfs2_qinfo_unlock(info, 0); + if (status < 0) + goto out_dq; OCFS2_DQUOT(dquot)->dq_use_count++; OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace; diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c index 2e4344be3b96..2001862bf2b1 100644 --- a/fs/ocfs2/quota_local.c +++ b/fs/ocfs2/quota_local.c @@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot) ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh); out: - /* Clear the read bit so that next time someone uses this - * dquot he reads fresh info from disk and allocates local - * dquot structure */ - clear_bit(DQ_READ_B, &dquot->dq_flags); return status; } diff --git a/fs/proc/page.c b/fs/proc/page.c index 02174a610315..e647c55275d9 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -121,9 +121,8 @@ u64 stable_page_flags(struct page *page) * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon * to make sure a given page is a thp, not a non-huge compound page. */ - else if (PageTransCompound(page) && - (PageLRU(compound_trans_head(page)) || - PageAnon(compound_trans_head(page)))) + else if (PageTransCompound(page) && (PageLRU(compound_head(page)) || + PageAnon(compound_head(page)))) u |= 1 << KPF_THP; /* diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h index a1116a3b54ef..8c1603b10665 100644 --- a/include/dt-bindings/clock/tegra124-car.h +++ b/include/dt-bindings/clock/tegra124-car.h @@ -36,10 +36,10 @@ #define TEGRA124_CLK_PWM 17 #define TEGRA124_CLK_I2S2 18 /* 20 (register bit affects vi and vi_sensor) */ -#define TEGRA124_CLK_GR_2D 21 +/* 21 */ #define TEGRA124_CLK_USBD 22 #define TEGRA124_CLK_ISP 23 -#define TEGRA124_CLK_GR_3D 24 +/* 26 */ /* 25 */ #define TEGRA124_CLK_DISP2 26 #define TEGRA124_CLK_DISP1 27 diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 18ba8a627f46..2ff2e8d982be 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); -void blk_mq_insert_request(struct request_queue *, struct request *, - bool, bool); +void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); @@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); -void blk_mq_end_io(struct request *rq, int error); +bool blk_mq_end_io_partial(struct request *rq, int error, + unsigned int nr_bytes); +static inline void blk_mq_end_io(struct request *rq, int error) +{ + bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq)); + BUG_ON(!done); +} void blk_mq_complete_request(struct request *rq); diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 5d7782e42b8f..c3683bdf28fe 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -200,6 +200,7 @@ struct fw_device { unsigned irmc:1; unsigned bc_implemented:2; + work_func_t workfn; struct delayed_work work; struct fw_attribute_group attribute_group; }; diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index db512014e061..b826239bdce0 
100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page) return HPAGE_PMD_NR; return 1; } -/* - * compound_trans_head() should be used instead of compound_head(), - * whenever the "page" passed as parameter could be the tail of a - * transparent hugepage that could be undergoing a - * __split_huge_page_refcount(). The page structure layout often - * changes across releases and it makes extensive use of unions. So if - * the page structure layout will change in a way that - * page->first_page gets clobbered by __split_huge_page_refcount, the - * implementation making use of smp_rmb() will be required. - * - * Currently we define compound_trans_head as compound_head, because - * page->private is in the same union with page->first_page, and - * page->private isn't clobbered. However this also means we're - * currently leaving dirt into the page->private field of anonymous - * pages resulting from a THP split, instead of setting page->private - * to zero like for every other page that has PG_private not set. But - * anonymous pages don't use page->private so this is not a problem. - */ -#if 0 -/* This will be needed if page->private will be clobbered in split_huge_page */ -static inline struct page *compound_trans_head(struct page *page) -{ - if (PageTail(page)) { - struct page *head; - head = page->first_page; - smp_rmb(); - /* - * head may be a dangling pointer. - * __split_huge_page_refcount clears PageTail before - * overwriting first_page, so if PageTail is still - * there it means the head pointer isn't dangling. - */ - if (PageTail(page)) - return head; - } - return page; -} -#else -#define compound_trans_head(page) compound_head(page) -#endif extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, pmd_t *pmdp); @@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page) do { } while (0) #define split_huge_page_pmd_mm(__mm, __address, __pmd) \ do { } while (0) -#define compound_trans_head(page) compound_head(page) static inline int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) { diff --git a/include/linux/mm.h b/include/linux/mm.h index f28f46eade6a..c1b7414c7bef 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -175,7 +175,7 @@ extern unsigned int kobjsize(const void *objp); * Special vmas that are non-mergable, non-mlock()able. * Note: mm/huge_memory.c VM_NO_THP depends on this definition. */ -#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP) +#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) /* * mapping from the currently active vm_flags protection bits (the @@ -399,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page, static inline struct page *compound_head(struct page *page) { - if (unlikely(PageTail(page))) - return page->first_page; + if (unlikely(PageTail(page))) { + struct page *head = page->first_page; + + /* + * page->first_page may be a dangling pointer to an old + * compound page, so recheck that it is still a tail + * page before returning. 
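As the new compound_head() comment above explains, page->first_page may briefly dangle while a compound page is torn down, so the pointer is only trusted if PageTail() still holds after an smp_rmb(). A userspace-flavoured sketch of that reader side; struct pg, PG_TAIL_BIT and the C11 fence are stand-ins for the kernel types and barrier, not the real API:

#include <stdatomic.h>

struct pg {
        struct pg *first_page;
        atomic_ulong flags;
};
#define PG_TAIL_BIT 1UL

static struct pg *observe_head(struct pg *p)
{
        if (atomic_load(&p->flags) & PG_TAIL_BIT) {
                struct pg *head = p->first_page;

                atomic_thread_fence(memory_order_acquire); /* ~smp_rmb() */
                /* If the tail bit is still set, first_page was valid
                 * when it was read; otherwise fall back to "self". */
                if (atomic_load(&p->flags) & PG_TAIL_BIT)
                        return head;
        }
        return p;
}

The matching writer side appears in the mm/page_alloc.c hunk later in this series.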
+ */ + smp_rmb(); + if (likely(PageTail(page))) + return head; + } return page; } @@ -757,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid) #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS static inline int page_cpupid_xchg_last(struct page *page, int cpupid) { - return xchg(&page->_last_cpupid, cpupid); + return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK); } static inline int page_cpupid_last(struct page *page) @@ -766,7 +776,7 @@ static inline int page_cpupid_last(struct page *page) } static inline void page_cpupid_reset_last(struct page *page) { - page->_last_cpupid = -1; + page->_last_cpupid = -1 & LAST_CPUPID_MASK; } #else static inline int page_cpupid_last(struct page *page) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3ebbbe7b6d05..5e1e6f2d98c2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2725,7 +2725,7 @@ static inline void nf_reset(struct sk_buff *skb) static inline void nf_reset_trace(struct sk_buff *skb) { -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) skb->nf_trace = 0; #endif } @@ -2742,6 +2742,9 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) dst->nf_bridge = src->nf_bridge; nf_bridge_get(src->nf_bridge); #endif +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES) + dst->nf_trace = src->nf_trace; +#endif } static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index accc497f8d72..7159a0a933df 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -60,6 +60,12 @@ struct tp_module { unsigned int num_tracepoints; struct tracepoint * const *tracepoints_ptrs; }; +bool trace_module_has_bad_taint(struct module *mod); +#else +static inline bool trace_module_has_bad_taint(struct module *mod) +{ + return false; +} #endif /* CONFIG_MODULES */ struct tracepoint_iter { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 48ed75c21260..e77c10405d51 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -129,6 +129,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_parm *p); void ip_tunnel_setup(struct net_device *dev, int net_id); +void ip_tunnel_dst_reset_all(struct ip_tunnel *t); /* Extract dsfield from inner protocol */ static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph, diff --git a/include/net/tcp.h b/include/net/tcp.h index 56fc366da6d5..8c4dd63134d4 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1303,7 +1303,8 @@ struct tcp_fastopen_request { /* Fast Open cookie. 
Size 0 means a cookie request */ struct tcp_fastopen_cookie cookie; struct msghdr *data; /* data in MSG_FASTOPEN */ - u16 copied; /* queued in tcp_connect() */ + size_t size; + int copied; /* queued in tcp_connect() */ }; void tcp_free_fastopen_req(struct tcp_sock *tp); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index afa5730fb3bd..fb5654a8ca3c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1648,6 +1648,11 @@ static inline int xfrm_aevent_is_on(struct net *net) } #endif +static inline int aead_len(struct xfrm_algo_aead *alg) +{ + return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); +} + static inline int xfrm_alg_len(const struct xfrm_algo *alg) { return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); @@ -1686,6 +1691,12 @@ static inline int xfrm_replay_clone(struct xfrm_state *x, return 0; } +static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig) +{ + return kmemdup(orig, aead_len(orig), GFP_KERNEL); +} + + static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) { return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index ae5a17111968..4483fadfa68d 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -12,6 +12,7 @@ struct iscsit_transport { int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); void (*iscsit_free_np)(struct iscsi_np *); + void (*iscsit_wait_conn)(struct iscsi_conn *); void (*iscsit_free_conn)(struct iscsi_conn *); int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4410ac6a55f1..e6b1b66afe52 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Temporarilly set tasks mems_allowed to target nodes of migration, * so that the migration code can allocate pages on these nodes. * - * Call holding cpuset_mutex, so current's cpuset won't change - * during this call, as manage_mutex holds off any cpuset_attach() - * calls. Therefore we don't need to take task_lock around the - * call to guarantee_online_mems(), as we know no one is changing - * our task's cpuset. 
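The aead_len() helper added above sizes a variable-length algorithm blob the same way as xfrm_alg_len(): header plus key, with the key length rounded up from bits to whole bytes. Standalone arithmetic sketch; the struct layout here is illustrative, not the uapi definition:

#include <stddef.h>

struct aead_blob {
        unsigned int alg_key_len;       /* in bits */
        char alg_key[];
};

static size_t aead_blob_len(const struct aead_blob *alg)
{
        /* +7 rounds partial bytes up: 129 bits -> 17 bytes */
        return sizeof(*alg) + (alg->alg_key_len + 7) / 8;
}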
- * * While the mm_struct we are migrating is typically from some * other task, the task_struct mems_allowed that we are hacking * is for our current task, which must allocate new pages for that @@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); + rcu_read_lock(); mems_cs = effective_nodemask_cpuset(task_cs(tsk)); guarantee_online_mems(mems_cs, &tsk->mems_allowed); + rcu_read_unlock(); } /* @@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) task_lock(current); cs = nearest_hardwall_ancestor(task_cs(current)); + allowed = node_isset(node, cs->mems_allowed); task_unlock(current); - allowed = node_isset(node, cs->mems_allowed); mutex_unlock(&callback_mutex); return allowed; } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index cf68bb36fe58..f14033700c25 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -10,6 +10,7 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/topology.h> #include <linux/seq_file.h> #include <linux/slab.h> diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 481a13c43b17..d3bf660cb57f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc, static void wake_threads_waitq(struct irq_desc *desc) { - if (atomic_dec_and_test(&desc->threads_active) && - waitqueue_active(&desc->wait_for_threads)) + if (atomic_dec_and_test(&desc->threads_active)) wake_up(&desc->wait_for_threads); } diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c index 5b8838b56d1c..5b9bb42b2d47 100644 --- a/kernel/sched/cpudeadline.c +++ b/kernel/sched/cpudeadline.c @@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx) static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl) { - WARN_ON(!cpu_present(idx) || idx == IDX_INVALID); + WARN_ON(idx == IDX_INVALID || !cpu_present(idx)); if (dl_time_before(new_dl, cp->elements[idx].dl)) { cp->elements[idx].dl = new_dl; @@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p, } out: - WARN_ON(!cpu_present(best_cpu) && best_cpu != -1); + WARN_ON(best_cpu != -1 && !cpu_present(best_cpu)); return best_cpu; } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 15cbc17fbf84..6e79b3faa4cd 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -135,7 +135,6 @@ static void update_dl_migration(struct dl_rq *dl_rq) static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { struct task_struct *p = dl_task_of(dl_se); - dl_rq = &rq_of_dl_rq(dl_rq)->dl; if (p->nr_cpus_allowed > 1) dl_rq->dl_nr_migratory++; @@ -146,7 +145,6 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) { struct task_struct *p = dl_task_of(dl_se); - dl_rq = &rq_of_dl_rq(dl_rq)->dl; if (p->nr_cpus_allowed > 1) dl_rq->dl_nr_migratory--; @@ -564,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) return 1; } +extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); + /* * Update the current task's runtime statistics (provided it is still * a -deadline task and has not been removed from the dl_rq). 
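The two cpudeadline.c WARN_ON() changes above only swap operands, but with short-circuit evaluation the order is the fix: the IDX_INVALID (-1) test must run before anything that indexes per-CPU state with that value. A minimal illustration; cpu_present_sim() is a stand-in for cpu_present():

#define IDX_INVALID (-1)

static int cpu_present_sim(int cpu)
{
        static const char present[4] = { 1, 1, 0, 1 };

        return present[cpu];    /* out-of-bounds read for cpu == -1 */
}

static int idx_is_bogus(int idx)
{
        /* Safe: the left test guards the array access on the right. */
        return idx == IDX_INVALID || !cpu_present_sim(idx);
}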
@@ -627,11 +627,13 @@ static void update_curr_dl(struct rq *rq) struct rt_rq *rt_rq = &rq->rt; raw_spin_lock(&rt_rq->rt_runtime_lock); - rt_rq->rt_time += delta_exec; /* * We'll let actual RT tasks worry about the overflow here, we - * have our own CBS to keep us inline -- see above. + * have our own CBS to keep us inline; only account when RT + * bandwidth is relevant. */ + if (sched_rt_bandwidth_account(rt_rq)) + rt_rq->rt_time += delta_exec; raw_spin_unlock(&rt_rq->rt_runtime_lock); } } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 78157099b167..9b4c4f320130 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7001,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) struct cfs_rq *cfs_rq = cfs_rq_of(se); /* - * Ensure the task's vruntime is normalized, so that when its + * Ensure the task's vruntime is normalized, so that when it's * switched back to the fair class the enqueue_entity(.flags=0) will * do the right thing. * - * If it was on_rq, then the dequeue_entity(.flags=0) will already - * have normalized the vruntime, if it was !on_rq, then only when + * If it's on_rq, then the dequeue_entity(.flags=0) will already + * have normalized the vruntime, if it's !on_rq, then only when * the task is sleeping will it still have non-normalized vruntime. */ - if (!se->on_rq && p->state != TASK_RUNNING) { + if (!p->on_rq && p->state != TASK_RUNNING) { /* * Fix up our vruntime so that the current sleep doesn't * cause 'unlimited' sleep bonus. diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a2740b775b45..1999021042c7 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) #endif /* CONFIG_RT_GROUP_SCHED */ +bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) +{ + struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); + + return (hrtimer_active(&rt_b->rt_period_timer) || + rt_rq->rt_time < rt_b->rt_runtime); +} + #ifdef CONFIG_SMP /* * We ran out of runtime, see if we can borrow some from our neighbours. diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e71ffd4eccb5..f3989ceb5cd5 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1777,6 +1777,16 @@ static void trace_module_add_events(struct module *mod) { struct ftrace_event_call **call, **start, **end; + if (!mod->num_trace_events) + return; + + /* Don't add infrastructure for mods without tracepoints */ + if (trace_module_has_bad_taint(mod)) { + pr_err("%s: module has bad taint, not creating trace events\n", + mod->name); + return; + } + start = mod->trace_events; end = mod->trace_events + mod->num_trace_events; diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 29f26540e9c9..031cc5655a51 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter) EXPORT_SYMBOL_GPL(tracepoint_iter_reset); #ifdef CONFIG_MODULES +bool trace_module_has_bad_taint(struct module *mod) +{ + return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); +} + static int tracepoint_module_coming(struct module *mod) { struct tp_module *tp_mod, *iter; @@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod) * module headers (for forced load), to make sure we don't cause a crash. * Staging and out-of-tree GPL modules are fine. 
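trace_module_has_bad_taint() above is a whitelist mask: clear the two tolerated taint bits and anything left over disqualifies the module. Standalone sketch; the bit positions are illustrative stand-ins for the kernel's TAINT_* constants:

enum { TAINT_CRAP_SIM = 10, TAINT_OOT_MODULE_SIM = 12 };

static int has_bad_taint(unsigned long taints)
{
        unsigned long ok = (1UL << TAINT_OOT_MODULE_SIM) |
                           (1UL << TAINT_CRAP_SIM);

        return (taints & ~ok) != 0;     /* any non-whitelisted bit */
}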
*/ - if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) + if (trace_module_has_bad_taint(mod)) return 0; mutex_lock(&tracepoints_mutex); tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 2defd1308b04..98f2d7e91a91 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -424,111 +424,134 @@ void debug_dma_dump_mappings(struct device *dev) EXPORT_SYMBOL(debug_dma_dump_mappings); /* - * For each page mapped (initial page in the case of - * dma_alloc_coherent/dma_map_{single|page}, or each page in a - * scatterlist) insert into this tree using the pfn as the key. At + * For each mapping (initial cacheline in the case of + * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a + * scatterlist, or the cacheline specified in dma_map_single) insert + * into this tree using the cacheline as the key. At * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If - * the pfn already exists at insertion time add a tag as a reference + * the entry already exists at insertion time add a tag as a reference * count for the overlapping mappings. For now, the overlap tracking - * just ensures that 'unmaps' balance 'maps' before marking the pfn - * idle, but we should also be flagging overlaps as an API violation. + * just ensures that 'unmaps' balance 'maps' before marking the + * cacheline idle, but we should also be flagging overlaps as an API + * violation. * * Memory usage is mostly constrained by the maximum number of available * dma-debug entries in that we need a free dma_debug_entry before - * inserting into the tree. In the case of dma_map_{single|page} and - * dma_alloc_coherent there is only one dma_debug_entry and one pfn to - * track per event. dma_map_sg(), on the other hand, - * consumes a single dma_debug_entry, but inserts 'nents' entries into - * the tree. + * inserting into the tree. In the case of dma_map_page and + * dma_alloc_coherent there is only one dma_debug_entry and one + * dma_active_cacheline entry to track per event. dma_map_sg(), on the + * other hand, consumes a single dma_debug_entry, but inserts 'nents' + * entries into the tree. * * At any time debug_dma_assert_idle() can be called to trigger a - * warning if the given page is in the active set. + * warning if any cachelines in the given page are in the active set. 
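Per the rewritten comment above, the tracking key becomes a cacheline number rather than a pfn: the pfn scaled by cachelines-per-page, plus the line index within the page, which is what the to_cacheline_number() helper in the next hunk computes. Standalone sketch of the arithmetic, assuming 4 KiB pages and 64-byte lines:

typedef unsigned long long cln_t;

#define PAGE_SHIFT_SIM     12   /* 4 KiB pages (assumed) */
#define L1_SHIFT_SIM        6   /* 64 B cachelines (assumed) */
#define CLN_PER_PAGE_SHIFT (PAGE_SHIFT_SIM - L1_SHIFT_SIM)

static cln_t cacheline_number(unsigned long pfn, unsigned long offset)
{
        return ((cln_t)pfn << CLN_PER_PAGE_SHIFT)
                + (offset >> L1_SHIFT_SIM);
}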
*/ -static RADIX_TREE(dma_active_pfn, GFP_NOWAIT); +static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT); static DEFINE_SPINLOCK(radix_lock); -#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) +#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1) +#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT) +#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT) -static int active_pfn_read_overlap(unsigned long pfn) +static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry) +{ + return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) + + (entry->offset >> L1_CACHE_SHIFT); +} + +static int active_cacheline_read_overlap(phys_addr_t cln) { int overlap = 0, i; for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) - if (radix_tree_tag_get(&dma_active_pfn, pfn, i)) + if (radix_tree_tag_get(&dma_active_cacheline, cln, i)) overlap |= 1 << i; return overlap; } -static int active_pfn_set_overlap(unsigned long pfn, int overlap) +static int active_cacheline_set_overlap(phys_addr_t cln, int overlap) { int i; - if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0) + if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0) return overlap; for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--) if (overlap & 1 << i) - radix_tree_tag_set(&dma_active_pfn, pfn, i); + radix_tree_tag_set(&dma_active_cacheline, cln, i); else - radix_tree_tag_clear(&dma_active_pfn, pfn, i); + radix_tree_tag_clear(&dma_active_cacheline, cln, i); return overlap; } -static void active_pfn_inc_overlap(unsigned long pfn) +static void active_cacheline_inc_overlap(phys_addr_t cln) { - int overlap = active_pfn_read_overlap(pfn); + int overlap = active_cacheline_read_overlap(cln); - overlap = active_pfn_set_overlap(pfn, ++overlap); + overlap = active_cacheline_set_overlap(cln, ++overlap); /* If we overflowed the overlap counter then we're potentially * leaking dma-mappings. Otherwise, if maps and unmaps are * balanced then this overflow may cause false negatives in - * debug_dma_assert_idle() as the pfn may be marked idle + * debug_dma_assert_idle() as the cacheline may be marked idle * prematurely. */ - WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP, - "DMA-API: exceeded %d overlapping mappings of pfn %lx\n", - ACTIVE_PFN_MAX_OVERLAP, pfn); + WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP, + "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n", + ACTIVE_CACHELINE_MAX_OVERLAP, &cln); } -static int active_pfn_dec_overlap(unsigned long pfn) +static int active_cacheline_dec_overlap(phys_addr_t cln) { - int overlap = active_pfn_read_overlap(pfn); + int overlap = active_cacheline_read_overlap(cln); - return active_pfn_set_overlap(pfn, --overlap); + return active_cacheline_set_overlap(cln, --overlap); } -static int active_pfn_insert(struct dma_debug_entry *entry) +static int active_cacheline_insert(struct dma_debug_entry *entry) { + phys_addr_t cln = to_cacheline_number(entry); unsigned long flags; int rc; + /* If the device is not writing memory then we don't have any + * concerns about the cpu consuming stale data. This mitigates + * legitimate usages of overlapping mappings. 
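The read/set overlap helpers above pack a small saturating reference count into the radix-tree tag bits of a single slot, one bit per tag. Sketch of the encode/decode with a plain array standing in for the tags (two bits here; the kernel has RADIX_TREE_MAX_TAGS of them):

#define MAX_TAGS_SIM     2
#define MAX_OVERLAP_SIM ((1 << MAX_TAGS_SIM) - 1)

static int read_overlap(const int tag[MAX_TAGS_SIM])
{
        int i, overlap = 0;

        for (i = 0; i < MAX_TAGS_SIM; i++)
                overlap |= tag[i] << i;         /* tag i holds bit i */
        return overlap;
}

static int set_overlap(int tag[MAX_TAGS_SIM], int overlap)
{
        int i;

        if (overlap < 0 || overlap > MAX_OVERLAP_SIM)
                return overlap;                 /* saturated: leave bits */
        for (i = 0; i < MAX_TAGS_SIM; i++)
                tag[i] = (overlap >> i) & 1;
        return overlap;
}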
+ */ + if (entry->direction == DMA_TO_DEVICE) + return 0; + spin_lock_irqsave(&radix_lock, flags); - rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry); + rc = radix_tree_insert(&dma_active_cacheline, cln, entry); if (rc == -EEXIST) - active_pfn_inc_overlap(entry->pfn); + active_cacheline_inc_overlap(cln); spin_unlock_irqrestore(&radix_lock, flags); return rc; } -static void active_pfn_remove(struct dma_debug_entry *entry) +static void active_cacheline_remove(struct dma_debug_entry *entry) { + phys_addr_t cln = to_cacheline_number(entry); unsigned long flags; + /* ...mirror the insert case */ + if (entry->direction == DMA_TO_DEVICE) + return; + spin_lock_irqsave(&radix_lock, flags); /* since we are counting overlaps the final put of the - * entry->pfn will occur when the overlap count is 0. - * active_pfn_dec_overlap() returns -1 in that case + * cacheline will occur when the overlap count is 0. + * active_cacheline_dec_overlap() returns -1 in that case */ - if (active_pfn_dec_overlap(entry->pfn) < 0) - radix_tree_delete(&dma_active_pfn, entry->pfn); + if (active_cacheline_dec_overlap(cln) < 0) + radix_tree_delete(&dma_active_cacheline, cln); spin_unlock_irqrestore(&radix_lock, flags); } /** * debug_dma_assert_idle() - assert that a page is not undergoing dma - * @page: page to lookup in the dma_active_pfn tree + * @page: page to lookup in the dma_active_cacheline tree * * Place a call to this routine in cases where the cpu touching the page * before the dma completes (page is dma_unmapped) will lead to data @@ -536,22 +559,38 @@ static void active_pfn_remove(struct dma_debug_entry *entry) */ void debug_dma_assert_idle(struct page *page) { + static struct dma_debug_entry *ents[CACHELINES_PER_PAGE]; + struct dma_debug_entry *entry = NULL; + void **results = (void **) &ents; + unsigned int nents, i; unsigned long flags; - struct dma_debug_entry *entry; + phys_addr_t cln; if (!page) return; + cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT; spin_lock_irqsave(&radix_lock, flags); - entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page)); + nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln, + CACHELINES_PER_PAGE); + for (i = 0; i < nents; i++) { + phys_addr_t ent_cln = to_cacheline_number(ents[i]); + + if (ent_cln == cln) { + entry = ents[i]; + break; + } else if (ent_cln >= cln + CACHELINES_PER_PAGE) + break; + } spin_unlock_irqrestore(&radix_lock, flags); if (!entry) return; + cln = to_cacheline_number(entry); err_printk(entry->dev, entry, - "DMA-API: cpu touching an active dma mapped page " - "[pfn=0x%lx]\n", entry->pfn); + "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n", + &cln); } /* @@ -568,9 +607,9 @@ static void add_dma_entry(struct dma_debug_entry *entry) hash_bucket_add(bucket, entry); put_hash_bucket(bucket, &flags); - rc = active_pfn_insert(entry); + rc = active_cacheline_insert(entry); if (rc == -ENOMEM) { - pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n"); + pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n"); global_disable = true; } @@ -631,7 +670,7 @@ static void dma_entry_free(struct dma_debug_entry *entry) { unsigned long flags; - active_pfn_remove(entry); + active_cacheline_remove(entry); /* * add to beginning of the list - this way the entries are diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 7811ed3b4e70..bd4a8dfdf0b8 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1253,8 +1253,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item) 
node = indirect_to_ptr(node); max_index = radix_tree_maxindex(node->height); - if (cur_index > max_index) + if (cur_index > max_index) { + rcu_read_unlock(); break; + } cur_index = __locate(node, item, cur_index, &found_index); rcu_read_unlock(); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4df39b1bde91..1546655a2d78 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1961,7 +1961,7 @@ out: return ret; } -#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE) +#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE) int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, int advice) @@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item) static struct page *page_trans_compound_anon(struct page *page) { if (PageTransCompound(page)) { - struct page *head = compound_trans_head(page); + struct page *head = compound_head(page); /* * head may actually be splitted and freed from under * us but it's ok here. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ce7a8cc7b404..5b6b0039f725 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1127,8 +1127,8 @@ skip_node: * skipping css reference should be safe. */ if (next_css) { - if ((next_css->flags & CSS_ONLINE) && - (next_css == &root->css || css_tryget(next_css))) + if ((next_css == &root->css) || + ((next_css->flags & CSS_ONLINE) && css_tryget(next_css))) return mem_cgroup_from_css(next_css); prev_css = next_css; @@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_event *event, *tmp; + struct cgroup_subsys_state *iter; /* * Unregister events and notify userspace. @@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) kmem_cgroup_css_offline(memcg); mem_cgroup_invalidate_reclaim_iterators(memcg); - mem_cgroup_reparent_charges(memcg); + + /* + * This requires that offlining is serialized. Right now that is + * guaranteed because css_killed_work_fn() holds the cgroup_mutex. 
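The lib/radix-tree.c hunk above is a "missing unlock on the early-exit path" fix: the lookup takes rcu_read_lock() per pass, so the new break must drop it too. Shape of the pattern, with a pthread read-write lock standing in for RCU:

#include <pthread.h>

static pthread_rwlock_t lk = PTHREAD_RWLOCK_INITIALIZER;

static long locate(unsigned long cur, unsigned long max_index)
{
        long found = -1;

        pthread_rwlock_rdlock(&lk);
        if (cur > max_index) {
                /* the fix: every exit path drops the read lock */
                pthread_rwlock_unlock(&lk);
                return found;
        }
        /* ... search under the lock, setting found ... */
        pthread_rwlock_unlock(&lk);
        return found;
}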
+ */ + css_for_each_descendant_post(iter, css) + mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); + mem_cgroup_destroy_all_caches(memcg); vmpressure_cleanup(&memcg->vmpressure); } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 2f2f34a4e77d..90002ea43638 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1651,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags) { int ret; unsigned long pfn = page_to_pfn(page); - struct page *hpage = compound_trans_head(page); + struct page *hpage = compound_head(page); if (PageHWPoison(page)) { pr_info("soft offline: %#lx page already poisoned\n", pfn); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e3758a09a009..3bac76ae4b30 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order) __SetPageHead(page); for (i = 1; i < nr_pages; i++) { struct page *p = page + i; - __SetPageTail(p); set_page_count(p, 0); p->first_page = page; + /* Make sure p->first_page is always valid for PageTail() */ + smp_wmb(); + __SetPageTail(p); } } @@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) } local_irq_restore(flags); } +static bool gfp_thisnode_allocation(gfp_t gfp_mask) +{ + return (gfp_mask & GFP_THISNODE) == GFP_THISNODE; +} +#else +static bool gfp_thisnode_allocation(gfp_t gfp_mask) +{ + return false; +} #endif /* @@ -1572,7 +1583,13 @@ again: get_pageblock_migratetype(page)); } - __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); + /* + * NOTE: GFP_THISNODE allocations do not partake in the kswapd + * aging protocol, so they can't be fair. + */ + if (!gfp_thisnode_allocation(gfp_flags)) + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); + __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); local_irq_restore(flags); @@ -1944,8 +1961,12 @@ zonelist_scan: * ultimately fall back to remote zones that do not * partake in the fairness round-robin cycle of this * zonelist. + * + * NOTE: GFP_THISNODE allocations do not partake in + * the kswapd aging protocol, so they can't be fair. */ - if (alloc_flags & ALLOC_WMARK_LOW) { + if ((alloc_flags & ALLOC_WMARK_LOW) && + !gfp_thisnode_allocation(gfp_mask)) { if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0) continue; if (!zone_local(preferred_zone, zone)) @@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, * allowed per node queues are empty and that nodes are * over allocated. */ - if (IS_ENABLED(CONFIG_NUMA) && - (gfp_mask & GFP_THISNODE) == GFP_THISNODE) + if (gfp_thisnode_allocation(gfp_mask)) goto nopage; restart: diff --git a/mm/swap.c b/mm/swap.c index b31ba67d440a..0092097b3f4c 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -98,7 +98,7 @@ static void put_compound_page(struct page *page) } /* __split_huge_page_refcount can run under us */ - page_head = compound_trans_head(page); + page_head = compound_head(page); /* * THP can not break up slab pages so avoid taking @@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page) */ unsigned long flags; bool got; - struct page *page_head = compound_trans_head(page); + struct page *page_head = compound_head(page); /* Ref to put_compound_page() comment. 
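The prep_compound_page() hunk above is the writer side of the pairing used by the reworked compound_head() earlier in this section: make first_page valid first, issue the write barrier, then set the tail bit. Sketch reusing the same stand-in types as the reader sketch; the C11 release fence approximates smp_wmb():

#include <stdatomic.h>

struct pg {
        struct pg *first_page;
        atomic_ulong flags;
};
#define PG_TAIL_BIT 1UL

static void publish_tail(struct pg *head, struct pg *tail)
{
        tail->first_page = head;
        atomic_thread_fence(memory_order_release);  /* ~smp_wmb() */
        atomic_fetch_or(&tail->flags, PG_TAIL_BIT); /* now observable */
}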
*/ if (!__compound_tail_refcounted(page_head)) { diff --git a/net/can/raw.c b/net/can/raw.c index 8be757cca2ec..081e81fd017f 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -121,13 +121,9 @@ static void raw_rcv(struct sk_buff *oskb, void *data) if (!ro->recv_own_msgs && oskb->sk == sk) return; - /* do not pass frames with DLC > 8 to a legacy socket */ - if (!ro->fd_frames) { - struct canfd_frame *cfd = (struct canfd_frame *)oskb->data; - - if (unlikely(cfd->len > CAN_MAX_DLEN)) - return; - } + /* do not pass non-CAN2.0 frames to a legacy socket */ + if (!ro->fd_frames && oskb->len != CAN_MTU) + return; /* clone the given skb to be able to enqueue it into the rcv queue */ skb = skb_clone(oskb, GFP_ATOMIC); @@ -738,9 +734,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; - struct raw_sock *ro = raw_sk(sk); struct sk_buff *skb; - int rxmtu; int err = 0; int noblock; @@ -751,20 +745,10 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock, if (!skb) return err; - /* - * when serving a legacy socket the DLC <= 8 is already checked inside - * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy - * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU - */ - if (!ro->fd_frames) - rxmtu = CAN_MTU; - else - rxmtu = skb->len; - - if (size < rxmtu) + if (size < skb->len) msg->msg_flags |= MSG_TRUNC; else - size = rxmtu; + size = skb->len; err = memcpy_toiovec(msg->msg_iov, skb->data, size); if (err < 0) { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index b9e9e0d38672..e16129019c66 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -766,9 +766,6 @@ static void neigh_periodic_work(struct work_struct *work) nht = rcu_dereference_protected(tbl->nht, lockdep_is_held(&tbl->lock)); - if (atomic_read(&tbl->entries) < tbl->gc_thresh1) - goto out; - /* * periodically recompute ReachableTime from random function */ @@ -781,6 +778,9 @@ static void neigh_periodic_work(struct work_struct *work) neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); } + if (atomic_read(&tbl->entries) < tbl->gc_thresh1) + goto out; + for (i = 0 ; i < (1 << nht->hash_shift); i++) { np = &nht->hash_buckets[i]; @@ -3046,7 +3046,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, if (!t) goto err; - for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) { + for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) { t->neigh_vars[i].data += (long) p; t->neigh_vars[i].extra1 = dev; t->neigh_vars[i].extra2 = p; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 5976ef0846bd..5d6236d9fdce 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -707,9 +707,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) new->mark = old->mark; new->skb_iif = old->skb_iif; __nf_copy(new, old); -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) - new->nf_trace = old->nf_trace; -#endif #ifdef CONFIG_NET_SCHED new->tc_index = old->tc_index; #ifdef CONFIG_NET_CLS_ACT diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 327060c6c874..7ae0d7f6dbd0 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -297,7 +297,7 @@ static bool seq_nr_after(u16 a, u16 b) void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx) { - if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) { + if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) { WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx); return; } diff --git 
a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ecd2c3f245ce..19ab78aca547 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1296,8 +1296,11 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, segs = ERR_PTR(-EPROTONOSUPPORT); - /* Note : following gso_segment() might change skb->encapsulation */ - udpfrag = !skb->encapsulation && proto == IPPROTO_UDP; + if (skb->encapsulation && + skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) + udpfrag = proto == IPPROTO_UDP && encap; + else + udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; ops = rcu_dereference(inet_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 8971780aec7c..73c6b63bba74 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -422,9 +422,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->tc_index = from->tc_index; #endif nf_copy(to, from); -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) - to->nf_trace = from->nf_trace; -#endif #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) to->ipvs_property = from->ipvs_property; #endif diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 50228be5c17b..78a89e61925d 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -93,13 +93,14 @@ static void tunnel_dst_reset(struct ip_tunnel *t) tunnel_dst_set(t, NULL); } -static void tunnel_dst_reset_all(struct ip_tunnel *t) +void ip_tunnel_dst_reset_all(struct ip_tunnel *t) { int i; for_each_possible_cpu(i) __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); } +EXPORT_SYMBOL(ip_tunnel_dst_reset_all); static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie) { @@ -119,52 +120,6 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie) return (struct rtable *)dst; } -/* Often modified stats are per cpu, other are shared (netdev->stats) */ -struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *tot) -{ - int i; - - for_each_possible_cpu(i) { - const struct pcpu_sw_netstats *tstats = - per_cpu_ptr(dev->tstats, i); - u64 rx_packets, rx_bytes, tx_packets, tx_bytes; - unsigned int start; - - do { - start = u64_stats_fetch_begin_bh(&tstats->syncp); - rx_packets = tstats->rx_packets; - tx_packets = tstats->tx_packets; - rx_bytes = tstats->rx_bytes; - tx_bytes = tstats->tx_bytes; - } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); - - tot->rx_packets += rx_packets; - tot->tx_packets += tx_packets; - tot->rx_bytes += rx_bytes; - tot->tx_bytes += tx_bytes; - } - - tot->multicast = dev->stats.multicast; - - tot->rx_crc_errors = dev->stats.rx_crc_errors; - tot->rx_fifo_errors = dev->stats.rx_fifo_errors; - tot->rx_length_errors = dev->stats.rx_length_errors; - tot->rx_frame_errors = dev->stats.rx_frame_errors; - tot->rx_errors = dev->stats.rx_errors; - - tot->tx_fifo_errors = dev->stats.tx_fifo_errors; - tot->tx_carrier_errors = dev->stats.tx_carrier_errors; - tot->tx_dropped = dev->stats.tx_dropped; - tot->tx_aborted_errors = dev->stats.tx_aborted_errors; - tot->tx_errors = dev->stats.tx_errors; - - tot->collisions = dev->stats.collisions; - - return tot; -} -EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); - static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p, __be16 flags, __be32 key) { @@ -759,7 +714,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, if (set_mtu) dev->mtu = mtu; } - tunnel_dst_reset_all(t); + ip_tunnel_dst_reset_all(t); netdev_state_change(dev); } @@ -1088,7 +1043,7 
@@ void ip_tunnel_uninit(struct net_device *dev) if (itn->fb_tunnel_dev != dev) ip_tunnel_del(netdev_priv(dev)); - tunnel_dst_reset_all(tunnel); + ip_tunnel_dst_reset_all(tunnel); } EXPORT_SYMBOL_GPL(ip_tunnel_uninit); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 6156f4ef5e91..6f847dd56dbc 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -108,7 +108,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) nf_reset(skb); secpath_reset(skb); skb_clear_hash_if_not_l4(skb); - skb_dst_drop(skb); skb->vlan_tci = 0; skb_set_queue_mapping(skb, 0); skb->pkt_type = PACKET_HOST; @@ -148,3 +147,49 @@ error: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(iptunnel_handle_offloads); + +/* Often modified stats are per cpu, other are shared (netdev->stats) */ +struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *tot) +{ + int i; + + for_each_possible_cpu(i) { + const struct pcpu_sw_netstats *tstats = + per_cpu_ptr(dev->tstats, i); + u64 rx_packets, rx_bytes, tx_packets, tx_bytes; + unsigned int start; + + do { + start = u64_stats_fetch_begin_bh(&tstats->syncp); + rx_packets = tstats->rx_packets; + tx_packets = tstats->tx_packets; + rx_bytes = tstats->rx_bytes; + tx_bytes = tstats->tx_bytes; + } while (u64_stats_fetch_retry_bh(&tstats->syncp, start)); + + tot->rx_packets += rx_packets; + tot->tx_packets += tx_packets; + tot->rx_bytes += rx_bytes; + tot->tx_bytes += tx_bytes; + } + + tot->multicast = dev->stats.multicast; + + tot->rx_crc_errors = dev->stats.rx_crc_errors; + tot->rx_fifo_errors = dev->stats.rx_fifo_errors; + tot->rx_length_errors = dev->stats.rx_length_errors; + tot->rx_frame_errors = dev->stats.rx_frame_errors; + tot->rx_errors = dev->stats.rx_errors; + + tot->tx_fifo_errors = dev->stats.tx_fifo_errors; + tot->tx_carrier_errors = dev->stats.tx_carrier_errors; + tot->tx_dropped = dev->stats.tx_dropped; + tot->tx_aborted_errors = dev->stats.tx_aborted_errors; + tot->tx_errors = dev->stats.tx_errors; + + tot->collisions = dev->stats.collisions; + + return tot; +} +EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64); diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c index d551e31b416e..7c676671329d 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c @@ -1198,8 +1198,8 @@ static int snmp_translate(struct nf_conn *ct, map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); } else { /* DNAT replies */ - map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip); - map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip); + map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip); + map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip); } if (map.from == map.to) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 9f3a2db9109e..97c8f5620c43 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1044,7 +1044,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp) } } -static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size) +static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, + int *copied, size_t size) { struct tcp_sock *tp = tcp_sk(sk); int err, flags; @@ -1059,11 +1060,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size) if (unlikely(tp->fastopen_req == NULL)) return -ENOBUFS; tp->fastopen_req->data = msg; + tp->fastopen_req->size = size; flags = (msg->msg_flags & MSG_DONTWAIT) ? 
O_NONBLOCK : 0; err = __inet_stream_connect(sk->sk_socket, msg->msg_name, msg->msg_namelen, flags); - *size = tp->fastopen_req->copied; + *copied = tp->fastopen_req->copied; tcp_free_fastopen_req(tp); return err; } @@ -1083,7 +1085,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, flags = msg->msg_flags; if (flags & MSG_FASTOPEN) { - err = tcp_sendmsg_fastopen(sk, msg, &copied_syn); + err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size); if (err == -EINPROGRESS && copied_syn > 0) goto out; else if (err) diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index ad37bf18ae4b..2388275adb9b 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -290,8 +290,7 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) left = tp->snd_cwnd - in_flight; if (sk_can_gso(sk) && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && - left * tp->mss_cache < sk->sk_gso_max_size && - left < sk->sk_gso_max_segs) + left < tp->xmit_size_goal_segs) return true; return left <= tcp_max_tso_deferred_mss(tp); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 227cba79fa6b..eeaac399420d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1945,8 +1945,9 @@ void tcp_enter_loss(struct sock *sk, int how) if (skb == tcp_send_head(sk)) break; - if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) + if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) tp->undo_marker = 0; + TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3be16727f058..f0eb4e337ec8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -864,8 +864,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, if (unlikely(skb->fclone == SKB_FCLONE_ORIG && fclone->fclone == SKB_FCLONE_CLONE)) - NET_INC_STATS_BH(sock_net(sk), - LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); + NET_INC_STATS(sock_net(sk), + LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); if (unlikely(skb_cloned(skb))) skb = pskb_copy(skb, gfp_mask); @@ -2337,6 +2337,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); unsigned int cur_mss; + int err; /* Inconslusive MTU probe */ if (icsk->icsk_mtup.probe_size) { @@ -2400,11 +2401,15 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) skb_headroom(skb) >= 0xFFFF)) { struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); - return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : - -ENOBUFS; + err = nskb ? 
tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : + -ENOBUFS; } else { - return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); + err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); } + + if (likely(!err)) + TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS; + return err; } int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) @@ -2908,7 +2913,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) - MAX_TCP_OPTION_SPACE; - syn_data = skb_copy_expand(syn, skb_headroom(syn), space, + space = min_t(size_t, space, fo->size); + + /* limit to order-0 allocations */ + space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER)); + + syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space, sk->sk_allocation); if (syn_data == NULL) goto fallback; diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index d92e5586783e..438a73aa777c 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -138,6 +138,7 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION config IPV6_VTI tristate "Virtual (secure) IPv6: tunneling" select IPV6_TUNNEL + select NET_IP_TUNNEL depends on INET6_XFRM_MODE_TUNNEL ---help--- Tunneling means encapsulating data of one protocol type within diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c index 140748debc4a..8af3eb57f438 100644 --- a/net/ipv6/exthdrs_core.c +++ b/net/ipv6/exthdrs_core.c @@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, found = (nexthdr == target); if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { - if (target < 0) + if (target < 0 || found) break; return -ENOENT; } diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 1e8683b135bb..59f95affceb0 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -89,7 +89,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, unsigned int unfrag_ip6hlen; u8 *prevhdr; int offset = 0; - bool tunnel; + bool encap, udpfrag; int nhoff; if (unlikely(skb_shinfo(skb)->gso_type & @@ -110,8 +110,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) goto out; - tunnel = SKB_GSO_CB(skb)->encap_level > 0; - if (tunnel) + encap = SKB_GSO_CB(skb)->encap_level > 0; + if (encap) features = skb->dev->hw_enc_features & netif_skb_features(skb); SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); @@ -121,6 +121,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); + if (skb->encapsulation && + skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP)) + udpfrag = proto == IPPROTO_UDP && encap; + else + udpfrag = proto == IPPROTO_UDP && !skb->encapsulation; + ops = rcu_dereference(inet6_offloads[proto]); if (likely(ops && ops->callbacks.gso_segment)) { skb_reset_transport_header(skb); @@ -133,13 +139,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, for (skb = segs; skb; skb = skb->next) { ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h)); - if (tunnel) { - skb_reset_inner_headers(skb); - skb->encapsulation = 1; - } skb->network_header = (u8 *)ipv6h - skb->head; - if (!tunnel && proto == IPPROTO_UDP) { + if (udpfrag) { unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); fptr->frag_off = htons(offset); @@ -148,6 +150,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, offset += (ntohs(ipv6h->payload_len) - sizeof(struct frag_hdr)); } 
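tcp_send_syn_data() above now clamps the Fast Open payload twice more: to what the caller actually queued (fo->size) and to what fits in an order-0 skb head. The successive min_t() calls reduce to a chain of minimums; standalone sketch:

#include <stddef.h>

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

static size_t syn_data_space(size_t mss_room, size_t queued,
                             size_t order0_cap)
{
        size_t space = mss_room;           /* MSS-derived room */

        space = min_sz(space, queued);     /* no more than queued */
        space = min_sz(space, order0_cap); /* keep the head order-0 */
        return space;
}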
+ if (encap) + skb_reset_inner_headers(skb); } out: diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 070a2fae2375..16f91a2e7888 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -530,9 +530,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) to->tc_index = from->tc_index; #endif nf_copy(to, from); -#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) - to->nf_trace = from->nf_trace; -#endif skb_copy_secmark(to, from); } diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index fb9beb78f00b..587bbdcb22b4 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -135,6 +135,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, fl6.flowi6_proto = IPPROTO_ICMPV6; fl6.saddr = np->saddr; fl6.daddr = *daddr; + fl6.flowi6_mark = sk->sk_mark; fl6.fl6_icmp_type = user_icmph.icmp6_type; fl6.fl6_icmp_code = user_icmph.icmp6_code; security_sk_classify_flow(sk, flowi6_to_flowi(&fl6)); diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3dfbcf1dcb1c..b4d74c86586c 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -475,6 +475,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev) ipip6_tunnel_unlink(sitn, tunnel); ipip6_tunnel_del_prl(tunnel, NULL); } + ip_tunnel_dst_reset_all(tunnel); dev_put(dev); } @@ -1082,6 +1083,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p) t->parms.link = p->link; ipip6_tunnel_bind_dev(t->dev); } + ip_tunnel_dst_reset_all(t); netdev_state_change(t->dev); } @@ -1112,6 +1114,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t, t->ip6rd.relay_prefix = relay_prefix; t->ip6rd.prefixlen = ip6rd->prefixlen; t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen; + ip_tunnel_dst_reset_all(t); netdev_state_change(t->dev); return 0; } @@ -1271,6 +1274,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); break; } + ip_tunnel_dst_reset_all(t); netdev_state_change(dev); break; @@ -1326,6 +1330,9 @@ static const struct net_device_ops ipip6_netdev_ops = { static void ipip6_dev_free(struct net_device *dev) { + struct ip_tunnel *tunnel = netdev_priv(dev); + + free_percpu(tunnel->dst_cache); free_percpu(dev->tstats); free_netdev(dev); } @@ -1375,6 +1382,12 @@ static int ipip6_tunnel_init(struct net_device *dev) u64_stats_init(&ipip6_tunnel_stats->syncp); } + tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); + if (!tunnel->dst_cache) { + free_percpu(dev->tstats); + return -ENOMEM; + } + return 0; } @@ -1405,6 +1418,12 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) u64_stats_init(&ipip6_fb_stats->syncp); } + tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst); + if (!tunnel->dst_cache) { + free_percpu(dev->tstats); + return -ENOMEM; + } + dev_hold(dev); rcu_assign_pointer(sitn->tunnels_wc[0], tunnel); return 0; diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index e7359f9eaa8d..b261ee8b83fc 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -113,7 +113,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); fptr->nexthdr = nexthdr; fptr->reserved = 0; - ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb)); + fptr->identification = skb_shinfo(skb)->ip6_frag_id; /* Fragment the skb. 
ipv6 header and the remaining fields of the * fragment header are updated in ipv6_gso_segment() diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3701930c6649..5e44e3179e02 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1692,14 +1692,8 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue); void ieee80211_add_pending_skb(struct ieee80211_local *local, struct sk_buff *skb); -void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, - struct sk_buff_head *skbs, - void (*fn)(void *data), void *data); -static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local, - struct sk_buff_head *skbs) -{ - ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL); -} +void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs); void ieee80211_flush_queues(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index fc1d82465b3c..245dce969b31 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -222,6 +222,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, switch (vht_oper->chan_width) { case IEEE80211_VHT_CHANWIDTH_USE_HT: vht_chandef.width = chandef->width; + vht_chandef.center_freq1 = chandef->center_freq1; break; case IEEE80211_VHT_CHANWIDTH_80MHZ: vht_chandef.width = NL80211_CHAN_WIDTH_80; @@ -271,6 +272,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, ret = 0; out: + /* + * When tracking the current AP, don't do any further checks if the + * new chandef is identical to the one we're currently using for the + * connection. This keeps us from playing ping-pong with regulatory, + * without it the following can happen (for example): + * - connect to an AP with 80 MHz, world regdom allows 80 MHz + * - AP advertises regdom US + * - CRDA loads regdom US with 80 MHz prohibited (old database) + * - the code below detects an unsupported channel, downgrades, and + * we disconnect from the AP in the caller + * - disconnect causes CRDA to reload world regdomain and the game + * starts anew. + * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881) + * + * It seems possible that there are still scenarios with CSA or real + * bandwidth changes where a this could happen, but those cases are + * less common and wouldn't completely prevent using the AP. + */ + if (tracking && + cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef)) + return ret; + /* don't print the message below for VHT mismatch if VHT is disabled */ if (ret & IEEE80211_STA_DISABLE_VHT) vht_chandef = *chandef; @@ -3753,6 +3776,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata, chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); if (WARN_ON(!chanctx_conf)) { rcu_read_unlock(); + sta_info_free(local, new_sta); return -EINVAL; } rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index c24ca0d0f469..3e57f96c9666 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1128,6 +1128,13 @@ static void sta_ps_end(struct sta_info *sta) sta->sta.addr, sta->sta.aid); if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { + /* + * Clear the flag only if the other one is still set + * so that the TX path won't start TX'ing new frames + * directly ... 
In the case that the driver flag isn't + * set ieee80211_sta_ps_deliver_wakeup() will clear it. + */ + clear_sta_flag(sta, WLAN_STA_PS_STA); ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n", sta->sta.addr, sta->sta.aid); return; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index decd30c1e290..a023b432143b 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -91,7 +91,7 @@ static int sta_info_hash_del(struct ieee80211_local *local, return -ENOENT; } -static void cleanup_single_sta(struct sta_info *sta) +static void __cleanup_single_sta(struct sta_info *sta) { int ac, i; struct tid_ampdu_tx *tid_tx; @@ -99,7 +99,8 @@ static void cleanup_single_sta(struct sta_info *sta) struct ieee80211_local *local = sdata->local; struct ps_data *ps; - if (test_sta_flag(sta, WLAN_STA_PS_STA)) { + if (test_sta_flag(sta, WLAN_STA_PS_STA) || + test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { if (sta->sdata->vif.type == NL80211_IFTYPE_AP || sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ps = &sdata->bss->ps; @@ -109,6 +110,7 @@ static void cleanup_single_sta(struct sta_info *sta) return; clear_sta_flag(sta, WLAN_STA_PS_STA); + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); atomic_dec(&ps->num_sta_ps); sta_info_recalc_tim(sta); @@ -139,7 +141,14 @@ static void cleanup_single_sta(struct sta_info *sta) ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending); kfree(tid_tx); } +} +static void cleanup_single_sta(struct sta_info *sta) +{ + struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + + __cleanup_single_sta(sta); sta_info_free(local, sta); } @@ -330,6 +339,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, rcu_read_unlock(); spin_lock_init(&sta->lock); + spin_lock_init(&sta->ps_lock); INIT_WORK(&sta->drv_unblock_wk, sta_unblock); INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); mutex_init(&sta->ampdu_mlme.mtx); @@ -487,21 +497,26 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) goto out_err; } - /* notify driver */ - err = sta_info_insert_drv_state(local, sdata, sta); - if (err) - goto out_err; - local->num_sta++; local->sta_generation++; smp_mb(); + /* simplify things and don't accept BA sessions yet */ + set_sta_flag(sta, WLAN_STA_BLOCK_BA); + /* make the station visible */ sta_info_hash_add(local, sta); list_add_rcu(&sta->list, &local->sta_list); + /* notify driver */ + err = sta_info_insert_drv_state(local, sdata, sta); + if (err) + goto out_remove; + set_sta_flag(sta, WLAN_STA_INSERTED); + /* accept BA sessions now */ + clear_sta_flag(sta, WLAN_STA_BLOCK_BA); ieee80211_recalc_min_chandef(sdata); ieee80211_sta_debugfs_add(sta); @@ -522,6 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) mesh_accept_plinks_update(sdata); return 0; + out_remove: + sta_info_hash_del(local, sta); + list_del_rcu(&sta->list); + local->num_sta--; + synchronize_net(); + __cleanup_single_sta(sta); out_err: mutex_unlock(&local->sta_mtx); rcu_read_lock(); @@ -1071,10 +1092,14 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, } EXPORT_SYMBOL(ieee80211_find_sta); -static void clear_sta_ps_flags(void *_sta) +/* powersave support code */ +void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) { - struct sta_info *sta = _sta; struct ieee80211_sub_if_data *sdata = sta->sdata; + struct ieee80211_local *local = sdata->local; + struct sk_buff_head pending; + int filtered = 0, buffered = 0, ac; + unsigned long flags; struct ps_data *ps; if 
(sdata->vif.type == NL80211_IFTYPE_AP || @@ -1085,20 +1110,6 @@ static void clear_sta_ps_flags(void *_sta) else return; - clear_sta_flag(sta, WLAN_STA_PS_DRIVER); - if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA)) - atomic_dec(&ps->num_sta_ps); -} - -/* powersave support code */ -void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) -{ - struct ieee80211_sub_if_data *sdata = sta->sdata; - struct ieee80211_local *local = sdata->local; - struct sk_buff_head pending; - int filtered = 0, buffered = 0, ac; - unsigned long flags; - clear_sta_flag(sta, WLAN_STA_SP); BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1); @@ -1109,6 +1120,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) skb_queue_head_init(&pending); + /* sync with ieee80211_tx_h_unicast_ps_buf */ + spin_lock(&sta->ps_lock); /* Send all buffered frames to the station */ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { int count = skb_queue_len(&pending), tmp; @@ -1127,7 +1140,12 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) buffered += tmp - count; } - ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta); + ieee80211_add_pending_skbs(local, &pending); + clear_sta_flag(sta, WLAN_STA_PS_DRIVER); + clear_sta_flag(sta, WLAN_STA_PS_STA); + spin_unlock(&sta->ps_lock); + + atomic_dec(&ps->num_sta_ps); /* This station just woke up and isn't aware of our SMPS state */ if (!ieee80211_smps_is_restrictive(sta->known_smps_mode, diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index d77ff7090630..d3a6d8208f2f 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -267,6 +267,7 @@ struct ieee80211_tx_latency_stat { * @drv_unblock_wk: used for driver PS unblocking * @listen_interval: listen interval of this station, when we're acting as AP * @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly + * @ps_lock: used for powersave (when mac80211 is the AP) related locking * @ps_tx_buf: buffers (per AC) of frames to transmit to this station * when it leaves power saving state or polls * @tx_filtered: buffers (per AC) of frames we already tried to @@ -356,10 +357,8 @@ struct sta_info { /* use the accessors defined below */ unsigned long _flags; - /* - * STA powersave frame queues, no more than the internal - * locking required. - */ + /* STA powersave lock and frame queues */ + spinlock_t ps_lock; struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS]; struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS]; unsigned long driver_buffered_tids; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 97a02d3f7d87..4080c615636f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -478,6 +478,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) sta->sta.addr, sta->sta.aid, ac); if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) purge_old_ps_buffers(tx->local); + + /* sync with ieee80211_sta_ps_deliver_wakeup */ + spin_lock(&sta->ps_lock); + /* + * STA woke up the meantime and all the frames on ps_tx_buf have + * been queued to pending queue. No reordering can happen, go + * ahead and Tx the packet. 
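The ps_lock taken above makes "flush the PS buffers, then clear the flags" atomic with respect to the TX path's "check the flags, then buffer" sequence in the tx.c hunk that follows, so a frame can never be queued behind a flush it missed. Userspace analogue with a plain mutex; the names are stand-ins:

#include <pthread.h>

struct sta_sim {
        pthread_mutex_t ps_lock;
        int ps_active;          /* stands in for both PS flags */
        /* ps_tx_buf / pending queues elided */
};

static void deliver_wakeup(struct sta_sim *s)
{
        pthread_mutex_lock(&s->ps_lock);
        /* splice ps_tx_buf onto the pending queue here */
        s->ps_active = 0;       /* clear only after the splice */
        pthread_mutex_unlock(&s->ps_lock);
}

static int tx_should_buffer(struct sta_sim *s)
{
        int buffer;

        pthread_mutex_lock(&s->ps_lock);
        buffer = s->ps_active;  /* decided under the same lock */
        /* if buffering, enqueue onto ps_tx_buf before unlocking */
        pthread_mutex_unlock(&s->ps_lock);
        return buffer;
}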
+ */ + if (!test_sta_flag(sta, WLAN_STA_PS_STA) && + !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) { + spin_unlock(&sta->ps_lock); + return TX_CONTINUE; + } + if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) { struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]); ps_dbg(tx->sdata, @@ -492,6 +506,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); + spin_unlock(&sta->ps_lock); if (!timer_pending(&local->sta_cleanup)) mod_timer(&local->sta_cleanup, diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 676dc0967f37..b8700d417a9c 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -435,9 +435,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local, spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); } -void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, - struct sk_buff_head *skbs, - void (*fn)(void *data), void *data) +void ieee80211_add_pending_skbs(struct ieee80211_local *local, + struct sk_buff_head *skbs) { struct ieee80211_hw *hw = &local->hw; struct sk_buff *skb; @@ -461,9 +460,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, __skb_queue_tail(&local->pending[queue], skb); } - if (fn) - fn(data); - for (i = 0; i < hw->queues; i++) __ieee80211_wake_queue(hw, i, IEEE80211_QUEUE_STOP_REASON_SKB_ADD); @@ -1741,6 +1737,26 @@ int ieee80211_reconfig(struct ieee80211_local *local) IEEE80211_QUEUE_STOP_REASON_SUSPEND); /* + * Reconfigure sched scan if it was interrupted by FW restart or + * suspend. + */ + mutex_lock(&local->mtx); + sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, + lockdep_is_held(&local->mtx)); + if (sched_scan_sdata && local->sched_scan_req) + /* + * Sched scan stopped, but we don't want to report it. Instead, + * we're trying to reschedule. + */ + if (__ieee80211_request_sched_scan_start(sched_scan_sdata, + local->sched_scan_req)) + sched_scan_stopped = true; + mutex_unlock(&local->mtx); + + if (sched_scan_stopped) + cfg80211_sched_scan_stopped(local->hw.wiphy); + + /* * If this is for hw restart things are still running. * We may want to change that later, however. */ @@ -1768,26 +1784,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) WARN_ON(1); #endif - /* - * Reconfigure sched scan if it was interrupted by FW restart or - * suspend. - */ - mutex_lock(&local->mtx); - sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata, - lockdep_is_held(&local->mtx)); - if (sched_scan_sdata && local->sched_scan_req) - /* - * Sched scan stopped, but we don't want to report it. Instead, - * we're trying to reschedule. 
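
[Editorial note: the three mac80211 hunks above (sta_info.c, sta_info.h, tx.c) close a race between buffering a frame for a dozing station and flushing its queues on wakeup: both paths now serialize on the new sta->ps_lock, and the PS flags are cleared only while the lock is held, so a buffered frame can never be reordered behind frames released by a concurrent wakeup. A toy userspace model of that locking discipline follows; every name in it is a hypothetical stand-in, not mac80211 API.]

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* One lock orders "buffer for sleeping STA" against "wake up and flush". */
static pthread_mutex_t ps_lock = PTHREAD_MUTEX_INITIALIZER;
static bool ps_active = true;   /* stands in for WLAN_STA_PS_STA/PS_DRIVER */
static int ps_buf[64], ps_len;  /* stands in for sta->ps_tx_buf */

static void tx_frame(int frame)          /* ~ ieee80211_tx_h_unicast_ps_buf */
{
	pthread_mutex_lock(&ps_lock);
	if (!ps_active) {                /* STA woke up in the meantime */
		pthread_mutex_unlock(&ps_lock);
		printf("tx %d directly\n", frame);
		return;
	}
	ps_buf[ps_len++] = frame;        /* buffer while the STA sleeps */
	pthread_mutex_unlock(&ps_lock);
}

static void deliver_wakeup(void)         /* ~ ieee80211_sta_ps_deliver_wakeup */
{
	pthread_mutex_lock(&ps_lock);
	for (int i = 0; i < ps_len; i++) /* flush to the pending queue */
		printf("flush %d\n", ps_buf[i]);
	ps_len = 0;
	ps_active = false;               /* clear flags under the same lock */
	pthread_mutex_unlock(&ps_lock);
}

int main(void)
{
	tx_frame(1);        /* buffered */
	deliver_wakeup();   /* flushes frame 1, clears ps_active */
	tx_frame(2);        /* sent directly, strictly after frame 1 */
	return 0;
}
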
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 676dc0967f37..b8700d417a9c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -435,9 +435,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 }
 
-void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
-				   struct sk_buff_head *skbs,
-				   void (*fn)(void *data), void *data)
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+				struct sk_buff_head *skbs)
 {
 	struct ieee80211_hw *hw = &local->hw;
 	struct sk_buff *skb;
@@ -461,9 +460,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
 		__skb_queue_tail(&local->pending[queue], skb);
 	}
 
-	if (fn)
-		fn(data);
-
 	for (i = 0; i < hw->queues; i++)
 		__ieee80211_wake_queue(hw, i,
 				       IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
@@ -1741,6 +1737,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 					IEEE80211_QUEUE_STOP_REASON_SUSPEND);
 
 	/*
+	 * Reconfigure sched scan if it was interrupted by FW restart or
+	 * suspend.
+	 */
+	mutex_lock(&local->mtx);
+	sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
+						     lockdep_is_held(&local->mtx));
+	if (sched_scan_sdata && local->sched_scan_req)
+		/*
+		 * Sched scan stopped, but we don't want to report it. Instead,
+		 * we're trying to reschedule.
+		 */
+		if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
+							 local->sched_scan_req))
+			sched_scan_stopped = true;
+	mutex_unlock(&local->mtx);
+
+	if (sched_scan_stopped)
+		cfg80211_sched_scan_stopped(local->hw.wiphy);
+
+	/*
 	 * If this is for hw restart things are still running.
 	 * We may want to change that later, however.
 	 */
@@ -1768,26 +1784,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 	WARN_ON(1);
 #endif
 
-	/*
-	 * Reconfigure sched scan if it was interrupted by FW restart or
-	 * suspend.
-	 */
-	mutex_lock(&local->mtx);
-	sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
-						     lockdep_is_held(&local->mtx));
-	if (sched_scan_sdata && local->sched_scan_req)
-		/*
-		 * Sched scan stopped, but we don't want to report it. Instead,
-		 * we're trying to reschedule.
-		 */
-		if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
-							 local->sched_scan_req))
-			sched_scan_stopped = true;
-	mutex_unlock(&local->mtx);
-
-	if (sched_scan_stopped)
-		cfg80211_sched_scan_stopped(local->hw.wiphy);
-
 	return 0;
 }
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 21211c60ca98..d51422c778de 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -154,6 +154,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
 		return IEEE80211_AC_BE;
 	}
 
+	if (skb->protocol == sdata->control_port_protocol) {
+		skb->priority = 7;
+		return ieee80211_downgrade_queue(sdata, skb);
+	}
+
 	/* use the data classifier to determine what 802.1d tag the
 	 * data frame has */
 	rcu_read_lock();
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index bb322d0beb48..b9f0e0374322 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1310,27 +1310,22 @@ ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
 }
 
 static int
-ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
+ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
 {
 #ifdef CONFIG_NF_NAT_NEEDED
 	int ret;
 
-	if (cda[CTA_NAT_DST]) {
-		ret = ctnetlink_parse_nat_setup(ct,
-						NF_NAT_MANIP_DST,
-						cda[CTA_NAT_DST]);
-		if (ret < 0)
-			return ret;
-	}
-	if (cda[CTA_NAT_SRC]) {
-		ret = ctnetlink_parse_nat_setup(ct,
-						NF_NAT_MANIP_SRC,
-						cda[CTA_NAT_SRC]);
-		if (ret < 0)
-			return ret;
-	}
-	return 0;
+	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
+					cda[CTA_NAT_DST]);
+	if (ret < 0)
+		return ret;
+
+	ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
+					cda[CTA_NAT_SRC]);
+	return ret;
 #else
+	if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+		return 0;
 	return -EOPNOTSUPP;
 #endif
 }
@@ -1659,11 +1654,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
 			goto err2;
 	}
 
-	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
-		err = ctnetlink_change_nat(ct, cda);
-		if (err < 0)
-			goto err2;
-	}
+	err = ctnetlink_setup_nat(ct, cda);
+	if (err < 0)
+		goto err2;
 
 	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
 	nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d3f5cd6dd962..52ca952b802c 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -432,15 +432,15 @@ nf_nat_setup_info(struct nf_conn *ct,
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
 
-unsigned int
-nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+static unsigned int
+__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
 {
 	/* Force range to this IP; let proto decide mapping for
 	 * per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
 	 * Use reply in case it's already been mangled (eg local packet).
 	 */
 	union nf_inet_addr ip =
-		(HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+		(manip == NF_NAT_MANIP_SRC ?
 		 ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
 		 ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
 	struct nf_nat_range range = {
@@ -448,7 +448,13 @@ nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
 		.min_addr = ip,
 		.max_addr = ip,
 	};
-	return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+	return nf_nat_setup_info(ct, &range, manip);
+}
+
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+	return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
 }
 EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
 
@@ -702,9 +708,9 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
 
 static int
 nfnetlink_parse_nat(const struct nlattr *nat,
-		    const struct nf_conn *ct, struct nf_nat_range *range)
+		    const struct nf_conn *ct, struct nf_nat_range *range,
+		    const struct nf_nat_l3proto *l3proto)
 {
-	const struct nf_nat_l3proto *l3proto;
 	struct nlattr *tb[CTA_NAT_MAX+1];
 	int err;
 
@@ -714,38 +720,46 @@ nfnetlink_parse_nat(const struct nlattr *nat,
 	if (err < 0)
 		return err;
 
-	rcu_read_lock();
-	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
-	if (l3proto == NULL) {
-		err = -EAGAIN;
-		goto out;
-	}
 	err = l3proto->nlattr_to_range(tb, range);
 	if (err < 0)
-		goto out;
+		return err;
 
 	if (!tb[CTA_NAT_PROTO])
-		goto out;
+		return 0;
 
-	err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
-out:
-	rcu_read_unlock();
-	return err;
+	return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
 }
 
+/* This function is called under rcu_read_lock() */
 static int
 nfnetlink_parse_nat_setup(struct nf_conn *ct,
 			  enum nf_nat_manip_type manip,
 			  const struct nlattr *attr)
 {
 	struct nf_nat_range range;
+	const struct nf_nat_l3proto *l3proto;
 	int err;
 
-	err = nfnetlink_parse_nat(attr, ct, &range);
+	/* Should not happen, restricted to creating new conntracks
+	 * via ctnetlink.
+	 */
+	if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
+		return -EEXIST;
+
+	/* Make sure that L3 NAT is there by when we call nf_nat_setup_info to
+	 * attach the null binding, otherwise this may oops.
+	 */
+	l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+	if (l3proto == NULL)
+		return -EAGAIN;
+
+	/* No NAT information has been passed, allocate the null-binding */
+	if (attr == NULL)
+		return __nf_nat_alloc_null_binding(ct, manip);
+
+	err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
 	if (err < 0)
 		return err;
 
-	if (nf_nat_initialized(ct, manip))
-		return -EEXIST;
 	return nf_nat_setup_info(ct, &range, manip);
 }
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e8254ad2e5a9..425cf39af890 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -116,7 +116,7 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
 			  skb->sk->sk_socket->file->f_cred->fsgid);
 		read_unlock_bh(&skb->sk->sk_callback_lock);
 		break;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	case NFT_META_RTCLASSID: {
 		const struct dst_entry *dst = skb_dst(skb);
 
@@ -199,7 +199,7 @@ static int nft_meta_init_validate_get(uint32_t key)
 	case NFT_META_OIFTYPE:
 	case NFT_META_SKUID:
 	case NFT_META_SKGID:
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
 	case NFT_META_RTCLASSID:
 #endif
 #ifdef CONFIG_NETWORK_SECMARK
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index a2aeb318678f..85daa84bfdfe 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -135,7 +135,8 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
 	if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
 		return ERR_PTR(-EINVAL);
 
-	if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
+	    base != NFT_PAYLOAD_LL_HEADER)
 		return &nft_payload_fast_ops;
 	else
 		return &nft_payload_ops;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 8a310f239c93..b718a52a4654 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -21,9 +21,9 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
 {
 	switch (pkt->ops->pf) {
 	case NFPROTO_IPV4:
-		nft_reject_ipv4_eval(expr, data, pkt);
+		return nft_reject_ipv4_eval(expr, data, pkt);
 	case NFPROTO_IPV6:
-		nft_reject_ipv6_eval(expr, data, pkt);
+		return nft_reject_ipv6_eval(expr, data, pkt);
 	}
 }
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fdf51353cf78..04748ab649c2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1489,8 +1489,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 	if (addr->sa_family != AF_NETLINK)
 		return -EINVAL;
 
-	/* Only superuser is allowed to send multicasts */
-	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+	if ((nladdr->nl_groups || nladdr->nl_pid) &&
+	    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
 		return -EPERM;
 
 	if (!nlk->portid)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 46bda010bf11..56db888b1cd5 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -301,7 +301,7 @@ static int nci_open_device(struct nci_dev *ndev)
 	rc = __nci_request(ndev, nci_reset_req, 0,
 			   msecs_to_jiffies(NCI_RESET_TIMEOUT));
 
-	if (ndev->ops->setup(ndev))
+	if (ndev->ops->setup)
 		ndev->ops->setup(ndev);
 
 	if (!rc) {
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1cb413fead89..4f505a006896 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -334,18 +334,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 			qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
 						      tb[TCA_TBF_PTAB]));
 
-	if (q->qdisc != &noop_qdisc) {
-		err = fifo_set_limit(q->qdisc, qopt->limit);
-		if (err)
-			goto done;
-	} else if (qopt->limit > 0) {
-		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
-		if (IS_ERR(child)) {
-			err = PTR_ERR(child);
-			goto done;
-		}
-	}
-
 	buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
 	mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -390,6 +378,18 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
 		goto done;
 	}
 
+	if (q->qdisc != &noop_qdisc) {
+		err = fifo_set_limit(q->qdisc, qopt->limit);
+		if (err)
+			goto done;
+	} else if (qopt->limit > 0) {
+		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+		if (IS_ERR(child)) {
+			err = PTR_ERR(child);
+			goto done;
+		}
+	}
+
 	sch_tree_lock(sch);
 	if (child) {
 		qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f558433537b8..ee13d28d39d1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1239,78 +1239,107 @@ void sctp_assoc_update(struct sctp_association *asoc,
 }
 
 /* Update the retran path for sending a retransmitted packet.
- * Round-robin through the active transports, else round-robin
- * through the inactive transports as this is the next best thing
- * we can try.
+ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
+ *
+ *   When there is outbound data to send and the primary path
+ *   becomes inactive (e.g., due to failures), or where the
+ *   SCTP user explicitly requests to send data to an
+ *   inactive destination transport address, before reporting
+ *   an error to its ULP, the SCTP endpoint should try to send
+ *   the data to an alternate active destination transport
+ *   address if one exists.
+ *
+ *   When retransmitting data that timed out, if the endpoint
+ *   is multihomed, it should consider each source-destination
+ *   address pair in its retransmission selection policy.
+ *   When retransmitting timed-out data, the endpoint should
+ *   attempt to pick the most divergent source-destination
+ *   pair from the original source-destination pair to which
+ *   the packet was transmitted.
+ *
+ *   Note: Rules for picking the most divergent source-destination
+ *   pair are an implementation decision and are not specified
+ *   within this document.
+ *
+ * Our basic strategy is to round-robin transports in priorities
+ * according to sctp_state_prio_map[] e.g., if no such
+ * transport with state SCTP_ACTIVE exists, round-robin through
+ * SCTP_UNKNOWN, etc. You get the picture.
  */
-void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+static const u8 sctp_trans_state_to_prio_map[] = {
+	[SCTP_ACTIVE]	= 3,	/* best case */
+	[SCTP_UNKNOWN]	= 2,
+	[SCTP_PF]	= 1,
+	[SCTP_INACTIVE] = 0,	/* worst case */
+};
+
+static u8 sctp_trans_score(const struct sctp_transport *trans)
 {
-	struct sctp_transport *t, *next;
-	struct list_head *head = &asoc->peer.transport_addr_list;
-	struct list_head *pos;
+	return sctp_trans_state_to_prio_map[trans->state];
+}
 
-	if (asoc->peer.transport_count == 1)
-		return;
+static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
+						    struct sctp_transport *best)
+{
+	if (best == NULL)
+		return curr;
 
-	/* Find the next transport in a round-robin fashion. */
-	t = asoc->peer.retran_path;
-	pos = &t->transports;
-	next = NULL;
+	return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
+}
 
-	while (1) {
-		/* Skip the head. */
-		if (pos->next == head)
-			pos = head->next;
-		else
-			pos = pos->next;
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+{
+	struct sctp_transport *trans = asoc->peer.retran_path;
+	struct sctp_transport *trans_next = NULL;
 
-		t = list_entry(pos, struct sctp_transport, transports);
+	/* We're done as we only have the one and only path. */
+	if (asoc->peer.transport_count == 1)
+		return;
+	/* If active_path and retran_path are the same and active,
+	 * then this is the only active path. Use it.
+	 */
+	if (asoc->peer.active_path == asoc->peer.retran_path &&
+	    asoc->peer.active_path->state == SCTP_ACTIVE)
+		return;
 
-		/* We have exhausted the list, but didn't find any
-		 * other active transports. If so, use the next
-		 * transport.
-		 */
-		if (t == asoc->peer.retran_path) {
-			t = next;
+	/* Iterate from retran_path's successor back to retran_path. */
+	for (trans = list_next_entry(trans, transports); 1;
+	     trans = list_next_entry(trans, transports)) {
+		/* Manually skip the head element. */
+		if (&trans->transports == &asoc->peer.transport_addr_list)
+			continue;
+		if (trans->state == SCTP_UNCONFIRMED)
+			continue;
+		trans_next = sctp_trans_elect_best(trans, trans_next);
+		/* Active is good enough for immediate return. */
+		if (trans_next->state == SCTP_ACTIVE)
 			break;
-		}
-
-		/* Try to find an active transport. */
-
-		if ((t->state == SCTP_ACTIVE) ||
-		    (t->state == SCTP_UNKNOWN)) {
+		/* We've reached the end, time to update path. */
+		if (trans == asoc->peer.retran_path)
 			break;
-		} else {
-			/* Keep track of the next transport in case
-			 * we don't find any active transport.
-			 */
-			if (t->state != SCTP_UNCONFIRMED && !next)
-				next = t;
-		}
 	}
 
-	if (t)
-		asoc->peer.retran_path = t;
-	else
-		t = asoc->peer.retran_path;
+	if (trans_next != NULL)
+		asoc->peer.retran_path = trans_next;
 
-	pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
-		 &t->ipaddr.sa);
+	pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
 }
 
-/* Choose the transport for sending retransmit packet. */
-struct sctp_transport *sctp_assoc_choose_alter_transport(
-	struct sctp_association *asoc, struct sctp_transport *last_sent_to)
+struct sctp_transport *
+sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
+				  struct sctp_transport *last_sent_to)
 {
 	/* If this is the first time packet is sent, use the active path,
 	 * else use the retran path. If the last packet was sent over the
 	 * retran path, update the retran path and use it.
 	 */
-	if (!last_sent_to)
+	if (last_sent_to == NULL) {
 		return asoc->peer.active_path;
-	else {
+	} else {
 		if (last_sent_to == asoc->peer.retran_path)
 			sctp_assoc_update_retran_path(asoc);
+
 		return asoc->peer.retran_path;
 	}
 }
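
[Editorial note: the rewritten sctp_assoc_update_retran_path() above replaces the two-pass round-robin with a single scored walk: every transport except UNCONFIRMED ones is ranked by state priority and the best one wins, with an early exit as soon as an ACTIVE transport is seen. A self-contained sketch of that election follows, using toy arrays rather than the kernel's circular list walk.]

#include <stdio.h>

/* Toy model of the new selection. States and scores mirror
 * sctp_trans_state_to_prio_map in the hunk above.
 */
enum state { INACTIVE, PF, UNKNOWN, ACTIVE };
static const int score[] = {
	[INACTIVE] = 0, [PF] = 1, [UNKNOWN] = 2, [ACTIVE] = 3,
};

int main(void)
{
	enum state transports[] = { PF, UNKNOWN, INACTIVE, UNKNOWN };
	int n = 4, best = -1;

	for (int i = 0; i < n; i++) {
		/* keep the highest-scoring transport seen so far */
		if (best < 0 || score[transports[i]] > score[transports[best]])
			best = i;
		if (transports[best] == ACTIVE)  /* good enough, stop early */
			break;
	}
	printf("retran_path -> transport %d (state %d)\n",
	       best, transports[best]);
	return 0;
}
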
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bd859154000e..5d6883ff00c3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -495,11 +495,12 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
 	}
 
 	/* If the transport error count is greater than the pf_retrans
-	 * threshold, and less than pathmaxrtx, then mark this transport
-	 * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1,
-	 * point 1
+	 * threshold, and less than pathmaxrtx, and if the current state
+	 * is not SCTP_UNCONFIRMED, then mark this transport as Partially
+	 * Failed, see SCTP Quick Failover Draft, section 5.1
 	 */
 	if ((transport->state != SCTP_PF) &&
+	    (transport->state != SCTP_UNCONFIRMED) &&
 	    (asoc->pf_retrans < transport->pathmaxrxt) &&
 	    (transport->error_count > asoc->pf_retrans)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 591b44d3b7de..ae65b6b5973a 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -758,6 +758,13 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
 		struct sctp_chunk auth;
 		sctp_ierror_t ret;
 
+		/* Make sure that we and the peer are AUTH capable */
+		if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+			kfree_skb(chunk->auth_chunk);
+			sctp_association_free(new_asoc);
+			return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+		}
+
 		/* set-up our fake chunk so that we can process it */
 		auth.skb = chunk->auth_chunk;
 		auth.asoc = chunk->asoc;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a38c89969c68..574b86193b15 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -610,8 +610,13 @@ static struct notifier_block notifier = {
 
 int tipc_bearer_setup(void)
 {
+	int err;
+
+	err = register_netdevice_notifier(&notifier);
+	if (err)
+		return err;
 	dev_add_pack(&tipc_packet_type);
-	return register_netdevice_notifier(&notifier);
+	return 0;
 }
 
 void tipc_bearer_cleanup(void)
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c301a9a592d8..e74eef2e7490 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -181,7 +181,7 @@ static struct sk_buff *cfg_set_own_addr(void)
 	if (tipc_own_addr)
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (cannot change node address once assigned)");
-	tipc_core_start_net(addr);
+	tipc_net_start(addr);
 	return tipc_cfg_reply_none();
 }
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f9e88d8b04ca..80c20647b3d2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -77,37 +77,13 @@ struct sk_buff *tipc_buf_acquire(u32 size)
 }
 
 /**
- * tipc_core_stop_net - shut down TIPC networking sub-systems
- */
-static void tipc_core_stop_net(void)
-{
-	tipc_net_stop();
-	tipc_bearer_cleanup();
-}
-
-/**
- * start_net - start TIPC networking sub-systems
- */
-int tipc_core_start_net(unsigned long addr)
-{
-	int res;
-
-	tipc_net_start(addr);
-	res = tipc_bearer_setup();
-	if (res < 0)
-		goto err;
-	return res;
-
-err:
-	tipc_core_stop_net();
-	return res;
-}
-
-/**
  * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
  */
 static void tipc_core_stop(void)
 {
+	tipc_handler_stop();
+	tipc_net_stop();
+	tipc_bearer_cleanup();
 	tipc_netlink_stop();
 	tipc_cfg_stop();
 	tipc_subscr_stop();
@@ -122,30 +98,65 @@ static void tipc_core_stop(void)
  */
 static int tipc_core_start(void)
 {
-	int res;
+	int err;
 
 	get_random_bytes(&tipc_random, sizeof(tipc_random));
 
-	res = tipc_handler_start();
-	if (!res)
-		res = tipc_ref_table_init(tipc_max_ports, tipc_random);
-	if (!res)
-		res = tipc_nametbl_init();
-	if (!res)
-		res = tipc_netlink_start();
-	if (!res)
-		res = tipc_socket_init();
-	if (!res)
-		res = tipc_register_sysctl();
-	if (!res)
-		res = tipc_subscr_start();
-	if (!res)
-		res = tipc_cfg_init();
-	if (res) {
-		tipc_handler_stop();
-		tipc_core_stop();
-	}
-	return res;
+	err = tipc_handler_start();
+	if (err)
+		goto out_handler;
+
+	err = tipc_ref_table_init(tipc_max_ports, tipc_random);
+	if (err)
+		goto out_reftbl;
+
+	err = tipc_nametbl_init();
+	if (err)
+		goto out_nametbl;
+
+	err = tipc_netlink_start();
+	if (err)
+		goto out_netlink;
+
+	err = tipc_socket_init();
+	if (err)
+		goto out_socket;
+
+	err = tipc_register_sysctl();
+	if (err)
+		goto out_sysctl;
+
+	err = tipc_subscr_start();
+	if (err)
+		goto out_subscr;
+
+	err = tipc_cfg_init();
+	if (err)
+		goto out_cfg;
+
+	err = tipc_bearer_setup();
+	if (err)
+		goto out_bearer;
+
+	return 0;
+out_bearer:
+	tipc_cfg_stop();
+out_cfg:
+	tipc_subscr_stop();
+out_subscr:
+	tipc_unregister_sysctl();
+out_sysctl:
+	tipc_socket_stop();
+out_socket:
+	tipc_netlink_stop();
+out_netlink:
+	tipc_nametbl_stop();
+out_nametbl:
+	tipc_ref_table_stop();
+out_reftbl:
+	tipc_handler_stop();
+out_handler:
+	return err;
 }
 
 static int __init tipc_init(void)
@@ -174,8 +185,6 @@ static int __init tipc_init(void)
 
 static void __exit tipc_exit(void)
 {
-	tipc_handler_stop();
-	tipc_core_stop_net();
 	tipc_core_stop();
 	pr_info("Deactivated\n");
 }
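
[Editorial note: tipc_core_start() above switches from the chained "if (!res)" style to the conventional kernel goto-unwind: each failure jumps to a label that rolls back exactly the steps that already succeeded, in reverse order. That guaranteed ordering is also why the enabled/registered guard flags torn out of the following tipc files are no longer needed. A minimal standalone illustration of the pattern, with hypothetical init/stop functions:]

#include <stdio.h>

/* Toy version of the goto-unwind pattern adopted above. */
static int init_a(void) { puts("a up");   return 0; }
static int init_b(void) { puts("b up");   return 0; }
static int init_c(void) { puts("c fail"); return -1; }
static void stop_a(void) { puts("a down"); }
static void stop_b(void) { puts("b down"); }

static int core_start(void)
{
	int err;

	err = init_a();
	if (err)
		goto out;
	err = init_b();
	if (err)
		goto out_a;
	err = init_c();
	if (err)
		goto out_b;
	return 0;
out_b:			/* undo only what succeeded, newest first */
	stop_b();
out_a:
	stop_a();
out:
	return err;
}

int main(void)
{
	return core_start() ? 1 : 0;
}
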
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 5569d96b4da3..4dfe137587bb 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -90,7 +90,6 @@ extern int tipc_random __read_mostly;
 /*
  * Routines available to privileged subsystems
  */
-int tipc_core_start_net(unsigned long);
 int tipc_handler_start(void);
 void tipc_handler_stop(void);
 int tipc_netlink_start(void);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 92a1533af4e0..48302be175ce 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -945,9 +945,6 @@ void tipc_nametbl_stop(void)
 {
 	u32 i;
 
-	if (!table.types)
-		return;
-
 	/* Verify name table is empty, then release it */
 	write_lock_bh(&tipc_nametbl_lock);
 	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 9f72a6376362..3aaf73de9e2d 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -83,8 +83,6 @@ static struct genl_ops tipc_genl_ops[] = {
 	},
 };
 
-static int tipc_genl_family_registered;
-
 int tipc_netlink_start(void)
 {
 	int res;
@@ -94,16 +92,10 @@ int tipc_netlink_start(void)
 		pr_err("Failed to register netlink interface\n");
 		return res;
 	}
-
-	tipc_genl_family_registered = 1;
 	return 0;
 }
 
 void tipc_netlink_stop(void)
 {
-	if (!tipc_genl_family_registered)
-		return;
-
 	genl_unregister_family(&tipc_genl_family);
-	tipc_genl_family_registered = 0;
 }
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 2a2a938dc22c..de3d593e2fee 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -126,9 +126,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
  */
 void tipc_ref_table_stop(void)
 {
-	if (!tipc_ref_table.entries)
-		return;
-
 	vfree(tipc_ref_table.entries);
 	tipc_ref_table.entries = NULL;
 }
diff --git a/net/tipc/server.c b/net/tipc/server.c
index b635ca347a87..373979789a73 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -573,7 +573,6 @@ int tipc_server_start(struct tipc_server *s)
 		kmem_cache_destroy(s->rcvbuf_cache);
 		return ret;
 	}
-	s->enabled = 1;
 	return ret;
 }
 
@@ -583,10 +582,6 @@ void tipc_server_stop(struct tipc_server *s)
 	int total = 0;
 	int id;
 
-	if (!s->enabled)
-		return;
-
-	s->enabled = 0;
 	spin_lock_bh(&s->idr_lock);
 	for (id = 0; total < s->idr_in_use; id++) {
 		con = idr_find(&s->conn_idr, id);
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 98b23f20bc0f..be817b0b547e 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -56,7 +56,6 @@
  * @name: server name
 * @imp: message importance
 * @type: socket type
- * @enabled: identify whether server is launched or not
 */
 struct tipc_server {
 	struct idr conn_idr;
@@ -74,7 +73,6 @@ struct tipc_server {
 	const char name[TIPC_SERVER_NAME_LEN];
 	int imp;
 	int type;
-	int enabled;
 };
 
 int tipc_conn_sendmsg(struct tipc_server *s, int conid,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index aab4948f0aff..a4cf274455aa 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -70,8 +70,6 @@ static const struct proto_ops msg_ops;
 static struct proto tipc_proto;
 static struct proto tipc_proto_kern;
 
-static int sockets_enabled;
-
 /*
  * Revised TIPC socket locking policy:
  *
@@ -2027,8 +2025,6 @@ int tipc_socket_init(void)
 		proto_unregister(&tipc_proto);
 		goto out;
 	}
-
-	sockets_enabled = 1;
  out:
 	return res;
 }
@@ -2038,10 +2034,6 @@ int tipc_socket_init(void)
  */
 void tipc_socket_stop(void)
 {
-	if (!sockets_enabled)
-		return;
-
-	sockets_enabled = 0;
 	sock_unregister(tipc_family_ops.family);
 	proto_unregister(&tipc_proto);
 }
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 9b897fca7487..f0541370e68e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1700,7 +1700,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
 		return;
 	case NL80211_REGDOM_SET_BY_USER:
 		treatment = reg_process_hint_user(reg_request);
-		if (treatment == REG_REQ_OK ||
+		if (treatment == REG_REQ_IGNORE ||
 		    treatment == REG_REQ_ALREADY_SET)
 			return;
 		schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
@@ -2373,6 +2373,7 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
 int set_regdom(const struct ieee80211_regdomain *rd)
 {
 	struct regulatory_request *lr;
+	bool user_reset = false;
 	int r;
 
 	if (!reg_is_valid_request(rd->alpha2)) {
@@ -2389,6 +2390,7 @@ int set_regdom(const struct ieee80211_regdomain *rd)
 		break;
 	case NL80211_REGDOM_SET_BY_USER:
 		r = reg_set_rd_user(rd, lr);
+		user_reset = true;
 		break;
 	case NL80211_REGDOM_SET_BY_DRIVER:
 		r = reg_set_rd_driver(rd, lr);
@@ -2402,8 +2404,14 @@ int set_regdom(const struct ieee80211_regdomain *rd)
 	}
 
 	if (r) {
-		if (r == -EALREADY)
+		switch (r) {
+		case -EALREADY:
 			reg_set_request_processed();
+			break;
+		default:
+			/* Back to world regulatory in case of errors */
+			restore_regulatory_settings(user_reset);
+		}
 
 		kfree(rd);
 		return r;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4b98b25793c5..1d5c7bf29938 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1158,7 +1158,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 	if (hlist_unhashed(&pol->bydst))
 		return NULL;
 
-	hlist_del(&pol->bydst);
+	hlist_del_init(&pol->bydst);
 	hlist_del(&pol->byidx);
 	list_del(&pol->walk.all);
 	net->xfrm.policy_count[dir]--;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a26b7aa79475..40f1b3e92e78 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1159,6 +1159,11 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
 	}
 	x->props.aalgo = orig->props.aalgo;
 
+	if (orig->aead) {
+		x->aead = xfrm_algo_aead_clone(orig->aead);
+		if (!x->aead)
+			goto error;
+	}
 	if (orig->ealg) {
 		x->ealg = xfrm_algo_clone(orig->ealg);
 		if (!x->ealg)
@@ -1201,6 +1206,9 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
 	x->props.flags = orig->props.flags;
 	x->props.extra_flags = orig->props.extra_flags;
+	x->tfcpad = orig->tfcpad;
+	x->replay_maxdiff = orig->replay_maxdiff;
+	x->replay_maxage = orig->replay_maxage;
 	x->curlft.add_time = orig->curlft.add_time;
 	x->km.state = orig->km.state;
 	x->km.seq = orig->km.seq;
@@ -1215,11 +1223,12 @@ out:
 	return NULL;
 }
 
-/* net->xfrm.xfrm_state_lock is held */
 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
 {
 	unsigned int h;
-	struct xfrm_state *x;
+	struct xfrm_state *x = NULL;
+
+	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 
 	if (m->reqid) {
 		h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1236,7 +1245,7 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
 					     m->old_family))
 				continue;
 			xfrm_state_hold(x);
-			return x;
+			break;
 		}
 	} else {
 		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1251,11 +1260,13 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
 					     m->old_family))
 				continue;
 			xfrm_state_hold(x);
-			return x;
+			break;
 		}
 	}
 
-	return NULL;
+	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+	return x;
 }
 EXPORT_SYMBOL(xfrm_migrate_state_find);
 
@@ -1451,7 +1462,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
 {
 	int err = 0;
 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
-	struct net *net = xs_net(*dst);
+	struct net *net = xs_net(*src);
 
 	if (!afinfo)
 		return -EAFNOSUPPORT;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1ae3ec7c18b0..c274179d60a2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -32,11 +32,6 @@
 #include <linux/in6.h>
 #endif
 
-static inline int aead_len(struct xfrm_algo_aead *alg)
-{
-	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
-}
-
 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
 {
 	struct nlattr *rt = attrs[type];
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index ef474098d9f1..17fa901418ae 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -257,7 +257,7 @@ case "$arg" in
 			&& compr="lzop -9 -f"
 		echo "$output_file" | grep -q "\.lz4$" \
 			&& [ -x "`which lz4 2> /dev/null`" ] \
-			&& compr="lz4 -9 -f"
+			&& compr="lz4 -l -9 -f"
 		echo "$output_file" | grep -q "\.cpio$" && compr="cat"
 		shift
 		;;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 10085de886fe..276e84b8a8e5 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -330,8 +330,7 @@ static void write_src(void)
 				printf("\tPTR\t_text + %#llx\n",
 				       table[i].addr - _text);
 			else
-				printf("\tPTR\t_text - %#llx\n",
-				       _text - table[i].addr);
+				printf("\tPTR\t%#llx\n", table[i].addr);
 		} else {
 			printf("\tPTR\t%#llx\n", table[i].addr);
 		}
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 40610984a1b5..99a45fdc1bbf 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1502,6 +1502,16 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 #define R_ARM_JUMP24	29
 #endif
 
+#ifndef R_ARM_THM_CALL
+#define R_ARM_THM_CALL		10
+#endif
+#ifndef R_ARM_THM_JUMP24
+#define R_ARM_THM_JUMP24	30
+#endif
+#ifndef R_ARM_THM_JUMP19
+#define R_ARM_THM_JUMP19	51
+#endif
+
 static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 {
 	unsigned int r_typ = ELF_R_TYPE(r->r_info);
@@ -1515,6 +1525,9 @@ static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
 	case R_ARM_PC24:
 	case R_ARM_CALL:
 	case R_ARM_JUMP24:
+	case R_ARM_THM_CALL:
+	case R_ARM_THM_JUMP24:
+	case R_ARM_THM_JUMP19:
 		/* From ARM ABI: ((S + A) | T) - P */
 		r->r_addend = (int)(long)(elf->hdr +
 					  sechdr->sh_offset +
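
[Editorial note: the modpost change above adds Thumb relocation handling, and the #ifndef fallbacks let it build against older elf.h headers that predate those constants while still preferring the system definitions when present. The same guard pattern in isolation, with the value taken from the hunk above:]

#include <stdio.h>
#include <elf.h>	/* may or may not define the Thumb relocation types */

/* Supply a numeric fallback only when elf.h does not already define it. */
#ifndef R_ARM_THM_CALL
#define R_ARM_THM_CALL 10
#endif

int main(void)
{
	printf("R_ARM_THM_CALL = %d\n", R_ARM_THM_CALL);
	return 0;
}
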
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index df3652ad15ef..8ed0bcc01386 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1026,6 +1026,9 @@ static void ad1884_fixup_thinkpad(struct hda_codec *codec,
 		spec->gen.keep_eapd_on = 1;
 		spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
 		spec->eapd_nid = 0x12;
+		/* Analog PC Beeper - allow firmware/ACPI beeps */
+		spec->beep_amp = HDA_COMPOSE_AMP_VAL(0x20, 3, 3, HDA_INPUT);
+		spec->gen.beep_nid = 0; /* no digital beep */
 	}
 }
 
@@ -1092,6 +1095,7 @@ static int patch_ad1884(struct hda_codec *codec)
 	spec = codec->spec;
 
 	spec->gen.mixer_nid = 0x20;
+	spec->gen.mixer_merge_nid = 0x21;
 	spec->gen.beep_nid = 0x10;
 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ec304f3ae3b4..850296a1e0ff 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4253,6 +4253,7 @@ static const struct hda_fixup alc269_fixups[] = {
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
@@ -5163,7 +5164,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
-	SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 44b0ba4feab3..1bed780e21d9 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
 		}
 		break;
 
+	case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
 	case USB_ID(0x046d, 0x0808):
 	case USB_ID(0x046d, 0x0809):
 	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index da8b7aa3d351..07b0b7542511 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -87,8 +87,8 @@ endif # BUILD_SRC
 # We process the rest of the Makefile if this is the final invocation of make
 ifeq ($(skip-makefile),)
 
-srctree		:= $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
-objtree		:= $(CURDIR)
+srctree		:= $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)))
+objtree		:= $(realpath $(CURDIR))
 src		:= $(srctree)
 obj		:= $(objtree)
 
@@ -112,7 +112,7 @@ export Q VERBOSE
 
 LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION)
 
-INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES)
+INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include $(CONFIG_INCLUDES)
 
 # Set compile option CFLAGS if not set elsewhere
 CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index f8465a811aa5..23bd69cb5ade 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -418,7 +418,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
 
 __attribute__((constructor)) static void init_preload(void)
 {
-	if (__init_state != done)
+	if (__init_state == done)
 		return;
 
 #ifndef __GLIBC__
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index 5334ad9d39b7..5334ad9d39b7 100644..100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/lib/lockdep/uinclude/linux/rcu.h
index 4c99fcb5da27..042ee8e463c9 100644
--- a/tools/lib/lockdep/uinclude/linux/rcu.h
+++ b/tools/lib/lockdep/uinclude/linux/rcu.h
@@ -13,4 +13,9 @@ static inline int rcu_is_cpu_idle(void)
 	return 1;
 }
 
+static inline bool rcu_is_watching(void)
+{
+	return false;
+}
+
 #endif
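
[Editorial note: the preload.c one-liner above inverts an initialization guard: the constructor must return early only when setup has already completed ("== done"), whereas the old "!= done" test made it bail out on first use and never initialize. A toy reproduction of the corrected guard follows; the state enum is a hypothetical stand-in, not liblockdep's actual internals.]

#include <stdio.h>

enum init_state { uninitialized, done };
static enum init_state __init_state = uninitialized;

/* Runs before main(); safe to call again later because of the guard. */
__attribute__((constructor)) static void init_preload(void)
{
	if (__init_state == done)	/* already initialized: nothing to do */
		return;
	puts("initializing preload state");
	__init_state = done;
}

int main(void)
{
	init_preload();		/* second call is now a no-op */
	return 0;
}
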