276 files changed, 3728 insertions, 1118 deletions
diff --git a/Documentation/security/Yama.txt b/Documentation/security/Yama.txt index e369de2d48cd..dd908cf64ecf 100644 --- a/Documentation/security/Yama.txt +++ b/Documentation/security/Yama.txt @@ -46,14 +46,13 @@ restrictions, it can call prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...) so that any otherwise allowed process (even those in external pid namespaces) may attach. -These restrictions do not change how ptrace via PTRACE_TRACEME operates. - -The sysctl settings are: +The sysctl settings (writable only with CAP_SYS_PTRACE) are: 0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other process running under the same uid, as long as it is dumpable (i.e. did not transition uids, start privileged, or have called - prctl(PR_SET_DUMPABLE...) already). + prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is + unchanged. 1 - restricted ptrace: a process must have a predefined relationship with the inferior it wants to call PTRACE_ATTACH on. By default, @@ -61,12 +60,13 @@ The sysctl settings are: classic criteria is also met. To change the relationship, an inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare an allowed debugger PID to call PTRACE_ATTACH on the inferior. + Using PTRACE_TRACEME is unchanged. 2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace - with PTRACE_ATTACH. + with PTRACE_ATTACH, or through children calling PTRACE_TRACEME. -3 - no attach: no processes may use ptrace with PTRACE_ATTACH. Once set, - this sysctl cannot be changed to a lower value. +3 - no attach: no processes may use ptrace with PTRACE_ATTACH nor via + PTRACE_TRACEME. Once set, this sysctl value cannot be changed. The original children-only logic was based on the restrictions in grsecurity. diff --git a/MAINTAINERS b/MAINTAINERS index 94b823f71e94..61ad79ea2b08 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -827,24 +827,24 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c ARM/INTEL IOP32X ARM ARCHITECTURE M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP33X ARM ARCHITECTURE -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP13XX ARM ARCHITECTURE M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IQ81342EX MACHINE SUPPORT M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -869,7 +869,7 @@ F: drivers/pcmcia/pxa2xx_stargate2.c ARM/INTEL XSC3 (MANZANO) ARM CORE M: Lennert Buytenhek <kernel@wantstofly.org> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -925,14 +925,14 @@ S: Maintained ARM/NOMADIK ARCHITECTURE M: Alessandro Rubini <rubini@unipv.it> -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> M: STEricsson <STEricsson_nomadik_linux@list.st.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-nomadik/ F: arch/arm/plat-nomadik/ F: 
drivers/i2c/busses/i2c-nomadik.c -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT M: Nelson Castillo <arhuaco@freaks-unidos.net> @@ -1146,7 +1146,7 @@ F: drivers/usb/host/ehci-w90x900.c F: drivers/video/nuc900fb.c ARM/U300 MACHINE SUPPORT -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: arch/arm/mach-u300/ @@ -1161,15 +1161,20 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/Ux500 ARM ARCHITECTURE M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com> -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-ux500/ +F: drivers/clocksource/clksrc-dbx500-prcmu.c F: drivers/dma/ste_dma40* +F: drivers/hwspinlock/u8500_hsem.c F: drivers/mfd/abx500* F: drivers/mfd/ab8500* -F: drivers/mfd/stmpe* +F: drivers/mfd/dbx500* +F: drivers/mfd/db8500* +F: drivers/pinctrl/pinctrl-nomadik* F: drivers/rtc/rtc-ab8500.c +F: drivers/rtc/rtc-pl031.c T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/VFP SUPPORT @@ -1227,9 +1232,9 @@ S: Maintained F: drivers/hwmon/asb100.c ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> W: http://sourceforge.net/projects/xscaleiop -S: Supported +S: Maintained F: Documentation/crypto/async-tx-api.txt F: crypto/async_tx/ F: drivers/dma/ @@ -2359,7 +2364,7 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul <vinod.koul@intel.com> -M: Dan Williams <dan.j.williams@intel.com> +M: Dan Williams <djbw@fb.com> S: Supported F: drivers/dma/ F: include/linux/dma* @@ -3094,7 +3099,7 @@ F: include/linux/gigaset_dev.h GPIO SUBSYSTEM M: Grant Likely <grant.likely@secretlab.ca> -M: Linus Walleij <linus.walleij@stericsson.com> +M: Linus Walleij <linus.walleij@linaro.org> S: Maintained T: git git://git.secretlab.ca/git/linux-2.6.git F: Documentation/gpio.txt @@ -3547,7 +3552,6 @@ K: \b(ABS|SYN)_MT_ INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support <intel-linux-scu@intel.com> -M: Dan Williams <dan.j.williams@intel.com> M: Dave Jiang <dave.jiang@intel.com> M: Ed Nadolski <edmund.nadolski@intel.com> L: linux-scsi@vger.kernel.org @@ -3590,8 +3594,8 @@ F: arch/x86/kernel/microcode_core.c F: arch/x86/kernel/microcode_intel.c INTEL I/OAT DMA DRIVER -M: Dan Williams <dan.j.williams@intel.com> -S: Supported +M: Dan Williams <djbw@fb.com> +S: Maintained F: drivers/dma/ioat* INTEL IOMMU (VT-d) @@ -3603,8 +3607,8 @@ F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER -M: Dan Williams <dan.j.williams@intel.com> -S: Maintained +M: Dan Williams <djbw@fb.com> +S: Odd fixes F: drivers/dma/iop-adma.c INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT @@ -5329,14 +5333,15 @@ PIN CONTROL SUBSYSTEM M: Linus Walleij <linus.walleij@linaro.org> S: Maintained F: drivers/pinctrl/ +F: include/linux/pinctrl/ PIN CONTROLLER - ST SPEAR -M: Viresh Kumar <viresh.linux@gmail.com> +M: Viresh Kumar <viresh.linux@gmail.com> L: spear-devel@list.st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.st.com/spear S: 
Maintained -F: driver/pinctrl/spear/ +F: drivers/pinctrl/spear/ PKTCDVD DRIVER M: Peter Osterlund <petero2@telia.com> diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index a874dbfb5ae6..e6138310e5ce 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi @@ -51,11 +51,11 @@ dma-apbh@80004000 { compatible = "fsl,imx23-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; ecc@80008000 { - reg = <0x80008000 2000>; + reg = <0x80008000 0x2000>; status = "disabled"; }; @@ -63,7 +63,7 @@ compatible = "fsl,imx23-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <13>, <56>; interrupt-names = "gpmi-dma", "bch"; @@ -72,14 +72,14 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <15 14>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; etm@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; status = "disabled"; }; @@ -87,7 +87,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx23-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx23-gpio", "fsl,mxs-gpio"; @@ -273,32 +273,32 @@ }; emi@80020000 { - reg = <0x80020000 2000>; + reg = <0x80020000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx23-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 0x2000>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; @@ -310,14 +310,14 @@ }; ssp1: ssp@80034000 { - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <2 20>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; tvenc@80038000 { - reg = <0x80038000 2000>; + reg = <0x80038000 0x2000>; status = "disabled"; }; }; @@ -330,37 +330,37 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; status = "disabled"; }; audio-out@80048000 { - reg = <0x80048000 2000>; + reg = <0x80048000 0x2000>; status = "disabled"; }; audio-in@8004c000 { - reg = <0x8004c000 2000>; + reg = <0x8004c000 0x2000>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; @@ -370,26 +370,26 @@ }; i2c@80058000 { - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; status = "disabled"; }; rtc@8005c000 { compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x8005c000 2000>; + reg = <0x8005c000 0x2000>; interrupts = <22>; }; pwm: pwm@80064000 { compatible = "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <5>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; @@ -429,7 +429,7 @@ ranges; usbctrl@80080000 { - reg = <0x80080000 0x10000>; + reg = 
<0x80080000 0x40000>; status = "disabled"; }; }; diff --git a/arch/arm/boot/dts/imx27-3ds.dts b/arch/arm/boot/dts/imx27-3ds.dts index d3f8296e19e0..0a8978a40ece 100644 --- a/arch/arm/boot/dts/imx27-3ds.dts +++ b/arch/arm/boot/dts/imx27-3ds.dts @@ -27,7 +27,7 @@ status = "okay"; }; - uart@1000a000 { + uart1: serial@1000a000 { fsl,uart-has-rtscts; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi index 00bae3aad5ab..5303ab680a34 100644 --- a/arch/arm/boot/dts/imx27.dtsi +++ b/arch/arm/boot/dts/imx27.dtsi @@ -19,6 +19,12 @@ serial3 = &uart4; serial4 = &uart5; serial5 = &uart6; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; }; avic: avic-interrupt-controller@e0000000 { diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi index 787efac68da8..3fa6d190fab4 100644 --- a/arch/arm/boot/dts/imx28.dtsi +++ b/arch/arm/boot/dts/imx28.dtsi @@ -57,18 +57,18 @@ }; hsadc@80002000 { - reg = <0x80002000 2000>; + reg = <0x80002000 0x2000>; interrupts = <13 87>; status = "disabled"; }; dma-apbh@80004000 { compatible = "fsl,imx28-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; perfmon@80006000 { - reg = <0x80006000 800>; + reg = <0x80006000 0x800>; interrupts = <27>; status = "disabled"; }; @@ -77,7 +77,7 @@ compatible = "fsl,imx28-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <88>, <41>; interrupt-names = "gpmi-dma", "bch"; @@ -86,28 +86,28 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <96 82>; fsl,ssp-dma-channel = <0>; status = "disabled"; }; ssp1: ssp@80012000 { - reg = <0x80012000 2000>; + reg = <0x80012000 0x2000>; interrupts = <97 83>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; ssp2: ssp@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; interrupts = <98 84>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; ssp3: ssp@80016000 { - reg = <0x80016000 2000>; + reg = <0x80016000 0x2000>; interrupts = <99 85>; fsl,ssp-dma-channel = <3>; status = "disabled"; @@ -117,7 +117,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx28-gpio", "fsl,mxs-gpio"; @@ -510,96 +510,96 @@ }; digctl@8001c000 { - reg = <0x8001c000 2000>; + reg = <0x8001c000 0x2000>; interrupts = <89>; status = "disabled"; }; etm@80022000 { - reg = <0x80022000 2000>; + reg = <0x80022000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx28-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; interrupts = <52 53 54>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 0x2000>; interrupts = <39>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; lcdif@80030000 { compatible = "fsl,imx28-lcdif"; - reg = <0x80030000 2000>; + reg = <0x80030000 0x2000>; interrupts = <38 86>; status = "disabled"; }; can0: can@80032000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80032000 2000>; + reg = <0x80032000 0x2000>; interrupts = <8>; status 
= "disabled"; }; can1: can@80034000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <9>; status = "disabled"; }; simdbg@8003c000 { - reg = <0x8003c000 200>; + reg = <0x8003c000 0x200>; status = "disabled"; }; simgpmisel@8003c200 { - reg = <0x8003c200 100>; + reg = <0x8003c200 0x100>; status = "disabled"; }; simsspsel@8003c300 { - reg = <0x8003c300 100>; + reg = <0x8003c300 0x100>; status = "disabled"; }; simmemsel@8003c400 { - reg = <0x8003c400 100>; + reg = <0x8003c400 0x100>; status = "disabled"; }; gpiomon@8003c500 { - reg = <0x8003c500 100>; + reg = <0x8003c500 0x100>; status = "disabled"; }; simenet@8003c700 { - reg = <0x8003c700 100>; + reg = <0x8003c700 0x100>; status = "disabled"; }; armjtag@8003c800 { - reg = <0x8003c800 100>; + reg = <0x8003c800 0x100>; status = "disabled"; }; }; @@ -612,45 +612,45 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { compatible = "fsl,imx28-saif"; - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; interrupts = <59 80>; fsl,saif-dma-channel = <4>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { compatible = "fsl,imx28-saif"; - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; interrupts = <58 81>; fsl,saif-dma-channel = <5>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; spdif@80054000 { - reg = <0x80054000 2000>; + reg = <0x80054000 0x2000>; interrupts = <45 66>; status = "disabled"; }; rtc@80056000 { compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x80056000 2000>; + reg = <0x80056000 0x2000>; interrupts = <29>; }; @@ -658,7 +658,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-i2c"; - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; interrupts = <111 68>; clock-frequency = <100000>; status = "disabled"; @@ -668,7 +668,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-i2c"; - reg = <0x8005a000 2000>; + reg = <0x8005a000 0x2000>; interrupts = <110 69>; clock-frequency = <100000>; status = "disabled"; @@ -676,14 +676,14 @@ pwm: pwm@80064000 { compatible = "fsl,imx28-pwm", "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <8>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts index de065b5976e6..cd86177a3ea2 100644 --- a/arch/arm/boot/dts/imx51-babbage.dts +++ b/arch/arm/boot/dts/imx51-babbage.dts @@ -53,7 +53,7 @@ spi-max-frequency = <6000000>; reg = <0>; interrupt-parent = <&gpio1>; - interrupts = <8>; + interrupts = <8 0x4>; regulators { sw1_reg: sw1 { diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi index 53cbaa3d4f90..aba28dc87fc8 100644 --- a/arch/arm/boot/dts/imx51.dtsi +++ b/arch/arm/boot/dts/imx51.dtsi @@ -17,6 +17,10 @@ serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; }; tzic: tz-interrupt-controller@e0000000 { diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts index 5b8eafcdbeec..da895e93a999 100644 --- a/arch/arm/boot/dts/imx53-ard.dts +++ b/arch/arm/boot/dts/imx53-ard.dts @@ -64,12 +64,32 @@ reg = <0xf4000000 
0x2000000>; phy-mode = "mii"; interrupt-parent = <&gpio2>; - interrupts = <31>; + interrupts = <31 0x8>; reg-io-width = <4>; + /* + * VDD33A and VDDVARIO of LAN9220 are supplied by + * SW4_3V3 of LTC3589. Before the regulator driver + * for this PMIC is available, we use a fixed dummy + * 3V3 regulator to get LAN9220 driver probing work. + */ + vdd33a-supply = <&reg_3p3v>; + vddvario-supply = <&reg_3p3v>; smsc,irq-push-pull; }; }; + regulators { + compatible = "simple-bus"; + + reg_3p3v: 3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + }; + gpio-keys { compatible = "gpio-keys"; diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi index fc79cdc4b4e6..cd37165edce5 100644 --- a/arch/arm/boot/dts/imx53.dtsi +++ b/arch/arm/boot/dts/imx53.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; tzic: tz-interrupt-controller@0fffc000 { diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts index d42e851ceb97..72f30f3e6171 100644 --- a/arch/arm/boot/dts/imx6q-sabrelite.dts +++ b/arch/arm/boot/dts/imx6q-sabrelite.dts @@ -53,6 +53,7 @@ fsl,pins = < 144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */ 121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */ + 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */ >; }; }; diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi index 3d3c64b014e6..fd57079f71a9 100644 --- a/arch/arm/boot/dts/imx6q.dtsi +++ b/arch/arm/boot/dts/imx6q.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; cpus { diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig index f725b9637b33..3c9f32f9b6b4 100644 --- a/arch/arm/configs/imx_v6_v7_defconfig +++ b/arch/arm/configs/imx_v6_v7_defconfig @@ -192,6 +192,7 @@ CONFIG_RTC_DRV_MC13XXX=y CONFIG_RTC_DRV_MXC=y CONFIG_DMADEVICES=y CONFIG_IMX_SDMA=y +CONFIG_MXS_DMA=y CONFIG_COMMON_CLK_DEBUG=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig index ccdb6357fb74..4edcfb4e4dee 100644 --- a/arch/arm/configs/mxs_defconfig +++ b/arch/arm/configs/mxs_defconfig @@ -34,7 +34,6 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_AEABI=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_AUTO_ZRELADDR=y CONFIG_FPE_NWFPE=y CONFIG_NET=y diff --git a/arch/arm/configs/tct_hammer_defconfig b/arch/arm/configs/tct_hammer_defconfig index 1d24f8458bef..71277a1591ba 100644 --- a/arch/arm/configs/tct_hammer_defconfig +++ b/arch/arm/configs/tct_hammer_defconfig @@ -7,7 +7,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_BUG is not set +# CONFIG_BUGVERBOSE is not set # CONFIG_ELF_CORE is not set # CONFIG_SHMEM is not set CONFIG_SLOB=y diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 5de69f2fcca9..f6b9fc70161b 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -162,38 +162,6 @@ static void __init davinci_ntosd2_map_io(void) dm644x_init(); } -/* - I2C initialization -*/ -static struct 
davinci_i2c_platform_data ntosd2_i2c_pdata = { - .bus_freq = 20 /* kHz */, - .bus_delay = 100 /* usec */, -}; - -static struct i2c_board_info __initdata ntosd2_i2c_info[] = { -}; - -static int ntosd2_init_i2c(void) -{ - int status; - - davinci_init_i2c(&ntosd2_i2c_pdata); - status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type); - if (status == 0) { - status = gpio_direction_input(NTOSD2_MSP430_IRQ); - if (status == 0) { - status = gpio_to_irq(NTOSD2_MSP430_IRQ); - if (status > 0) { - ntosd2_i2c_info[0].irq = status; - i2c_register_board_info(1, - ntosd2_i2c_info, - ARRAY_SIZE(ntosd2_i2c_info)); - } - } - } - return status; -} - static struct davinci_mmc_config davinci_ntosd2_mmc_config = { .wires = 4, .version = MMC_CTLR_VERSION_1 @@ -218,7 +186,6 @@ static __init void davinci_ntosd2_init(void) { struct clk *aemif_clk; struct davinci_soc_info *soc_info = &davinci_soc_info; - int status; aemif_clk = clk_get(NULL, "aemif"); clk_enable(aemif_clk); @@ -242,12 +209,6 @@ static __init void davinci_ntosd2_init(void) platform_add_devices(davinci_ntosd2_devices, ARRAY_SIZE(davinci_ntosd2_devices)); - /* Initialize I2C interface specific for this board */ - status = ntosd2_init_i2c(); - if (status < 0) - pr_warning("davinci_ntosd2_init: msp430 irq setup failed:" - " %d\n", status); - davinci_serial_init(&uart_config); dm644x_init_asp(&dm644x_ntosd2_snd_data); diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c index 373c3c00d24c..c0bc83a7663e 100644 --- a/arch/arm/mach-exynos/pm_domains.c +++ b/arch/arm/mach-exynos/pm_domains.c @@ -115,7 +115,7 @@ static __init int exynos_pm_dt_parse_domains(void) } #endif /* CONFIG_OF */ -static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, +static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, struct exynos_pm_domain *pd) { if (pdev->dev.bus) { diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c index 7aa6313fb167..f69ca4680049 100644 --- a/arch/arm/mach-imx/clk-imx27.c +++ b/arch/arm/mach-imx/clk-imx27.c @@ -223,7 +223,7 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0"); - clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0"); + clk_register_clkdev(clk[csi_ahb_gate], "ahb", "mx2-camera.0"); clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); @@ -250,8 +250,10 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1"); clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0"); clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad"); - clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma"); - clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma"); + clk_register_clkdev(clk[emma_ahb_gate], "emma-ahb", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ipg_gate], "emma-ipg", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ahb_gate], "ahb", "m2m-emmaprp.0"); + clk_register_clkdev(clk[emma_ipg_gate], "ipg", "m2m-emmaprp.0"); clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL); clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL); clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL); diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c index 
8e19e70f90f9..1253af2d9971 100644 --- a/arch/arm/mach-imx/clk-imx31.c +++ b/arch/arm/mach-imx/clk-imx31.c @@ -130,7 +130,7 @@ int __init mx31_clocks_init(unsigned long fref) clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0"); clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core"); clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); - clk_register_clkdev(clk[kpp_gate], "kpp", NULL); + clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad"); clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0"); clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0"); diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c index f6086693ebd2..4bdcaa97bd98 100644 --- a/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/arch/arm/mach-imx/clk-imx51-imx53.c @@ -303,6 +303,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, clk_prepare_enable(clk[aips_tz2]); /* fec */ clk_prepare_enable(clk[spba]); clk_prepare_enable(clk[emi_fast_gate]); /* fec */ + clk_prepare_enable(clk[emi_slow_gate]); /* eim */ clk_prepare_enable(clk[tmax1]); clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */ clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */ diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index ebf680bebdf2..3fa6c51390da 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c @@ -11,6 +11,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> +#include <linux/export.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/irq.h> diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 7b1055c8e0b9..3b2267529f5e 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c @@ -456,7 +456,7 @@ static void __init ap_init_timer(void) clk = clk_get_sys("ap_timer", NULL); BUG_ON(IS_ERR(clk)); - clk_enable(clk); + clk_prepare_enable(clk); rate = clk_get_rate(clk); writel(0, TIMER0_VA_BASE + TIMER_CTRL); diff --git a/arch/arm/mach-kirkwood/Makefile.boot b/arch/arm/mach-kirkwood/Makefile.boot index 2a576abf409b..a5717558ee89 100644 --- a/arch/arm/mach-kirkwood/Makefile.boot +++ b/arch/arm/mach-kirkwood/Makefile.boot @@ -9,5 +9,5 @@ dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-qnap-ts219.dtb dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb -dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb -dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig index ccdf83b17cf1..9a8bbda195b2 100644 --- a/arch/arm/mach-mxs/Kconfig +++ b/arch/arm/mach-mxs/Kconfig @@ -2,9 +2,6 @@ if ARCH_MXS source "arch/arm/mach-mxs/devices/Kconfig" -config MXS_OCOTP - bool - config SOC_IMX23 bool select ARM_AMBA @@ -66,7 +63,6 @@ config MACH_MX28EVK select MXS_HAVE_PLATFORM_MXS_SAIF select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_RTC_STMP3XXX - select MXS_OCOTP help Include support for MX28EVK platform. This includes specific configurations for the board and its peripherals. 
@@ -94,7 +90,6 @@ config MODULE_M28 select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_MXS_MMC select MXS_HAVE_PLATFORM_MXSFB - select MXS_OCOTP config MODULE_APX4 bool @@ -106,7 +101,6 @@ config MODULE_APX4 select MXS_HAVE_PLATFORM_MXS_I2C select MXS_HAVE_PLATFORM_MXS_MMC select MXS_HAVE_PLATFORM_MXS_SAIF - select MXS_OCOTP config MACH_TX28 bool "Ka-Ro TX28 module" diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile index e41590ccb437..fed3695a1339 100644 --- a/arch/arm/mach-mxs/Makefile +++ b/arch/arm/mach-mxs/Makefile @@ -1,7 +1,6 @@ # Common support -obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o +obj-y := devices.o icoll.o iomux.o ocotp.o system.o timer.o mm.o -obj-$(CONFIG_MXS_OCOTP) += ocotp.o obj-$(CONFIG_PM) += pm.o obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index 5905ed130e94..d89d87ae144c 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c @@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = { static struct eeti_ts_platform_data eeti_ts_pdata = { .irq_active_high = 1, + .irq_gpio = GPIO_TOUCH_IRQ, }; static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = { .type = "eeti_ts", .addr = 0x0a, - .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ), .platform_data = &eeti_ts_pdata, }; diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index e24961109b70..d56b0f7f2b20 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -483,7 +483,7 @@ config MACH_NEO1973_GTA02 select I2C select POWER_SUPPLY select MACH_NEO1973 - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_USB_HOST help Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone @@ -493,7 +493,7 @@ config MACH_RX1950 select S3C24XX_DCLK select PM_H1940 if PM select I2C - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_NAND select S3C2410_IOTIMING if S3C2440_CPUFREQ select S3C2440_XTAL_16934400 diff --git a/arch/arm/mach-sa1100/leds-hackkit.c b/arch/arm/mach-sa1100/leds-hackkit.c index 6a2352436e62..f8e47235babe 100644 --- a/arch/arm/mach-sa1100/leds-hackkit.c +++ b/arch/arm/mach-sa1100/leds-hackkit.c @@ -10,6 +10,7 @@ * as cpu led, the green one is used as timer led. 
*/ #include <linux/init.h> +#include <linux/io.h> #include <mach/hardware.h> #include <asm/leds.h> diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c index 8fd387bf31f0..b7344beec102 100644 --- a/arch/arm/mach-tegra/board-harmony-power.c +++ b/arch/arm/mach-tegra/board-harmony-power.c @@ -51,7 +51,7 @@ static struct regulator_init_data ldo0_data = { .consumer_supplies = tps658621_ldo0_supply, }; -#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv) \ +#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv, _on)\ static struct regulator_init_data _id##_data = { \ .supply_regulator = _supply, \ .constraints = { \ @@ -63,21 +63,22 @@ static struct regulator_init_data ldo0_data = { .valid_ops_mask = (REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_STATUS | \ REGULATOR_CHANGE_VOLTAGE), \ + .always_on = _on, \ }, \ } -HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550); -HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475); -HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300); -HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300); +HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550, 1); +HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500, 1); +HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500, 0); +HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475, 1); +HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300, 1); #define TPS_REG(_id, _data) \ { \ @@ -119,9 +120,10 @@ static struct i2c_board_info __initdata harmony_regulators[] = { int __init harmony_regulator_init(void) { + regulator_register_always_on(0, "vdd_sys", + NULL, 0, 5000000); + if (machine_is_harmony()) { - regulator_register_always_on(0, "vdd_sys", - NULL, 0, 5000000); i2c_register_board_info(3, harmony_regulators, 1); } else { /* Harmony, booted using device tree */ struct device_node *np; diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c2cdf6500f75..4e7d1182e8a3 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -358,7 +358,7 @@ void __init dma_contiguous_remap(void) if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) - return; + continue; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); @@ -423,7 +423,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) unsigned int pageno; unsigned long flags; void *ptr = NULL; - size_t align; + unsigned long align_mask; if (!pool->vaddr) { WARN(1, "coherent 
pool not initialised!\n"); @@ -435,11 +435,11 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) * small, so align them to their order in pages, minimum is a page * size. This helps reduce fragmentation of the DMA space. */ - align = PAGE_SIZE << get_order(size); + align_mask = (1 << get_order(size)) - 1; spin_lock_irqsave(&pool->lock, flags); pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, - 0, count, (1 << align) - 1); + 0, count, align_mask); if (pageno < pool->nr_pages) { bitmap_set(pool->bitmap, pageno, count); ptr = pool->vaddr + PAGE_SIZE * pageno; @@ -648,12 +648,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, if (arch_is_coherent() || nommu()) { __dma_free_buffer(page, size); + } else if (__free_from_pool(cpu_addr, size)) { + return; } else if (!IS_ENABLED(CONFIG_CMA)) { __dma_free_remap(cpu_addr, size); __dma_free_buffer(page, size); } else { - if (__free_from_pool(cpu_addr, size)) - return; /* * Non-atomic allocations cannot be freed with IRQs disabled */ diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig index 7aca31c1df1f..9c3b90c3538e 100644 --- a/arch/arm/plat-samsung/Kconfig +++ b/arch/arm/plat-samsung/Kconfig @@ -403,7 +403,8 @@ config S5P_DEV_USB_EHCI config S3C24XX_PWM bool "PWM device support" - select HAVE_PWM + select PWM + select PWM_SAMSUNG help Support for exporting the PWM timer blocks via the pwm device system diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 76de6b68487c..107610e01a29 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -124,6 +124,7 @@ config S390 select GENERIC_TIME_VSYSCALL select GENERIC_CLOCKEVENTS select KTIME_SCALAR if 32BIT + select HAVE_ARCH_SECCOMP_FILTER config SCHED_OMIT_FRAME_POINTER def_bool y diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h index 0fb34027d3f6..a60d085ddb4d 100644 --- a/arch/s390/include/asm/sparsemem.h +++ b/arch/s390/include/asm/sparsemem.h @@ -4,13 +4,11 @@ #ifdef CONFIG_64BIT #define SECTION_SIZE_BITS 28 -#define MAX_PHYSADDR_BITS 46 #define MAX_PHYSMEM_BITS 46 #else #define SECTION_SIZE_BITS 25 -#define MAX_PHYSADDR_BITS 31 #define MAX_PHYSMEM_BITS 31 #endif /* CONFIG_64BIT */ diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h index fb214dd9b7e0..fe7b99759e12 100644 --- a/arch/s390/include/asm/syscall.h +++ b/arch/s390/include/asm/syscall.h @@ -12,6 +12,7 @@ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 +#include <linux/audit.h> #include <linux/sched.h> #include <linux/err.h> #include <asm/ptrace.h> @@ -87,4 +88,13 @@ static inline void syscall_set_arguments(struct task_struct *task, regs->orig_gpr2 = args[0]; } +static inline int syscall_get_arch(struct task_struct *task, + struct pt_regs *regs) +{ +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) + return AUDIT_ARCH_S390; +#endif + return sizeof(long) == 8 ? 
AUDIT_ARCH_S390X : AUDIT_ARCH_S390; +} #endif /* _ASM_SYSCALL_H */ diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index d1225089a4bb..f606d935f495 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c @@ -620,7 +620,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } @@ -631,7 +630,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); } diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index e835d6d5b7fd..2d82cfcbce5b 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S @@ -1635,7 +1635,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_readv + jg compat_sys_process_vm_readv ENTRY(compat_sys_process_vm_writev_wrapper) lgfr %r2,%r2 # compat_pid_t @@ -1645,4 +1645,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_writev + jg compat_sys_process_vm_writev diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index f4eb37680b91..e4be113fbac6 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -719,7 +719,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; /* Do the secure computing check first. */ - secure_computing_strict(regs->gprs[2]); + if (secure_computing(regs->gprs[2])) { + /* seccomp failures shouldn't expose any additional code. 
*/ + ret = -1; + goto out; + } /* * The sysc_tracesys code in entry.S stored the system @@ -745,6 +749,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], regs->gprs[5]); +out: return ret ?: regs->gprs[2]; } diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index b4a29eee41b8..d0964d22adb5 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -81,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) { unsigned int ret; - if (current->personality == PER_LINUX32 && personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/arch/sh/drivers/dma/dma-sh.c b/arch/sh/drivers/dma/dma-sh.c index 4c171f13b0e8..b22565623142 100644 --- a/arch/sh/drivers/dma/dma-sh.c +++ b/arch/sh/drivers/dma/dma-sh.c @@ -335,7 +335,7 @@ static int dmae_irq_init(void) for (n = 0; n < NR_DMAE; n++) { int i = request_irq(get_dma_error_irq(n), dma_err, - IRQF_SHARED, dmae_name[n], NULL); + IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]); if (unlikely(i < 0)) { printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); return i; diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h index 48d14498e774..2a0ca8780f0d 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h @@ -183,18 +183,30 @@ enum { GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0, GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK, GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, GPIO_FN_LCD_DE, - GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22, - GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20, - GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18, - GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16, - GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14, - GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12, - GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10, - GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8, - GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6, - GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4, - GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2, - GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0, + GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22, + GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20, + GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18, + GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16, + GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14, + GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12, + GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10, + GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8, + GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6, + GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4, + GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2, + GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0, + GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22, + GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20, + GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18, + GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16, + GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14, + GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12, + GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10, + GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8, + GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6, + GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4, + GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2, + GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0, GPIO_FN_LCD_M_DISP, }; diff --git 
a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c index f25127c46eca..039e4587dd9b 100644 --- a/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c +++ b/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c @@ -758,12 +758,22 @@ enum { DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK, LCD_CLK_MARK, LCD_EXTCLK_MARK, LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK, - LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK, - LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK, - LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK, - LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK, - LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK, - LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK, + LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK, + LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK, + LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK, + LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK, + LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK, + LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK, + LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK, + LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK, + LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK, + LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK, + LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK, + LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK, + LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK, + LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK, + LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK, + LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK, LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK, LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK, LCD_M_DISP_MARK, @@ -1036,6 +1046,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PF1_DATA, PF1MD_000), PINMUX_DATA(BACK_MARK, PF1MD_001), + PINMUX_DATA(SSL10_MARK, PF1MD_011), PINMUX_DATA(TIOC4B_MARK, PF1MD_100), PINMUX_DATA(DACK0_MARK, PF1MD_101), @@ -1049,47 +1060,50 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PG27_DATA, PG27MD_00), PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10), PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11), + PINMUX_DATA(LCD_DE_MARK, PG27MD_11), PINMUX_DATA(PG26_DATA, PG26MD_00), PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10), + PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10), PINMUX_DATA(PG25_DATA, PG25MD_00), PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10), + PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10), PINMUX_DATA(PG24_DATA, PG24MD_00), PINMUX_DATA(LCD_CLK_MARK, PG24MD_10), PINMUX_DATA(PG23_DATA, PG23MD_000), - PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010), + PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011), PINMUX_DATA(TXD5_MARK, PG23MD_100), PINMUX_DATA(PG22_DATA, PG22MD_000), - PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010), + PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011), PINMUX_DATA(RXD5_MARK, PG22MD_100), PINMUX_DATA(PG21_DATA, PG21MD_000), PINMUX_DATA(DV_DATA7_MARK, PG21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011), PINMUX_DATA(TXD4_MARK, PG21MD_100), PINMUX_DATA(PG20_DATA, PG20MD_000), PINMUX_DATA(DV_DATA6_MARK, PG20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA20_PG20_MARK, 
PG21MD_010), PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011), PINMUX_DATA(RXD4_MARK, PG20MD_100), PINMUX_DATA(PG19_DATA, PG19MD_000), PINMUX_DATA(DV_DATA5_MARK, PG19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010), + PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010), PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011), PINMUX_DATA(SCK5_MARK, PG19MD_100), PINMUX_DATA(PG18_DATA, PG18MD_000), PINMUX_DATA(DV_DATA4_MARK, PG18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010), + PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010), PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011), PINMUX_DATA(SCK4_MARK, PG18MD_100), @@ -1097,103 +1111,103 @@ static pinmux_enum_t pinmux_data[] = { // we're going with 2 bits PINMUX_DATA(PG17_DATA, PG17MD_00), PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01), - PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10), + PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10), // TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description // we're going with 2 bits PINMUX_DATA(PG16_DATA, PG16MD_00), PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01), - PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10), + PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10), PINMUX_DATA(PG15_DATA, PG15MD_00), PINMUX_DATA(D31_MARK, PG15MD_01), - PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10), + PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10), PINMUX_DATA(PINT7_PG_MARK, PG15MD_11), PINMUX_DATA(PG14_DATA, PG14MD_00), PINMUX_DATA(D30_MARK, PG14MD_01), - PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10), + PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10), PINMUX_DATA(PINT6_PG_MARK, PG14MD_11), PINMUX_DATA(PG13_DATA, PG13MD_00), PINMUX_DATA(D29_MARK, PG13MD_01), - PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10), + PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10), PINMUX_DATA(PINT5_PG_MARK, PG13MD_11), PINMUX_DATA(PG12_DATA, PG12MD_00), PINMUX_DATA(D28_MARK, PG12MD_01), - PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10), + PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10), PINMUX_DATA(PINT4_PG_MARK, PG12MD_11), PINMUX_DATA(PG11_DATA, PG11MD_000), PINMUX_DATA(D27_MARK, PG11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010), + PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010), PINMUX_DATA(PINT3_PG_MARK, PG11MD_011), PINMUX_DATA(TIOC3D_MARK, PG11MD_100), PINMUX_DATA(PG10_DATA, PG10MD_000), PINMUX_DATA(D26_MARK, PG10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010), + PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010), PINMUX_DATA(PINT2_PG_MARK, PG10MD_011), PINMUX_DATA(TIOC3C_MARK, PG10MD_100), PINMUX_DATA(PG9_DATA, PG9MD_000), PINMUX_DATA(D25_MARK, PG9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010), + PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010), PINMUX_DATA(PINT1_PG_MARK, PG9MD_011), PINMUX_DATA(TIOC3B_MARK, PG9MD_100), PINMUX_DATA(PG8_DATA, PG8MD_000), PINMUX_DATA(D24_MARK, PG8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010), + PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010), PINMUX_DATA(PINT0_PG_MARK, PG8MD_011), PINMUX_DATA(TIOC3A_MARK, PG8MD_100), PINMUX_DATA(PG7_DATA, PG7MD_000), PINMUX_DATA(D23_MARK, PG7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010), + PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010), PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011), PINMUX_DATA(TIOC2B_MARK, PG7MD_100), PINMUX_DATA(PG6_DATA, PG6MD_000), PINMUX_DATA(D22_MARK, PG6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010), + PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010), PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011), PINMUX_DATA(TIOC2A_MARK, PG6MD_100), PINMUX_DATA(PG5_DATA, PG5MD_000), PINMUX_DATA(D21_MARK, PG5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010), + PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010), PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011), PINMUX_DATA(TIOC1B_MARK, 
PG5MD_100), PINMUX_DATA(PG4_DATA, PG4MD_000), PINMUX_DATA(D20_MARK, PG4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010), + PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010), PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011), PINMUX_DATA(TIOC1A_MARK, PG4MD_100), PINMUX_DATA(PG3_DATA, PG3MD_000), PINMUX_DATA(D19_MARK, PG3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010), + PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010), PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011), PINMUX_DATA(TIOC0D_MARK, PG3MD_100), PINMUX_DATA(PG2_DATA, PG2MD_000), PINMUX_DATA(D18_MARK, PG2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010), + PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010), PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011), PINMUX_DATA(TIOC0C_MARK, PG2MD_100), PINMUX_DATA(PG1_DATA, PG1MD_000), PINMUX_DATA(D17_MARK, PG1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010), + PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010), PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011), PINMUX_DATA(TIOC0B_MARK, PG1MD_100), PINMUX_DATA(PG0_DATA, PG0MD_000), PINMUX_DATA(D16_MARK, PG0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010), + PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010), PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011), PINMUX_DATA(TIOC0A_MARK, PG0MD_100), @@ -1275,14 +1289,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ23_DATA, PJ23MD_000), PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001), - PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010), + PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011), PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100), PINMUX_DATA(CTX1_MARK, PJ23MD_101), PINMUX_DATA(PJ22_DATA, PJ22MD_000), PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001), - PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010), + PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011), PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100), PINMUX_DATA(CRX1_MARK, PJ22MD_101), @@ -1290,14 +1304,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ21_DATA, PJ21MD_000), PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010), + PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011), PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100), PINMUX_DATA(CTX2_MARK, PJ21MD_101), PINMUX_DATA(PJ20_DATA, PJ20MD_000), PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010), + PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010), PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011), PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100), PINMUX_DATA(CRX2_MARK, PJ20MD_101), @@ -1305,7 +1319,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ19_DATA, PJ19MD_000), PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010), + PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010), PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011), PINMUX_DATA(TIOC0D_MARK, PJ19MD_100), PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101), @@ -1313,126 +1327,126 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ18_DATA, PJ18MD_000), PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010), + PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010), PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011), PINMUX_DATA(TIOC0C_MARK, PJ18MD_100), PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101), PINMUX_DATA(PJ17_DATA, PJ17MD_000), PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001), - PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010), + PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010), PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011), PINMUX_DATA(TIOC0B_MARK, PJ17MD_100), PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101), PINMUX_DATA(PJ16_DATA, PJ16MD_000), PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001), - PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010), + 
PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010), PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011), PINMUX_DATA(TIOC0A_MARK, PJ16MD_100), PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101), PINMUX_DATA(PJ15_DATA, PJ15MD_000), PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001), - PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010), + PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010), PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011), PINMUX_DATA(PWM2H_MARK, PJ15MD_100), PINMUX_DATA(TXD7_MARK, PJ15MD_101), PINMUX_DATA(PJ14_DATA, PJ14MD_000), PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001), - PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010), + PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010), PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011), PINMUX_DATA(PWM2G_MARK, PJ14MD_100), PINMUX_DATA(TXD6_MARK, PJ14MD_101), PINMUX_DATA(PJ13_DATA, PJ13MD_000), PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001), - PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010), + PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010), PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011), PINMUX_DATA(PWM2F_MARK, PJ13MD_100), PINMUX_DATA(TXD5_MARK, PJ13MD_101), PINMUX_DATA(PJ12_DATA, PJ12MD_000), PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001), - PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010), + PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010), PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011), PINMUX_DATA(PWM2E_MARK, PJ12MD_100), PINMUX_DATA(SCK7_MARK, PJ12MD_101), PINMUX_DATA(PJ11_DATA, PJ11MD_000), PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010), + PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010), PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011), PINMUX_DATA(PWM2D_MARK, PJ11MD_100), PINMUX_DATA(SCK6_MARK, PJ11MD_101), PINMUX_DATA(PJ10_DATA, PJ10MD_000), PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010), + PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010), PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011), PINMUX_DATA(PWM2C_MARK, PJ10MD_100), PINMUX_DATA(SCK5_MARK, PJ10MD_101), PINMUX_DATA(PJ9_DATA, PJ9MD_000), PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010), + PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010), PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011), PINMUX_DATA(PWM2B_MARK, PJ9MD_100), PINMUX_DATA(RTS5_MARK, PJ9MD_101), PINMUX_DATA(PJ8_DATA, PJ8MD_000), PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010), + PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010), PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011), PINMUX_DATA(PWM2A_MARK, PJ8MD_100), PINMUX_DATA(CTS5_MARK, PJ8MD_101), PINMUX_DATA(PJ7_DATA, PJ7MD_000), PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010), + PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010), PINMUX_DATA(SD_D2_MARK, PJ7MD_011), PINMUX_DATA(PWM1H_MARK, PJ7MD_100), PINMUX_DATA(PJ6_DATA, PJ6MD_000), PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010), + PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010), PINMUX_DATA(SD_D3_MARK, PJ6MD_011), PINMUX_DATA(PWM1G_MARK, PJ6MD_100), PINMUX_DATA(PJ5_DATA, PJ5MD_000), PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010), + PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010), PINMUX_DATA(SD_CMD_MARK, PJ5MD_011), PINMUX_DATA(PWM1F_MARK, PJ5MD_100), PINMUX_DATA(PJ4_DATA, PJ4MD_000), PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010), + PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010), PINMUX_DATA(SD_CLK_MARK, PJ4MD_011), PINMUX_DATA(PWM1E_MARK, PJ4MD_100), PINMUX_DATA(PJ3_DATA, PJ3MD_000), PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010), + PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010), PINMUX_DATA(SD_D0_MARK, PJ3MD_011), 
PINMUX_DATA(PWM1D_MARK, PJ3MD_100), PINMUX_DATA(PJ2_DATA, PJ2MD_000), PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010), + PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010), PINMUX_DATA(SD_D1_MARK, PJ2MD_011), PINMUX_DATA(PWM1C_MARK, PJ2MD_100), PINMUX_DATA(PJ1_DATA, PJ1MD_000), PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010), + PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010), PINMUX_DATA(SD_WP_MARK, PJ1MD_011), PINMUX_DATA(PWM1B_MARK, PJ1MD_100), PINMUX_DATA(PJ0_DATA, PJ0MD_000), PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010), + PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010), PINMUX_DATA(SD_CD_MARK, PJ0MD_011), PINMUX_DATA(PWM1A_MARK, PJ0MD_100), }; @@ -1877,30 +1891,55 @@ static struct pinmux_gpio pinmux_gpios[] = { PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, 
LCD_DATA0_PG0_MARK), + + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK), PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK), }; diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 0dc1f5786081..11c6c9603e71 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -502,12 +502,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if (current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 6026fdd1b2ed..d58edf5fefdb 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2020,6 +2020,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_table[VMEMMAP_SIZE]; +static long __meminitdata addr_start, addr_end; +static int __meminitdata node_start; + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) { unsigned long vstart = (unsigned long) start; @@ -2050,15 +2053,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) *vmem_pp = pte_base | __pa(block); - printk(KERN_INFO "[%p-%p] page_structs=%lu " - "node=%d entry=%lu/%lu\n", start, block, nr, - node, - addr >> VMEMMAP_CHUNK_SHIFT, - VMEMMAP_SIZE); + /* check to see if we have contiguous blocks */ + if (addr_end != addr || node_start != node) { + if (addr_start) + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = addr; + node_start = node; + } + addr_end = addr + VMEMMAP_CHUNK; } } return 0; } + +void __meminit vmemmap_populate_print_last(void) +{ + if (addr_start) { + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = 0; + addr_end = 0; + node_start = 0; + } +} #endif /* CONFIG_SPARSEMEM_VMEMMAP 
*/ static void prot_init_common(unsigned long page_none, diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index 64effdc6da94..b2e91d40a4cb 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -194,6 +194,11 @@ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID * boundary violation will require three middle nodes. */ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); +/* When we populate back during bootup, the amount of pages can vary. The + * max we have is seen is 395979, but that does not mean it can't be more. + * But some machines can have 3GB I/O holes even. So lets reserve enough + * for 4GB of I/O and E820 holes. */ +RESERVE_BRK(p2m_populated, PMD_SIZE * 4); static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index ac7034129f3f..d5fdd36190cc 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ac_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); static struct acpi_driver acpi_ac_driver = { @@ -313,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device) return result; } +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev) { struct acpi_ac *ac; @@ -332,6 +335,7 @@ static int acpi_ac_resume(struct device *dev) kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); return 0; } +#endif static int acpi_ac_remove(struct acpi_device *device, int type) { diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index ff2c876ec412..45e3e1759fb8 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -1052,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP /* this is needed to learn about changes made in suspended state */ static int acpi_battery_resume(struct device *dev) { @@ -1068,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev) acpi_battery_update(battery); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 79d4c22f7a6d..314a3b84bbc7 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device); static int acpi_button_remove(struct acpi_device *device, int type); static void acpi_button_notify(struct acpi_device *device, u32 event); +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); static struct acpi_driver acpi_button_driver = { @@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev) { struct acpi_device *device = to_acpi_device(dev); @@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev) return acpi_lid_send_state(device); return 0; } +#endif static int acpi_button_add(struct acpi_device *device) { diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index 669d9ee80d16..bc36a476f1ab 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev); static int 
acpi_fan_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); static struct acpi_driver acpi_fan_driver = { @@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev) { if (!dev) @@ -207,6 +210,7 @@ static int acpi_fan_resume(struct device *dev) return result; } +#endif static int __init acpi_fan_init(void) { diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 215ecd097408..fc1803414629 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, power_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume); static struct acpi_driver acpi_power_driver = { @@ -775,6 +777,7 @@ static int acpi_power_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev) { int result = 0, state; @@ -803,6 +806,7 @@ static int acpi_power_resume(struct device *dev) return result; } +#endif int __init acpi_power_init(void) { diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index c0b9aa5faf4c..ff0740e0a9c2 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c @@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void) #endif } +#ifdef CONFIG_PM_SLEEP static int acpi_sbs_resume(struct device *dev) { struct acpi_sbs *sbs; @@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev) acpi_sbs_callback(sbs); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 9fe90e9fecb5..edda74a43406 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -106,7 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, thermal_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); static struct acpi_driver acpi_thermal_driver = { @@ -1041,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev) { struct acpi_thermal *tz; @@ -1075,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev) return AE_OK; } +#endif static int thermal_act(const struct dmi_system_id *d) { diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index d4386019af5d..96cce6d53195 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev) { printk(DEV_LABEL " (itf %d): can't set up page mapping\n", dev->number); - return error; + return -ENOMEM; } IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", dev->number, iadev->pci->revision, base, iadev->irq);) diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 869d7ff2227f..eb78e9640c4a 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c @@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev) */ int pm_clk_create(struct device *dev) { - int ret = dev_pm_get_subsys_data(dev); - return ret < 0 ? 
ret : 0; + return dev_pm_get_subsys_data(dev); } /** diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index a14085cc613f..39c32529b833 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -24,7 +24,6 @@ int dev_pm_get_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 0; psd = kzalloc(sizeof(*psd), GFP_KERNEL); if (!psd) @@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev) dev->power.subsys_data = psd; pm_clk_init(dev); psd = NULL; - ret = 1; } spin_unlock_irq(&dev->power.lock); @@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev) /* kfree() verifies that its argument is nonzero. */ kfree(psd); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 11b32d2642df..a6e5672c67e7 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c @@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, { 0, }, }; diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index 26823d97fd9f..9ea4627dc0c2 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus) /* for these chips OTP is always available */ present = true; break; - + case BCMA_CHIP_ID_BCM43228: + present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT; + break; default: present = false; break; diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index 57226424690c..6f007b6c240d 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h @@ -239,16 +239,45 @@ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 -#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412 -#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 -#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a -#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ -#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a +#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A +#define 
PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A #endif diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 9ed92ef5829b..08fc5cbb13cd 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -1502,15 +1502,73 @@ static const struct intel_gtt_driver_description { "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, "Haswell", &sandybridge_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, "Haswell", &sandybridge_gtt_driver }, - { PCI_DEVICE_ID_INTEL_HASWELL_SDV, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, + 
"Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, + "Haswell", &sandybridge_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, "Haswell", &sandybridge_gtt_driver }, { 0, NULL, NULL } }; diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index d706bd0e9e80..4fbdceb6f773 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -160,7 +160,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int omap_rng_suspend(struct device *dev) { diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 89682fa8801e..c4be3519a587 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -807,6 +807,7 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); #endif +#ifdef CONFIG_PM_SLEEP static int tpm_tis_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); @@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev) return tpm_pm_resume(dev); } +#endif static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c index cdc02ac8f41a..503996a94a6a 100644 --- a/drivers/cpufreq/pcc-cpufreq.c +++ b/drivers/cpufreq/pcc-cpufreq.c @@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void) mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); + ret = -ENOMEM; goto out_free; } pcch_hdr = pcch_virt_addr; diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index fcfeb3cd8d31..5084975d793c 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -172,7 +172,8 @@ struct imxdma_engine { struct device_dma_parameters dma_parms; struct dma_device dma_device; void __iomem *base; - struct clk *dma_clk; + struct clk *dma_ahb; + struct clk *dma_ipg; spinlock_t lock; struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; @@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev) return 0; } - imxdma->dma_clk = clk_get(NULL, "dma"); - if (IS_ERR(imxdma->dma_clk)) - return PTR_ERR(imxdma->dma_clk); - clk_enable(imxdma->dma_clk); + imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(imxdma->dma_ipg)) { + ret = PTR_ERR(imxdma->dma_ipg); + goto err_clk; + } + + imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(imxdma->dma_ahb)) { + ret = PTR_ERR(imxdma->dma_ahb); + goto err_clk; + } + + clk_prepare_enable(imxdma->dma_ipg); + clk_prepare_enable(imxdma->dma_ahb); /* reset DMA module */ imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); @@ -988,16 +999,14 @@ static 
int __init imxdma_probe(struct platform_device *pdev) ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); - kfree(imxdma); - return ret; + goto err_enable; } ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); - kfree(imxdma); - return ret; + goto err_enable; } } @@ -1094,7 +1103,10 @@ err_init: free_irq(MX1_DMA_INT, NULL); free_irq(MX1_DMA_ERR, NULL); } - +err_enable: + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); +err_clk: kfree(imxdma); return ret; } @@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev) free_irq(MX1_DMA_ERR, NULL); } - kfree(imxdma); + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); + kfree(imxdma); return 0; } diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index d52dbc6c54ab..24acd711e032 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; + int ret; dma_cookie_init(&tdc->dma_chan); tdc->config_init = false; - return 0; + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) + dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); + return ret; } static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; @@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) list_del(&sg_req->node); kfree(sg_req); } + clk_disable_unprepare(tdma->dma_clk); } /* Tegra20 specific DMA controller information */ @@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) } } + /* Enable clock before accessing registers */ + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) { + dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); + goto err_pm_disable; + } + /* Reset DMA controller */ tegra_periph_reset_assert(tdma->dma_clk); udelay(2); @@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); + clk_disable_unprepare(tdma->dma_clk); + INIT_LIST_HEAD(&tdma->dma_dev.channels); for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c index 150d9768811d..ae37181798b3 100644 --- a/drivers/gpio/gpio-em.c +++ b/drivers/gpio/gpio-em.c @@ -266,7 +266,7 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) return 0; } -static void __devexit em_gio_irq_domain_cleanup(struct em_gio_priv *p) +static void em_gio_irq_domain_cleanup(struct em_gio_priv *p) { struct gpio_em_config *pdata = p->pdev->dev.platform_data; diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c index a1c8754f52cf..202a99207b7d 100644 --- a/drivers/gpio/gpio-langwell.c +++ b/drivers/gpio/gpio-langwell.c @@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, resource_size_t start, len; struct lnw_gpio *lnw; u32 gpio_base; - 
int retval = 0; + int retval; int ngpio = id->driver_data; retval = pci_enable_device(pdev); @@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, base = ioremap_nocache(start, len); if (!base) { dev_err(&pdev->dev, "error mapping bar1\n"); + retval = -EFAULT; goto err3; } gpio_base = *((u32 *)base + 1); @@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, &lnw_gpio_irq_ops, lnw); - if (!lnw->domain) + if (!lnw->domain) { + retval = -ENOMEM; goto err3; + } lnw->reg_base = base; lnw->chip.label = dev_name(&pdev->dev); diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 71a838f44501..b38986285868 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c @@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset) if (offset < 20) return INTEL_MSIC_GPIO0HV0CTLO - offset + 16; - return INTEL_MSIC_GPIO1HV0CTLO + offset + 20; + return INTEL_MSIC_GPIO1HV0CTLO - offset + 20; } static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index 4db460b6ecf7..80f44bb64a87 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -465,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev) goto out_iounmap; port->bgc.gc.to_irq = mxc_gpio_to_irq; - port->bgc.gc.base = pdev->id * 32; - port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); - port->bgc.data = port->bgc.read_reg(port->bgc.reg_set); + port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 : + pdev->id * 32; err = gpiochip_add(&port->bgc.gc); if (err) diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 58a6a63a6ece..9cac88a65f78 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -62,6 +62,7 @@ int pxa_last_gpio; #ifdef CONFIG_OF static struct irq_domain *domain; +static struct device_node *pxa_gpio_of_node; #endif struct pxa_gpio_chip { @@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) (value ? GPSR_OFFSET : GPCR_OFFSET)); } +#ifdef CONFIG_OF_GPIO +static int pxa_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + if (gpiospec->args[0] > pxa_last_gpio) + return -EINVAL; + + if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip) + return -EINVAL; + + if (flags) + *flags = gpiospec->args[1]; + + return gpiospec->args[0] % 32; +} +#endif + static int __devinit pxa_init_gpio_chip(int gpio_end, int (*set_wake)(unsigned int, unsigned int)) { @@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end, c->get = pxa_gpio_get; c->set = pxa_gpio_set; c->to_irq = pxa_gpio_to_irq; +#ifdef CONFIG_OF_GPIO + c->of_node = pxa_gpio_of_node; + c->of_xlate = pxa_gpio_of_xlate; + c->of_gpio_n_cells = 2; +#endif /* number of GPIOs on last bank may be less than 32 */ c->ngpio = (gpio + 31 > gpio_end) ? 
(gpio_end - gpio + 1) : 32; @@ -488,6 +512,7 @@ static int pxa_gpio_nums(void) return count; } +#ifdef CONFIG_OF static struct of_device_id pxa_gpio_dt_ids[] = { { .compatible = "mrvl,pxa-gpio" }, { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO }, @@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq, const struct irq_domain_ops pxa_irq_domain_ops = { .map = pxa_irq_domain_map, + .xlate = irq_domain_xlate_twocell, }; -#ifdef CONFIG_OF static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) { int ret, nr_banks, nr_gpios, irq_base; @@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) } domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0, &pxa_irq_domain_ops, NULL); + pxa_gpio_of_node = np; return 0; err: iounmap(gpio_reg_base); @@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = { .probe = pxa_gpio_probe, .driver = { .name = "pxa-gpio", - .of_match_table = pxa_gpio_dt_ids, + .of_match_table = of_match_ptr(pxa_gpio_dt_ids), }, }; diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 92f7b2bb79d4..ba126cc04073 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -2454,12 +2454,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { }, }, { .chip = { - .base = EXYNOS5_GPC4(0), - .ngpio = EXYNOS5_GPIO_C4_NR, - .label = "GPC4", - }, - }, { - .chip = { .base = EXYNOS5_GPD0(0), .ngpio = EXYNOS5_GPIO_D0_NR, .label = "GPD0", @@ -2513,6 +2507,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { .label = "GPY6", }, }, { + .chip = { + .base = EXYNOS5_GPC4(0), + .ngpio = EXYNOS5_GPIO_C4_NR, + .label = "GPC4", + }, + }, { .config = &samsung_gpio_cfgs[9], .irq_base = IRQ_EINT(0), .chip = { @@ -2836,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void) } /* need to set base address for gpc4 */ - exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; + exynos5_gpios_1[20].base = gpio_base1 + 0x2E0; /* need to set base address for gpx */ chip = &exynos5_gpios_1[21]; diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index 424dce8e3f30..8707d4572a06 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev) break; default: - return -ENODEV; + err = -ENODEV; + goto err_sch_gpio_core; } sch_gpio_core.dev = &pdev->dev; diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 66d4a28ad5a2..0303935d10e2 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name, { const struct firmware *fw; struct platform_device *pdev; - u8 *fwdata = NULL, *edid; + u8 *fwdata = NULL, *edid, *new_edid; int fwsize, expected; int builtin = 0, err = 0; int i, valid_extensions = 0; @@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name, "\"%s\" for connector \"%s\"\n", valid_extensions, edid[0x7e], name, connector_name); edid[0x7e] = valid_extensions; - edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, + new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); - if (edid == NULL) { + if (new_edid == NULL) { err = -ENOMEM; + kfree(edid); goto relfw_out; } + edid = new_edid; } connector->display_info.raw_edid = edid; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ed22612bc847..a24ffbe97c01 100644 --- a/drivers/gpu/drm/i915/i915_drv.c 
+++ b/drivers/gpu/drm/i915/i915_drv.c @@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ + INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ + INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ + INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ + INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ + INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ + INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ + INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index da8b01fb1bf8..a9d58d72bb4d 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring, struct drm_i915_file_private *file_priv = NULL; struct i915_hw_context *to; struct drm_i915_gem_object *from_obj = ring->last_context_obj; - int ret; if (dev_priv->hw_contexts_disabled) return 0; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c 
b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 5af631e788c8..ff2819ea0813 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_i915_obj = to_intel_bo(target_obj); target_offset = target_i915_obj->gtt_offset; + /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and + * pipe_control writes because the gpu doesn't properly redirect them + * through the ppgtt for non_secure batchbuffers. */ + if (unlikely(IS_GEN6(dev) && + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + !target_i915_obj->has_global_gtt_mapping)) { + i915_gem_gtt_bind_object(target_i915_obj, + target_i915_obj->cache_level); + } + /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. */ @@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, io_mapping_unmap_atomic(reloc_page); } - /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and - * pipe_control writes because the gpu doesn't properly redirect them - * through the ppgtt for non_secure batchbuffers. */ - if (unlikely(IS_GEN6(dev) && - reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - !target_i915_obj->has_global_gtt_mapping)) { - i915_gem_gtt_bind_object(target_i915_obj, - target_i915_obj->cache_level); - } - /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 9fd25a435536..ee9b68f6bc36 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -361,7 +361,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->mm.gtt->needs_dmar) + /* don't map imported dma buf objects */ + if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table) return intel_gtt_map_memory(obj->pages, obj->base.size >> PAGE_SHIFT, &obj->sg_list, diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 2f5388af8df9..7631807a2788 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -32,6 +32,7 @@ #include "intel_drv.h" #include "i915_drv.h" +#ifdef CONFIG_PM static u32 calc_residency(struct drm_device *dev, const u32 reg) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev) device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); } +#else +void i915_setup_sysfs(struct drm_device *dev) +{ + return; +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ + return; +} +#endif /* CONFIG_PM */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f6159765f1eb..a69a3d0d3acf 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, unsigned long bestppm, ppm, absppm; int dotclk, flag; + flag = 0; dotclk = target * 1000; bestppm = 1000000; ppm = absppm = 0; @@ -3753,17 +3754,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, continue; } - if (intel_encoder->type == INTEL_OUTPUT_EDP) { - /* Use VBT settings if we have an eDP panel */ - unsigned int edp_bpc = 
dev_priv->edp.bpp / 3; - - if (edp_bpc < display_bpc) { - DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); - display_bpc = edp_bpc; - } - continue; - } - /* Not one of the known troublemakers, check the EDID */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 0a56b9ab0f58..a6c426afaa7a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp) WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); pp = ironlake_get_pp_control(dev_priv); - pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); + /* We need to switch off panel power _and_ force vdd, for otherwise some + * panels get very unhappy and cease to work. */ + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + intel_dp->want_panel_vdd = false; + ironlake_wait_panel_off(intel_dp); } @@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder) * ensure that we have vdd while we switch off the panel. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) /* Switching the panel off requires vdd. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, mode); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); if (is_cpu_edp(intel_dp)) ironlake_edp_pll_off(encoder); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 84353559441c..132ab511b90c 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -46,15 +46,16 @@ }) #define wait_for_atomic_us(COND, US) ({ \ - int i, ret__ = -ETIMEDOUT; \ - for (i = 0; i < (US); i++) { \ - if ((COND)) { \ - ret__ = 0; \ - break; \ - } \ - udelay(1); \ - } \ - ret__; \ + unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + cpu_relax(); \ + } \ + ret__; \ }) #define wait_for(COND, MS) _wait_for(COND, MS, 1) @@ -380,7 +381,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); -extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev, diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 1991a4408cf9..b9755f6378d8 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c @@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev) bus->dev_priv = dev_priv; bus->adapter.algo = &gmbus_algorithm; - ret = 
i2c_add_adapter(&bus->adapter); - if (ret) - goto err; /* By default use a conservative clock rate */ bus->reg0 = port | GMBUS_RATE_100KHZ; @@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev) bus->force_bit = true; intel_gpio_setup(bus, port); + + ret = i2c_add_adapter(&bus->adapter); + if (ret) + goto err; } intel_i2c_reset(dev_priv->dev); @@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (dev_priv->gmbus == NULL) - return; - for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 10c7d39034e1..3df4f5fa892a 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -213,7 +213,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) return val; } -u32 intel_panel_get_backlight(struct drm_device *dev) +static u32 intel_panel_get_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; @@ -311,9 +311,6 @@ void intel_panel_enable_backlight(struct drm_device *dev, if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - dev_priv->backlight_enabled = true; - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); - if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; @@ -326,7 +323,7 @@ void intel_panel_enable_backlight(struct drm_device *dev, * we don't track the backlight dpms state, hence check whether * we have to do anything first. */ if (tmp & BLM_PWM_ENABLE) - return; + goto set_level; if (dev_priv->num_pipe == 3) tmp &= ~BLM_PIPE_SELECT_IVB; @@ -347,6 +344,14 @@ void intel_panel_enable_backlight(struct drm_device *dev, I915_WRITE(BLC_PWM_PCH_CTL1, tmp); } } + +set_level: + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these + * registers are set. 
+ */ + dev_priv->backlight_enabled = true; + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 94aabcaa3a67..58c07cdafb7e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3963,6 +3963,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(FORCEWAKE); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -3983,6 +3984,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); + POSTING_READ(FORCEWAKE_MT); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -4018,14 +4020,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE); gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE_MT); gen6_gt_check_fifodbg(dev_priv); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index bf0195a96d53..e2a73b38abe9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, * number of bits based on the write domains has little performance * impact. */ - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - /* - * Ensure that any following seqno writes only happen when the render - * cache is indeed flushed (but only if the caller actually wants that). - */ - if (flush_domains) + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* + * Ensure that any following seqno writes only happen + * when the render cache is indeed flushed. + */ flags |= PIPE_CONTROL_CS_STALL; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. 
+ */ + flags |= PIPE_CONTROL_QW_WRITE; + } - ret = intel_ring_begin(ring, 6); + ret = intel_ring_begin(ring, 4); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, 0); /* lower dword */ - intel_ring_emit(ring, 0); /* uppwer dword */ - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, 0); intel_ring_advance(ring); return 0; @@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); - /* Initialize the ring. */ - I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) } } + /* Initialize the ring. This must happen _after_ we've cleared the ring + * registers with the above sequence (the readback of the HEAD registers + * also enforces ordering), otherwise the hw might lose the new ring + * register values. */ + I915_WRITE_START(ring, obj->gtt_offset); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 26a6a4d0d078..d172e9873131 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, struct i2c_msg *msgs; int i, ret = true; + /* Would be simpler to allocate both in one go ? */ buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); if (!buf) return false; msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); - if (!msgs) + if (!msgs) { + kfree(buf); return false; + } intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index a4d7c500c97b..b69642d5d850 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; - unsigned int testr, testn, testm, testo; + int testr, testn, testm, testo; unsigned int p, m, n; - unsigned int computed; + unsigned int computed, vco; int tmp; + const unsigned int m_div_val[] = { 1, 2, 4, 8 }; m = n = p = 0; vcomax = 1488000; @@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) if (delta == 0) break; for (testo = 5; testo < 33; testo++) { - computed = pllreffreq * (testn + 1) / + vco = pllreffreq * (testn + 1) / (testr + 1); - if (computed < vcomin) + if (vco < vcomin) continue; - if (computed > vcomax) + if (vco > vcomax) continue; + computed = vco / (m_div_val[testm] * (testo + 1)); if (computed > clock) tmpdelta = computed - clock; else diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index 77e564667b5c..240cf962c999 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev) } break; case 6: /* NV50- DP AUX */ - port->drive = entry[0]; + port->drive = entry[0] & 0x0f; port->sense = port->drive; port->adapter.algo = &nouveau_dp_i2c_algo; break; diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 
1cdfd6e757ce..1866dbb49979 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c @@ -731,7 +731,6 @@ nouveau_card_init(struct drm_device *dev) case 0xa3: case 0xa5: case 0xa8: - case 0xaf: nva3_copy_create(dev); break; } diff --git a/drivers/gpu/drm/nouveau/nv84_fifo.c b/drivers/gpu/drm/nouveau/nv84_fifo.c index cc82d799fc3b..c564c5e4c30a 100644 --- a/drivers/gpu/drm/nouveau/nv84_fifo.c +++ b/drivers/gpu/drm/nouveau/nv84_fifo.c @@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned long flags; + u32 save; /* remove channel from playlist, will context switch if active */ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000); nv50_fifo_playlist_update(dev); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell any engines on this channel to unload their contexts */ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); + nv_wr32(dev, 0x002520, save); + nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); @@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv84_fifo_priv *priv = nv_engine(dev, engine); int i; + u32 save; /* set playlist length to zero, fifo will unload context */ nv_wr32(dev, 0x0032ec, 0); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell all connected engines to unload their contexts */ for (i = 0; i < priv->base.channels; i++) { struct nouveau_channel *chan = dev_priv->channels.ptr[i]; @@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) } } + nv_wr32(dev, 0x002520, save); nv_wr32(dev, 0x002140, 0); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c index 7c95c44e2887..4e712b10ebdb 100644 --- a/drivers/gpu/drm/nouveau/nvc0_pm.c +++ b/drivers/gpu/drm/nouveau/nvc0_pm.c @@ -557,7 +557,7 @@ prog_mem(struct drm_device *dev, struct nvc0_pm_state *info) nouveau_mem_exec(&exec, info->perflvl); if (dev_priv->chipset < 0xd0) - nv_wr32(dev, 0x611200, 0x00003300); + nv_wr32(dev, 0x611200, 0x00003330); else nv_wr32(dev, 0x62c000, 0x03030300); } diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c index d0d60e1e7f95..dac525b2994e 100644 --- a/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/drivers/gpu/drm/nouveau/nvd0_display.c @@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); int ch = EVO_CURS(nv_crtc->index); - evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x); + evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff)); evo_piow(crtc->dev, ch, 0x0080, 0x00000000); return 0; } diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c index 1855ecbd843b..e98d144e6eb9 100644 --- a/drivers/gpu/drm/nouveau/nve0_fifo.c +++ b/drivers/gpu/drm/nouveau/nve0_fifo.c @@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit) printk(" on channel 0x%010llx\n", (u64)inst << 12); } +static int +nve0_fifo_page_flip(struct drm_device *dev, u32 chid) +{ + struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); + struct 
drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (likely(chid >= 0 && chid < priv->base.channels)) { + chan = dev_priv->channels.ptr[chid]; + if (likely(chan)) + ret = nouveau_finish_page_flip(chan, NULL); + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ret; +} + static void nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) { @@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f; u32 subc = (addr & 0x00070000); u32 mthd = (addr & 0x00003ffc); + u32 show = stat; + + if (stat & 0x00200000) { + if (mthd == 0x0054) { + if (!nve0_fifo_page_flip(dev, chid)) + show &= ~0x00200000; + } + } - NV_INFO(dev, "PSUBFIFO %d:", unit); - nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat); - NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n", - unit, chid, subc, mthd, data); + if (show) { + NV_INFO(dev, "PFIFO%d:", unit); + nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); + NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n", + unit, chid, subc, mthd, data); + } nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 9e6f76fec527..c6fcb5b86a45 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -259,7 +259,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); /* disable crtc pair power gating before programming */ - if (ASIC_IS_DCE6(rdev)) + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) @@ -279,7 +279,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; /* power gating is per-pair */ - if (ASIC_IS_DCE6(rdev)) { + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) { struct drm_crtc *other_crtc; struct radeon_crtc *other_radeon_crtc; list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { @@ -1531,12 +1531,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) * crtc virtual pixel clock. 
*/ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { - if (ASIC_IS_DCE5(rdev)) - return ATOM_DCPLL; + if (rdev->clock.dp_extclk) + return ATOM_PPLL_INVALID; else if (ASIC_IS_DCE6(rdev)) return ATOM_PPLL0; - else if (rdev->clock.dp_extclk) - return ATOM_PPLL_INVALID; + else if (ASIC_IS_DCE5(rdev)) + return ATOM_DCPLL; } } } @@ -1635,18 +1635,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + radeon_crtc->in_mode_set = true; /* pick pll */ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); + /* disable crtc pair power gating before programming */ + if (ASIC_IS_DCE6(rdev)) + atombios_powergate_crtc(crtc, ATOM_DISABLE); + atombios_lock_crtc(crtc, ATOM_ENABLE); atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void atombios_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, ATOM_DISABLE); + radeon_crtc->in_mode_set = false; } static void atombios_crtc_disable(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e585a3b947eb..e93b80a6d4e9 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { - save->vga_control[0] = RREG32(D1VGA_CONTROL); - save->vga_control[1] = RREG32(D2VGA_CONTROL); save->vga_render_control = RREG32(VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); - save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (rdev->num_crtc >= 4) { - save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); - save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); - save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); - save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - } - if (rdev->num_crtc >= 6) { - save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); - save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); - save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); - save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); - } /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); @@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s /* Unlock host access */ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(D1VGA_CONTROL, save->vga_control[0]); - WREG32(D2VGA_CONTROL, save->vga_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); - WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); - WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + 
EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); - } - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); - } WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } @@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.evergreen.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.evergreen.tile_config |= 1 << 4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.evergreen.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.evergreen.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.evergreen.tile_config |= 2 << 4; + break; + } } rdev->config.evergreen.tile_config |= 0 << 8; rdev->config.evergreen.tile_config |= diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index c16554122ccd..e44a62a07fe3 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, case V_030000_SQ_TEX_DIM_1D_ARRAY: case V_030000_SQ_TEX_DIM_2D_ARRAY: depth = 1; + break; + case V_030000_SQ_TEX_DIM_2D_MSAA: + case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA: + surf.nsamples = 1 << llevel; + llevel = 0; + depth = 1; + break; case V_030000_SQ_TEX_DIM_3D: break; default: @@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) if (track->db_dirty) { /* Check stencil buffer */ - if (G_028800_STENCIL_ENABLE(track->db_depth_control)) { + if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID && + G_028800_STENCIL_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_stencil(p); if (r) return r; } /* Check depth buffer */ - if (G_028800_Z_ENABLE(track->db_depth_control)) { + if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID && + G_028800_Z_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_depth(p); if (r) return r; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index d3bd098e4e19..79347855d9bf 100644 --- 
a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1277,6 +1277,8 @@ #define S_028044_FORMAT(x) (((x) & 0x1) << 0) #define G_028044_FORMAT(x) (((x) >> 0) & 0x1) #define C_028044_FORMAT 0xFFFFFFFE +#define V_028044_STENCIL_INVALID 0 +#define V_028044_STENCIL_8 1 #define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7) #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 9945d86d9001..853800e8582f 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.cayman.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.cayman.tile_config |= 1 << 4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.cayman.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.cayman.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.cayman.tile_config |= 2 << 4; + break; + } } rdev->config.cayman.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 637280f541a3..d79c639ae739 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); } } + +/** + * r600_get_gpu_clock - return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (R6xx-cayman). + * Returns the 64 bit clock counter snapshot. 
+ */ +uint64_t r600_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index ca87f7afaf23..3dab49cb1d4a 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -764,8 +764,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) } /* Check depth buffer */ - if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) || - G_028800_Z_ENABLE(track->db_depth_control))) { + if (track->db_dirty && + G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && + (G_028800_STENCIL_ENABLE(track->db_depth_control) || + G_028800_Z_ENABLE(track->db_depth_control))) { r = r600_cs_track_validate_db(p); if (r) return r; @@ -1557,13 +1559,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, u32 tiling_flags) { struct r600_cs_track *track = p->track; - u32 nfaces, llevel, blevel, w0, h0, d0; - u32 word0, word1, l0_size, mipmap_size, word2, word3; + u32 dim, nfaces, llevel, blevel, w0, h0, d0; + u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; u32 height_align, pitch, pitch_align, depth_align; - u32 array, barray, larray; + u32 barray, larray; u64 base_align; struct array_mode_checker array_check; u32 format; + bool is_array; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) @@ -1581,12 +1584,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } word1 = radeon_get_ib_value(p, idx + 1); + word2 = radeon_get_ib_value(p, idx + 2) << 8; + word3 = radeon_get_ib_value(p, idx + 3) << 8; + word4 = radeon_get_ib_value(p, idx + 4); + word5 = radeon_get_ib_value(p, idx + 5); + dim = G_038000_DIM(word0); w0 = G_038000_TEX_WIDTH(word0) + 1; + pitch = (G_038000_PITCH(word0) + 1) * 8; h0 = G_038004_TEX_HEIGHT(word1) + 1; d0 = G_038004_TEX_DEPTH(word1); + format = G_038004_DATA_FORMAT(word1); + blevel = G_038010_BASE_LEVEL(word4); + llevel = G_038014_LAST_LEVEL(word5); + /* pitch in texels */ + array_check.array_mode = G_038000_TILE_MODE(word0); + array_check.group_size = track->group_size; + array_check.nbanks = track->nbanks; + array_check.npipes = track->npipes; + array_check.nsamples = 1; + array_check.blocksize = r600_fmt_get_blocksize(format); nfaces = 1; - array = 0; - switch (G_038000_DIM(word0)) { + is_array = false; + switch (dim) { case V_038000_SQ_TEX_DIM_1D: case V_038000_SQ_TEX_DIM_2D: case V_038000_SQ_TEX_DIM_3D: @@ -1599,29 +1618,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, break; case V_038000_SQ_TEX_DIM_1D_ARRAY: case V_038000_SQ_TEX_DIM_2D_ARRAY: - array = 1; + is_array = true; break; - case V_038000_SQ_TEX_DIM_2D_MSAA: case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: + is_array = true; + /* fall through */ + case V_038000_SQ_TEX_DIM_2D_MSAA: + array_check.nsamples = 1 << llevel; + llevel = 0; + break; default: dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); return -EINVAL; } - format = G_038004_DATA_FORMAT(word1); if (!r600_fmt_is_valid_texture(format, p->family)) { dev_warn(p->dev, "%s:%d texture invalid format %d\n", __func__, __LINE__, format); return -EINVAL; } - /* pitch in 
texels */ - pitch = (G_038000_PITCH(word0) + 1) * 8; - array_check.array_mode = G_038000_TILE_MODE(word0); - array_check.group_size = track->group_size; - array_check.nbanks = track->nbanks; - array_check.npipes = track->npipes; - array_check.nsamples = 1; - array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", @@ -1647,20 +1662,13 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } - word2 = radeon_get_ib_value(p, idx + 2) << 8; - word3 = radeon_get_ib_value(p, idx + 3) << 8; - - word0 = radeon_get_ib_value(p, idx + 4); - word1 = radeon_get_ib_value(p, idx + 5); - blevel = G_038010_BASE_LEVEL(word0); - llevel = G_038014_LAST_LEVEL(word1); if (blevel > llevel) { dev_warn(p->dev, "texture blevel %d > llevel %d\n", blevel, llevel); } - if (array == 1) { - barray = G_038014_BASE_ARRAY(word1); - larray = G_038014_LAST_ARRAY(word1); + if (is_array) { + barray = G_038014_BASE_ARRAY(word5); + larray = G_038014_LAST_ARRAY(word5); nfaces = larray - barray + 1; } @@ -1677,7 +1685,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ - word3 = radeon_get_ib_value(p, idx + 3) << 8; if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 4b116ae75fc2..fd328f4c3ea8 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -602,6 +602,9 @@ #define RLC_HB_WPTR 0x3f1c #define RLC_HB_WPTR_LSB_ADDR 0x3f14 #define RLC_HB_WPTR_MSB_ADDR 0x3f18 +#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38 +#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40 #define RLC_MC_CNTL 0x3f44 #define RLC_UCODE_CNTL 0x3f48 #define RLC_UCODE_ADDR 0x3f2c diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5431af292408..99304194a65c 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -300,6 +300,7 @@ struct radeon_bo_va { uint64_t soffset; uint64_t eoffset; uint32_t flags; + struct radeon_fence *fence; bool valid; }; @@ -1533,6 +1534,7 @@ struct radeon_device { unsigned debugfs_count; /* virtual memory */ struct radeon_vm_manager vm_manager; + struct mutex gpu_clock_mutex; }; int radeon_device_init(struct radeon_device *rdev, @@ -1733,11 +1735,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) -#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc)) -#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base)) -#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc)) -#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc)) -#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev)) +#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) +#define 
radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) +#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) +#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) +#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) /* Common functions */ /* AGP */ diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index f4af24310438..18c38d14c8cd 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev); * rv515 */ struct rv515_mc_save { - u32 d1vga_control; - u32 d2vga_control; u32 vga_render_control; u32 vga_hdp_control; - u32 d1crtc_control; - u32 d2crtc_control; }; + int rv515_init(struct radeon_device *rdev); void rv515_fini(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_sa_bo *vb); int r600_mc_wait_for_idle(struct radeon_device *rdev); +uint64_t r600_get_gpu_clock(struct radeon_device *rdev); /* * rv770,rv730,rv710,rv740 @@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev); * evergreen */ struct evergreen_mc_save { - u32 vga_control[6]; u32 vga_render_control; u32 vga_hdp_control; - u32 crtc_control[6]; }; + void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); int evergreen_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); @@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm); int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); +uint64_t si_get_gpu_clock(struct radeon_device *rdev); #endif diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index b1e3820df363..f9c21f9d16bc 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO info; struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; }; bool radeon_atombios_sideport_present(struct radeon_device *rdev) @@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); u16 data_offset, size; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; + union igp_info *igp_info; u8 frev, crev; u16 percentage = 0, rate = 0; /* get any igp specific overrides */ if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { - igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) + igp_info = (union igp_info *) (mode_info->atom_context->bios + data_offset); - switch (id) { - case ASIC_INTERNAL_SS_ON_TMDS: - percentage = le16_to_cpu(igp_info->usDVISSPercentage); - rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); + switch (crev) { + case 6: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage); + rate = 
le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_HDMI: - percentage = le16_to_cpu(igp_info->usHDMISSPercentage); - rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); + case 7: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_LVDS: - percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); - rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); + default: + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); break; } if (percentage) diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 576f4f6919f2..f75247d42ffd 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde return i2c; } +static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct radeon_i2c_bus_rec i2c; + u16 offset; + u8 id, blocks, clk, data; + int i; + + i2c.valid = false; + + offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); + if (offset) { + blocks = RBIOS8(offset + 2); + for (i = 0; i < blocks; i++) { + id = RBIOS8(offset + 3 + (i * 5) + 0); + if (id == 136) { + clk = RBIOS8(offset + 3 + (i * 5) + 3); + data = RBIOS8(offset + 3 + (i * 5) + 4); + /* gpiopad */ + i2c = combios_setup_i2c_bus(rdev, DDC_MONID, + (1 << clk), (1 << data)); + break; + } + } + } + return i2c; +} + void radeon_combios_i2c_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; @@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) } else if (rdev->family == CHIP_RS300 || rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { - u16 offset; - u8 id, blocks, clk, data; - int i; - /* 0x68 */ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); - offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); - if (offset) { - blocks = RBIOS8(offset + 2); - for (i = 0; i < blocks; i++) { - id = RBIOS8(offset + 3 + (i * 5) + 0); - if (id == 136) { - clk = RBIOS8(offset + 3 + (i * 5) + 3); - data = RBIOS8(offset + 3 + (i * 5) + 4); - /* gpiopad */ - i2c = combios_setup_i2c_bus(rdev, DDC_MONID, - (1 << clk), (1 << data)); - rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); - break; - } - } - } + /* gpiopad */ + i2c = radeon_combios_get_i2c_info_from_table(rdev); + if (i2c.valid) + rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); } else if ((rdev->family == CHIP_R200) || (rdev->family >= CHIP_R300)) { /* 0x68 */ @@ -2321,7 +2333,10 @@ 
bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) connector = (tmp >> 12) & 0xf; ddc_type = (tmp >> 8) & 0xf; - ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); + if (ddc_type == 5) + ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev); + else + ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); switch (connector) { case CONNECTOR_PROPRIETARY_LEGACY: diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 8a4c49ef0cc4..b4a0db24f4dd 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return 0; } +static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser, + struct radeon_fence *fence) +{ + struct radeon_fpriv *fpriv = parser->filp->driver_priv; + struct radeon_vm *vm = &fpriv->vm; + struct radeon_bo_list *lobj; + + if (parser->chunk_ib_idx == -1) { + return; + } + if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) { + return; + } + + list_for_each_entry(lobj, &parser->validated, tv.head) { + struct radeon_bo_va *bo_va; + struct radeon_bo *rbo = lobj->bo; + + bo_va = radeon_bo_va(rbo, vm); + radeon_fence_unref(&bo_va->fence); + bo_va->fence = radeon_fence_ref(fence); + } +} + /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. @@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (!error) + if (!error) { + /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */ + radeon_bo_vm_fence_va(parser, parser->ib.fence); ttm_eu_fence_buffer_objects(&parser->validated, parser->ib.fence); - else + } else { ttm_eu_backoff_reservation(&parser->validated); + } if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { @@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if (parser->chunk_ib_idx == -1) return 0; - if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) return 0; diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 711e95ad39bf..8794744cdf1a 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c @@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); - WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); @@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | - EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 742af8244e89..d2e243867ac6 100644 --- 
a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev, atomic_set(&rdev->ih.lock, 0); mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); + mutex_init(&rdev->gpu_clock_mutex); init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index dcea6f01ae4e..d7269f48d37c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -59,9 +59,12 @@ * 2.15.0 - add max_pipes query * 2.16.0 - fix evergreen 2D tiled surface calculation * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx + * 2.18.0 - r600-eg: allow "invalid" DB formats + * 2.19.0 - r600-eg: MSAA textures + * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 17 +#define KMS_DRIVER_MINOR 20 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index b3720054614d..bb3b7fe05ccd 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, return -EINVAL; } - if (bo_va->valid) + if (bo_va->valid && mem) return 0; ngpu_pages = radeon_bo_ngpu_pages(bo); @@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_bo *bo) { struct radeon_bo_va *bo_va; + int r; bo_va = radeon_bo_va(bo, vm); if (bo_va == NULL) return 0; + /* wait for va use to end */ + while (bo_va->fence) { + r = radeon_fence_wait(bo_va->fence, false); + if (r) { + DRM_ERROR("error while waiting for fence: %d\n", r); + } + if (r == -EDEADLK) { + r = radeon_gpu_reset(rdev); + if (!r) + continue; + } + break; + } + radeon_fence_unref(&bo_va->fence); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); radeon_vm_bo_update_pte(rdev, vm, bo, NULL); @@ -934,7 +950,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) } /** - * radeon_vm_init - tear down a vm instance + * radeon_vm_fini - tear down a vm instance * * @rdev: radeon_device pointer * @vm: requested vm @@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_unbind_locked(rdev, vm); mutex_unlock(&rdev->vm_manager.lock); - /* remove all bo */ + /* remove all bo at this point non are busy any more because unbind + * waited for the last vm fence to signal + */ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); list_del_init(&bo_va->bo_list); list_del_init(&bo_va->vm_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); kfree(bo_va); } @@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_reserve(bo_va->bo, false); if (!r) { list_del_init(&bo_va->bo_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(bo_va->bo); kfree(bo_va); } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 84d045245739..1b57b0058ad6 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_device *rdev = rbo->rdev; struct 
radeon_fpriv *fpriv = file_priv->driver_priv; struct radeon_vm *vm = &fpriv->vm; - struct radeon_bo_va *bo_va, *tmp; if (rdev->family < CHIP_CAYMAN) { return; } if (radeon_bo_reserve(rbo, false)) { + dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n"); return; } - list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) { - if (bo_va->vm == vm) { - /* remove from this vm address space */ - mutex_lock(&vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); - } - } + radeon_vm_bo_rmv(rdev, vm, rbo); radeon_bo_unreserve(rbo); } diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 1d73f16b5d97..414b4acf6947 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -29,6 +29,7 @@ #include "drm_sarea.h" #include "radeon.h" #include "radeon_drm.h" +#include "radeon_asic.h" #include <linux/vga_switcheroo.h> #include <linux/slab.h> @@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev, int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; - struct drm_radeon_info *info; + struct drm_radeon_info *info = data; struct radeon_mode_info *minfo = &rdev->mode_info; - uint32_t *value_ptr; - uint32_t value; + uint32_t value, *value_ptr; + uint64_t value64, *value_ptr64; struct drm_crtc *crtc; int i, found; - info = data; + /* TIMESTAMP is a 64-bit value, needs special handling. */ + if (info->request == RADEON_INFO_TIMESTAMP) { + if (rdev->family >= CHIP_R600) { + value_ptr64 = (uint64_t*)((unsigned long)info->value); + if (rdev->family >= CHIP_TAHITI) { + value64 = si_get_gpu_clock(rdev); + } else { + value64 = r600_get_gpu_clock(rdev); + } + + if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) { + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); + return -EFAULT; + } + return 0; + } else { + DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); + return -EINVAL; + } + } + value_ptr = (uint32_t *)((unsigned long)info->value); - if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) + if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) { + DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; + } switch (info->request) { case RADEON_INFO_DEVICE_ID: @@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { - DRM_ERROR("copy_to_user\n"); + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); return -EFAULT; } return 0; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index d5fd615897ec..94b4a1c12893 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; + radeon_crtc->in_mode_set = true; /* * The hardware wedges sometimes if you reconfigure one CRTC * whilst another is running (see fdo bug #24611). 
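
For illustration only, separate from the patches above: the radeon_kms.c hunk earlier in this diff adds a RADEON_INFO_TIMESTAMP case that copies a 64-bit GPU clock counter snapshot to a user-supplied pointer (r6xx and newer, via r600_get_gpu_clock() or, on Tahiti and later, si_get_gpu_clock()). A minimal userspace sketch of issuing that query through the existing DRM_IOCTL_RADEON_INFO ioctl might look as follows; the include path and the presence of a RADEON_INFO_TIMESTAMP define in the installed radeon_drm.h are assumptions made only for this sketch, not something the patch guarantees.

/* Illustrative sketch, not part of the patches in this diff. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/radeon_drm.h>	/* assumed to provide RADEON_INFO_TIMESTAMP after this change */

int main(void)
{
	uint64_t timestamp = 0;
	struct drm_radeon_info info;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.request = RADEON_INFO_TIMESTAMP;	/* query added with KMS driver 2.20.0 */
	info.value = (uintptr_t)&timestamp;	/* kernel copies the 64-bit snapshot here */

	if (ioctl(fd, DRM_IOCTL_RADEON_INFO, &info) == 0)
		printf("GPU clock counter: %llu\n", (unsigned long long)timestamp);
	else
		perror("RADEON_INFO_TIMESTAMP (r6xx and newer only)");

	close(fd);
	return 0;
}

On pre-r600 parts the new ioctl branch returns -EINVAL, matching the DRM_DEBUG_KMS("timestamp is r6xx+ only!") path added in radeon_info_ioctl().
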
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; @@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) if (crtci->enabled) radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); } + radeon_crtc->in_mode_set = false; } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f380d59c5763..d56978949f34 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -275,6 +275,7 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; + bool in_mode_set; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 1f1a4c803c1d..1cb014b571ab 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo) list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { /* remove from all vm address space */ - mutex_lock(&bo_va->vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); + radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo); } } diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index a12fbcc8ccb6..aa8ef491ef3c 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) { - save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL); - save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL); save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); - save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL); - save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); /* Stop all video */ WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); @@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) /* Unlock host access */ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); - WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); - WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); - WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index c053f8193771..0139e227e3c7 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev) /* XXX what about 12? 
*/ rdev->config.si.tile_config |= (3 << 0); break; - } - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.si.tile_config |= 1 << 4; - else + } + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.si.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.si.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.si.tile_config |= 2 << 4; + break; + } rdev->config.si.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.si.tile_config |= @@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev) rdev->bios = NULL; } +/** + * si_get_gpu_clock - return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (SI). + * Returns the 64 bit clock counter snapshot. + */ +uint64_t si_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 7869089e8761..ef4815c27b1c 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -698,6 +698,9 @@ #define RLC_UCODE_ADDR 0xC32C #define RLC_UCODE_DATA 0xC330 +#define RLC_GPU_CLOCK_COUNT_LSB 0xC338 +#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340 #define RLC_MC_CNTL 0xC344 #define RLC_UCODE_CNTL 0xC348 diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c index 7bd65bdd15a8..291ecc145585 100644 --- a/drivers/gpu/drm/udl/udl_gem.c +++ b/drivers/gpu/drm/udl/udl_gem.c @@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, /* need to attach */ attach = dma_buf_attach(dma_buf, dev->dev); if (IS_ERR(attach)) - return ERR_PTR(PTR_ERR(attach)); + return ERR_CAST(attach); sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sg)) { diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c index 503c7096ed36..908407efc672 100644 --- a/drivers/input/touchscreen/eeti_ts.c +++ b/drivers/input/touchscreen/eeti_ts.c @@ -48,7 +48,7 @@ struct eeti_ts_priv { struct input_dev *input; struct work_struct work; struct mutex mutex; - int irq, irq_active_high; + int irq_gpio, irq, irq_active_high; }; #define EETI_TS_BITDEPTH (11) @@ -62,7 +62,7 @@ struct eeti_ts_priv { static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv) { - return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high; + return gpio_get_value(priv->irq_gpio) == priv->irq_active_high; } static void eeti_ts_read(struct work_struct *work) @@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev) static int __devinit eeti_ts_probe(struct i2c_client *client, const struct i2c_device_id *idp) { - struct eeti_ts_platform_data *pdata; + struct eeti_ts_platform_data *pdata = client->dev.platform_data; struct eeti_ts_priv *priv; struct input_dev *input; unsigned int irq_flags; @@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, priv->client = client; priv->input = input; - priv->irq = client->irq; + priv->irq_gpio = pdata->irq_gpio; + priv->irq = gpio_to_irq(pdata->irq_gpio); - pdata = client->dev.platform_data; + err = gpio_request_one(pdata->irq_gpio, 
GPIOF_IN, client->name); + if (err < 0) + goto err1; if (pdata) priv->irq_active_high = pdata->irq_active_high; @@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, err = input_register_device(input); if (err) - goto err1; + goto err2; err = request_irq(priv->irq, eeti_ts_isr, irq_flags, client->name, priv); if (err) { dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); - goto err2; + goto err3; } /* @@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, device_init_wakeup(&client->dev, 0); return 0; -err2: +err3: input_unregister_device(input); input = NULL; /* so we dont try to free it below */ +err2: + gpio_free(pdata->irq_gpio); err1: input_free_device(input); kfree(priv); diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 6d1cbdfc9b2a..b64502dfa9f4 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -296,8 +296,13 @@ static int iommu_init_device(struct device *dev) } else dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -305,14 +310,28 @@ static int iommu_init_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. + */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 500e7f15f5c2..0a2ea317120a 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void) break; } - /* Make sure ACS will be enabled */ - pci_request_acs(); - ret = amd_iommu_init_devices(); print_iommu_info(); @@ -1652,6 +1649,9 @@ static bool detect_ivrs(void) early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); + /* Make sure ACS will be enabled during PCI probe */ + pci_request_acs(); + return true; } diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 45350ff5e93c..80bad32aa463 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain) spin_lock_init(&priv->pgtablelock); INIT_LIST_HEAD(&priv->clients); - dom->geometry.aperture_start = 0; - dom->geometry.aperture_end = ~0UL; - dom->geometry.force_aperture = true; + domain->geometry.aperture_start = 0; + domain->geometry.aperture_end = ~0UL; + domain->geometry.force_aperture = true; domain->priv = priv; return 0; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 7469b5346643..2297ec193eb4 100644 --- a/drivers/iommu/intel-iommu.c +++ 
b/drivers/iommu/intel-iommu.c @@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) if (!drhd) { printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", pci_name(pdev)); + free_domain_mem(domain); return NULL; } iommu = drhd->iommu; @@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev) } else dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. + */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 4ba325ab6262..2a4bb36bc688 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain, goto out; } } - dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev)); + dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev)); out: spin_unlock(&as->client_lock); } static int smmu_iommu_domain_init(struct iommu_domain *domain) { - int i, err = -ENODEV; + int i, err = -EAGAIN; unsigned long flags; struct smmu_as *as; struct smmu_device *smmu = smmu_handle; @@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain) /* Look for a free AS with lock held */ for (i = 0; i < smmu->num_as; i++) { as = &smmu->as[i]; - if (!as->pdir_page) { - err = alloc_pdir(as); - if (!err) - goto found; - } + + if (as->pdir_page) + continue; + + err = alloc_pdir(as); + if (!err) + goto found; + if (err != -EAGAIN) break; } diff --git a/drivers/isdn/isdnloop/isdnloop.c b/drivers/isdn/isdnloop/isdnloop.c index 5405ec644db3..baf2686aa8eb 100644 --- a/drivers/isdn/isdnloop/isdnloop.c +++ b/drivers/isdn/isdnloop/isdnloop.c @@ -16,7 +16,6 @@ #include <linux/sched.h> #include "isdnloop.h" -static char *revision = "$Revision: 1.11.6.7 $"; static char *isdnloop_id = "loop0"; MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); @@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1) static int __init isdnloop_init(void) { - char *p; - char rev[10]; - - if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); - p = strchr(rev, '$'); - *p = 0; - } else - strcpy(rev, " ??? 
"); - printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev); - if (isdnloop_id) return (isdnloop_addcard(isdnloop_id)); diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c index 0dc8abca1407..949cabb88f1c 100644 --- a/drivers/isdn/mISDN/layer2.c +++ b/drivers/isdn/mISDN/layer2.c @@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei, InitWin(l2); l2->l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &l2->flag) || - test_bit(FLG_PTP, &l2->flag) || + test_bit(FLG_FIXED_TEI, &l2->flag) || test_bit(FLG_LAPD_NET, &l2->flag)) l2->l2m.state = ST_L2_4; else diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 6157cbbf4113..363975b3c925 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -224,7 +224,7 @@ void led_trigger_event(struct led_trigger *trig, struct led_classdev *led_cdev; led_cdev = list_entry(entry, struct led_classdev, trig_list); - led_set_brightness(led_cdev, brightness); + __led_set_brightness(led_cdev, brightness); } read_unlock(&trig->leddev_list_lock); } diff --git a/drivers/leds/leds-lp8788.c b/drivers/leds/leds-lp8788.c index 53bd136f1ef0..0ade6ebfc914 100644 --- a/drivers/leds/leds-lp8788.c +++ b/drivers/leds/leds-lp8788.c @@ -63,7 +63,7 @@ static int lp8788_led_init_device(struct lp8788_led *led, /* scale configuration */ addr = LP8788_ISINK_CTRL; mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET); - val = cfg->scale << cfg->num; + val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET); ret = lp8788_update_bits(led->lp, addr, mask, val); if (ret) return ret; diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c index 9ee12c28059a..771ea067e680 100644 --- a/drivers/leds/leds-renesas-tpu.c +++ b/drivers/leds/leds-renesas-tpu.c @@ -247,7 +247,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev) if (!cfg) { dev_err(&pdev->dev, "missing platform data\n"); - goto err0; + return -ENODEV; } p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 383421bf5760..683e18a23329 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -925,6 +925,7 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, goto out; } + ret = 0; if (pdata->leds) { int i; diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c index 43a76c41cfcc..db662e2dcfa5 100644 --- a/drivers/mfd/ezx-pcap.c +++ b/drivers/mfd/ezx-pcap.c @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work) } local_irq_enable(); ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq))); + } while (gpio_get_value(pdata->gpio)); } static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 31bb7e5b504a..8ca417614c57 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -480,7 +480,7 @@ config MTD_NAND_NANDSIM config MTD_NAND_GPMI_NAND bool "GPMI NAND Flash Controller driver" - depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q) + depends on MTD_NAND && MXS_DMA help Enables NAND Flash support for IMX23 or IMX28. 
The GPMI controller is very powerful, with the help of BCH diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index e9309b3659e7..ac4fd756eda3 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1245,7 +1245,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) goto out_release_mem_region; } else { struct dma_slave_config cfg; - int rc; memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = info->phys_base; @@ -1254,10 +1253,10 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.src_maxburst = 16; cfg.dst_maxburst = 16; - rc = dmaengine_slave_config(info->dma, &cfg); - if (rc) { + err = dmaengine_slave_config(info->dma, &cfg); + if (err) { dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", - rc); + err); goto out_release_mem_region; } info->nand.read_buf = omap_read_buf_dma_pref; diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c index 545c09ed9079..cff6f023c03a 100644 --- a/drivers/net/appletalk/cops.c +++ b/drivers/net/appletalk/cops.c @@ -996,9 +996,7 @@ static int __init cops_module_init(void) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", cardname); cops_dev = cops_probe(-1); - if (IS_ERR(cops_dev)) - return PTR_ERR(cops_dev); - return 0; + return PTR_RET(cops_dev); } static void __exit cops_module_exit(void) diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c index 0910dce3996d..b5782cdf0bca 100644 --- a/drivers/net/appletalk/ltpc.c +++ b/drivers/net/appletalk/ltpc.c @@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void) "ltpc: Autoprobing is not recommended for modules\n"); dev_ltpc = ltpc_probe(); - if (IS_ERR(dev_ltpc)) - return PTR_ERR(dev_ltpc); - return 0; + return PTR_RET(dev_ltpc); } module_init(ltpc_module_init); #endif diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index f0c8bd54ce29..021d69c5d9bc 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -1712,7 +1712,7 @@ e100_set_network_leds(int active) static void e100_netpoll(struct net_device* netdev) { - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); } #endif diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 77bcd4cb4ffb..463b9ec57d80 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1278,7 +1278,7 @@ struct bnx2x { #define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_FW_RX_ALIGN_END \ - max(1UL << BNX2X_RX_ALIGN_SHIFT, \ + max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index dd451c3dd83d..02b5a343b195 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } -/* - * Reset the load status for the current engine. - */ -static void bnx2x_clear_load_status(struct bnx2x *bp) -{ - u32 val; - u32 mask = (BP_PATH(bp) ? 
BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); - val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); -} - static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? ", " : "", blk); @@ -9384,32 +9370,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) return rc; } -static bool __devinit bnx2x_can_flr(struct bnx2x *bp) -{ - int pos; - u32 cap; - struct pci_dev *dev = bp->pdev; - - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return false; - - return true; -} - static int __devinit bnx2x_do_flr(struct bnx2x *bp) { int i, pos; u16 status; struct pci_dev *dev = bp->pdev; - /* probe the capability first */ - if (bnx2x_can_flr(bp)) - return -ENOTTY; + + if (CHIP_IS_E1x(bp)) { + BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); + return -EINVAL; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", + bp->common.bc_ver); + return -EINVAL; + } pos = pci_pcie_cap(dev); if (!pos) @@ -9429,12 +9407,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp) "transaction is not cleared; proceeding with reset anyway\n"); clear: - if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { - BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", - bp->common.bc_ver); - return -EINVAL; - } + BNX2X_DEV_INFO("Initiating FLR\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); return 0; @@ -9454,8 +9428,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ - if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) - return bnx2x_do_flr(bp); + rc = bnx2x_test_firmware_version(bp, false); + + if (!rc) { + /* fw version is good */ + BNX2X_DEV_INFO("FW version matches our own. 
Attempting FLR\n"); + rc = bnx2x_do_flr(bp); + } + + if (!rc) { + /* FLR was performed */ + BNX2X_DEV_INFO("FLR successful\n"); + return 0; + } + + BNX2X_DEV_INFO("Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bnx2x_prev_mcp_done(bp); @@ -11427,9 +11414,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, if (!chip_is_e1x) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - /* Reset the load counter */ - bnx2x_clear_load_status(bp); - dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 734fd87cd990..62f754bd0dfe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, break; default: + kfree(new_cmd); BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c60de89b6669..90a903d83d87 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (adapter->num_rx_qs != MAX_RX_QS) dev_info(&adapter->pdev->dev, - "Created only %d receive queues", adapter->num_rx_qs); + "Created only %d receive queues\n", adapter->num_rx_qs); return 0; } diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c index 0b3bade957fd..080c89093feb 100644 --- a/drivers/net/ethernet/intel/e1000e/82571.c +++ b/drivers/net/ethernet/intel/e1000e/82571.c @@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext, eecd; + u32 ctrl, ctrl_ext, eecd, tctl; s32 ret_val; /* @@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(IMC, 0xffffffff); ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); e1e_flush(); usleep_range(10000, 20000); @@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * auto-negotiation in the TXCW register and disable * forced link in the Device Control register in an * attempt to auto-negotiate with our link partner. - * If the partner code word is null, stop forcing - * and restart auto negotiation. 
*/ - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + if (rxcw & E1000_RXCW_C) { /* Enable autoneg, and unforce link up */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 95b245310f17..46c3b1f9ff89 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); } +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) { + pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + /* * e1000e_dump - Print registers, Tx-ring and Rx-ring */ @@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) (unsigned long long)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); } /* Print Rx Ring Summary */ @@ -381,10 +399,8 @@ rx_ring_summary: buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_ps_bsize0, true); + e1000e_dump_ps_pages(adapter, + buffer_info); } } break; @@ -444,12 +460,12 @@ rx_ring_summary: (unsigned long long)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) + if (netif_msg_pktdata(adapter) && + buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt - (buffer_info->dma), + buffer_info->skb->data, adapter->rx_buffer_len, true); } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 5e84eaac48c1..ba994fb4cec6 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; + /* + * Check for invalid size + */ + if ((hw->mac.type == e1000_82576) && (size > 15)) { + pr_notice("The NVM size is not valid, defaulting to 32K\n"); + size = 15; + } + nvm->word_size = 1 << size; if (hw->mac.type < e1000_i210) { nvm->opcode_bits = 8; @@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) } else nvm->type = e1000_nvm_flash_hw; - /* - * Check for invalid size - */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); - size = 15; - } - /* NVM Function Pointers */ switch (hw->mac.type) { case e1000_82580: diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 10efcd88dca0..28394bea5253 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -156,8 +156,12 @@ : (0x0E018 + ((_n) * 0x40))) #define E1000_TXDCTL(_n) ((_n) < 4 ? 
(0x03828 + ((_n) * 0x100)) \ : (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) #define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ : (0x0E038 + ((_n) * 0x40))) #define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index a19c84cad0e9..70591117051b 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (igb_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, "Cannot change link " - "characteristics when SoL/IDER is active.\n"); + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); return -EINVAL; } @@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, wr32(reg, (_test[pat] & write)); val = rd32(reg) & mask; if (val != (_test[pat] & write & mask)) { - dev_err(&adapter->pdev->dev, "pattern test reg %04X " - "failed: got 0x%08X expected 0x%08X\n", + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (_test[pat] & write & mask)); *data = reg; return 1; @@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, wr32(reg, write & mask); val = rd32(reg); if ((write & mask) != (val & mask)) { - dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" - " got 0x%08X expected 0x%08X\n", reg, + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return 1; @@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_STATUS, toggle); after = rd32(E1000_STATUS) & toggle; if (value != after) { - dev_err(&adapter->pdev->dev, "failed STATUS register test " - "got: 0x%08X expected: 0x%08X\n", after, value); + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); *data = 1; return 1; } @@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) break; } + /* add small delay to avoid loopback test failure */ + msleep(50); + /* force 1000, set loopback */ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); @@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) * sessions are active */ if (igb_check_reset_block(&adapter->hw)) { dev_err(&adapter->pdev->dev, - "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); + "Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto out; } if ((adapter->hw.mac.type == e1000_i210) - || (adapter->hw.mac.type == e1000_i210)) { + || (adapter->hw.mac.type == e1000_i211)) { dev_err(&adapter->pdev->dev, - "Loopback test not supported " - "on this part at this time.\n"); + "Loopback test not supported on this part at this time.\n"); *data = 0; goto out; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c 
b/drivers/net/ethernet/intel/igb/igb_main.c index b7c2d5050572..48cc4fb1a307 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter) (u64)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), + 16, 1, buffer_info->skb->data, buffer_info->length, true); } } @@ -547,18 +547,17 @@ rx_ring_summary: (u64)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) { + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->skb) { print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - IGB_RX_HDR_LEN, true); + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + IGB_RX_HDR_LEN, true); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt( - buffer_info->page_dma + - buffer_info->page_offset), + page_address(buffer_info->page) + + buffer_info->page_offset, PAGE_SIZE/2, true); } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 50fc137501da..18bf08c9d7a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); - if (speed & IXGBE_LINK_SPEED_10GB_FULL) + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && (hw->phy.smart_speed_active == false)) autoc |= IXGBE_AUTOC_KR_SUPP; + } if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f32e70300770..5aba5ecdf1e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* If source MAC is equal to our own MAC and not performing * the selftest or flb disabled - drop the packet */ if (s_mac == priv->mac && - (!(dev->features & NETIF_F_LOOPBACK) || - !priv->validate_loopback)) + !((dev->features & NETIF_F_LOOPBACK) || + priv->validate_loopback)) goto next; /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 019d856b1334..10bba09c44ea 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->cons = 0xffffffff; ring->last_nr_txbb = 1; ring->poll_cnt = 0; - ring->blocked = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); @@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ring->cons += txbbs_skipped; netdev_tx_completed_queue(ring->tx_queue, packets, bytes); - /* Wakeup Tx queue if this ring stopped it */ - if (unlikely(ring->blocked)) { - if ((u32) (ring->prod - 
ring->cons) <= - ring->size - HEADROOM - MAX_DESC_TXBBS) { - ring->blocked = 0; - netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; - } + /* + * Wakeup Tx queue if this stopped, and at least 1 packet + * was completed + */ + if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { + netif_tx_wake_queue(ring->tx_queue); + priv->port_stats.wake_queue++; } } @@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - ring->blocked = 1; priv->port_stats.queue_stopped++; return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 48d0e90194cb..827b72dfce99 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev, "on this HCA, aborting.\n"); return -EINVAL; } - if (port_type[i] == MLX4_PORT_TYPE_ETH && - port_type[i + 1] == MLX4_PORT_TYPE_IB) - return -EINVAL; } } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5f1ab105debc..9d27e42264e2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -248,7 +248,6 @@ struct mlx4_en_tx_ring { u32 doorbell_qpn; void *buf; u16 poll_cnt; - int blocked; struct mlx4_en_tx_info *tx_info; u8 *bounce_buf; u32 last_nr_txbb; diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c index 802498293528..34ee09bae36e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -81,20 +81,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev, } /* - * Adjust port configuration: - * If port 1 sensed nothing and port 2 is IB, set both as IB - * If port 2 sensed nothing and port 1 is Eth, set both as Eth - */ - if (stype[0] == MLX4_PORT_TYPE_ETH) { - for (i = 1; i < dev->caps.num_ports; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; - } - if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { - for (i = 0; i < dev->caps.num_ports - 1; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; - } - - /* * If sensed nothing, remain in current configuration. 
*/ for (i = 0; i < dev->caps.num_ports; i++) diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 4069edab229e..53743f7a2ca9 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev) "phy-mode", NULL); if (mode && !strcmp(mode, "mii")) return PHY_INTERFACE_MODE_MII; - return PHY_INTERFACE_MODE_RMII; } - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT - return PHY_INTERFACE_MODE_MII; -#else return PHY_INTERFACE_MODE_RMII; -#endif } static bool use_iram_for_net(struct device *dev) { if (dev && dev->of_node) return of_property_read_bool(dev->of_node, "use-iram"); - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET - return true; -#else return false; -#endif } /* Receive Status information word */ diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 70554a1b2b02..65a8d49106a4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx) goto fail2; } + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; rc = efx_probe_filters(efx); @@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; rtnl_lock(); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index be8f9158a714..70755c97251a 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -30,6 +30,7 @@ extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -/* The smallest [rt]xq_entries that the driver supports. Callers of - * efx_wake_queue() assume that they can subsequently send at least one - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. 
+ */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ extern int efx_probe_filters(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 10536f93b561..8cba2df82b18 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; - if (ring->rx_pending < EFX_MIN_RING_SIZE || - ring->tx_pending < EFX_MIN_RING_SIZE) { + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, - "TX and RX queues cannot be smaller than %ld\n", - EFX_MIN_RING_SIZE); + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); return -EINVAL; } - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 9b225a7769f7..18713436b443 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) return len; } +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround */ + if (EFX_WORKAROUND_5391(efx)) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + /* * Add a socket buffer to a TX queue * diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cd01ee7ecef1..b93245c11995 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, * the necessary resources and invokes the main to init * the net device, register the mdio bus etc. 
*/ -static int stmmac_pltfr_probe(struct platform_device *pdev) +static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 482648fcf0b6..98934bdf6acf 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev) } int ixp46x_phc_index = -1; +EXPORT_SYMBOL_GPL(ixp46x_phc_index); static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 6cee2917eb02..4a1a5f58fa73 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device) unsigned long flags; net_device = hv_get_drvdata(device); - spin_lock_irqsave(&device->channel->inbound_lock, flags); - net_device->destroy = true; - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); - - /* Wait for all send completions */ - wait_event(net_device->wait_drain, - atomic_read(&net_device->num_outstanding_sends) == 0); netvsc_disconnect_vsp(net_device); diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index e5d6146937fa..1e88a1095934 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; + struct netvsc_device *nvdev = dev->net_dev; + struct hv_device *hdev = nvdev->dev; + ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev) dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: + spin_lock_irqsave(&hdev->channel->inbound_lock, flags); + nvdev->destroy = true; + spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); + + /* Wait for all send completions */ + wait_event(nvdev->wait_drain, + atomic_read(&nvdev->num_outstanding_sends) == 0); + if (request) put_rndis_request(dev, request); return; diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c index a561ae44a9ac..c6a0299aa9f9 100644 --- a/drivers/net/irda/bfin_sir.c +++ b/drivers/net/irda/bfin_sir.c @@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) /* If not add the 'RPOLC', we can't catch the receive interrupt. * It's related with the HW layout and the IR transiver. */ - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); return ret; } @@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev bfin_sir_stop_rx(port); val = UART_GET_GCTL(port); - val &= ~(UCEN | IREN | RPOLC); + val &= ~(UCEN | UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); #ifdef CONFIG_SIR_BFIN_DMA @@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work) * reset all the UART. 
*/ val = UART_GET_GCTL(port); - val &= ~(IREN | RPOLC); + val &= ~(UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); SSYNC(); - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); SSYNC(); /* bfin_sir_set_speed(port, self->speed); */ diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0737bd4d1669..0f0f9ce3a776 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q) int i; for (i = 0; i < MAX_MACVTAP_QUEUES; i++) { - if (rcu_dereference(vlan->taps[i]) == q) + if (rcu_dereference_protected(vlan->taps[i], + lockdep_is_held(&macvtap_lock)) == q) return i; } diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c index e0cc4ef33dee..eefe49e8713c 100644 --- a/drivers/net/phy/mdio-mux-gpio.c +++ b/drivers/net/phy/mdio-mux-gpio.c @@ -101,7 +101,6 @@ err: n--; gpio_free(s->gpio[n]); } - devm_kfree(&pdev->dev, s); return r; } diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 1c98321b56cc..162464fe86bf 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (sk_pppox(po)->sk_state & PPPOX_DEAD) goto tx_error; - rt = ip_route_output_ports(&init_net, &fl4, NULL, + rt = ip_route_output_ports(sock_net(sk), &fl4, NULL, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, @@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pptp_chan_ops; - rt = ip_route_output_ports(&init_net, &fl4, sk, + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 926d4db5cb38..3a16d4fdaa05 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun) netif_tx_lock_bh(tun->dev); netif_carrier_off(tun->dev); tun->tfile = NULL; - tun->socket.file = NULL; netif_tx_unlock_bh(tun->dev); /* Drop read queue */ diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c index 64610048ce87..7d78669000d7 100644 --- a/drivers/net/usb/cdc-phonet.c +++ b/drivers/net/usb/cdc-phonet.c @@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev) struct urb *req = usb_alloc_urb(0, GFP_KERNEL); if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + usb_free_urb(req); usbpn_close(dev); return -ENOMEM; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index f4ce5957df32..4cd582a4f625 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Dell branded MBM devices like DW5550 */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x413c, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + + /* Toshiba branded MBM devices */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x0930, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + /* Generic CDC-NCM devices */ { 
USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index cfa91ab7acf8..60b6a9daff7e 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -730,6 +730,7 @@ int ath9k_hw_init(struct ath_hw *ah) case AR9300_DEVID_QCA955X: case AR9300_DEVID_AR9580: case AR9300_DEVID_AR9462: + case AR9485_DEVID_AR1111: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index dd0c146d81dc..ce7332c64efb 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -49,6 +49,7 @@ #define AR9300_DEVID_AR9462 0x0034 #define AR9300_DEVID_AR9330 0x0035 #define AR9300_DEVID_QCA955X 0x0038 +#define AR9485_DEVID_AR1111 0x0037 #define AR5416_AR9100_DEVID 0x000b diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c index 87b89d55e637..d455de9162ec 100644 --- a/drivers/net/wireless/ath/ath9k/pci.c +++ b/drivers/net/wireless/ath/ath9k/pci.c @@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ + { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ { 0 } }; diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index b80352b308d5..a140165dfee0 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev) if (dev->dev->chip_id == 0x4301) { mask |= 0x0060; set |= 0x0060; + } else if (dev->dev->chip_id == 0x5354) { + /* Don't allow overtaking buttons GPIOs */ + set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */ } - if (dev->dev->chip_id == 0x5354) - set &= 0xff02; + if (0 /* FIXME: conditional unknown */ ) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0100); - mask |= 0x0180; - set |= 0x0180; + /* BT Coexistance Input */ + mask |= 0x0080; + set |= 0x0080; + /* BT Coexistance Out */ + mask |= 0x0100; + set |= 0x0100; } if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) { + /* PA is controlled by gpio 9, let ucode handle it */ b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } - if (dev->dev->core_rev >= 2) - mask |= 0x0010; /* FIXME: This is redundant. 
*/ switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc, - BCMA_CC_GPIOCTL) & mask) | set); + BCMA_CC_GPIOCTL) & ~mask) | set); break; #endif #ifdef CONFIG_B43_SSB @@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev) if (gpiodev) ssb_write32(gpiodev, B43_GPIO_CONTROL, (ssb_read32(gpiodev, B43_GPIO_CONTROL) - & mask) | set); + & ~mask) | set); break; #endif } diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 9a4c63f927cb..7ed7d7577024 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel *ch = wlc->pub->ieee_hw->conf.channel; - const struct ieee80211_reg_rule *reg_rule; struct txpwr_limits txpwr; - int ret; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); @@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, ); /* set or restore gmode as required by regulatory */ - ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule); - if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM)) + if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 9e79d47e077f..192ad5c1fcc8 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = { IEEE80211_CHAN_NO_HT40PLUS), CHAN2GHZ(14, 2484, IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS | - IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) + IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS | + IEEE80211_CHAN_NO_OFDM) }; static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = { diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c index 6fddd2785e6e..a82f46c10f5e 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, */ static bool rs_use_green(struct ieee80211_sta *sta) { - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && - !(ctx->ht.non_gf_sta_present); + /* + * There's a bug somewhere in this code that causes the + * scaling to get stuck because GF+SGI can't be combined + * in SISO rates. Until we find that bug, disable GF, it + * has only limited benefit and we still interoperate with + * GF APs since we can always receive GF transmissions.
+ */ + return false; +} + /** diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index eb5de800ed90..1c10b542ab23 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv, netif_tx_wake_all_queues(priv->dev); } + kfree(cmd); done: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; } diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c index 76caebaa4397..e970897f6ab5 100644 --- a/drivers/net/wireless/libertas/if_sdio.c +++ b/drivers/net/wireless/libertas/if_sdio.c @@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func) kfree(packet); } + kfree(card); lbs_deb_leave(LBS_DEB_SDIO); } diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 58048189bd24..fe1ea43c5149 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c @@ -571,7 +571,10 @@ static int lbs_thread(void *data) netdev_info(dev, "Timeout submitting command 0x%04x\n", le16_to_cpu(cmdnode->cmdbuf->command)); lbs_complete_command(priv, cmdnode, -ETIMEDOUT); - if (priv->reset_card) + + /* Reset card, but only when it isn't in the process + * of being shutdown anyway. */ + if (!dev->dismantle && priv->reset_card) priv->reset_card(priv); } priv->cmd_timed_out = 0; diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index 88455b1b9fe0..cb8c2aca54e4 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, mutex_unlock(&rt2x00dev->csr_mutex); } +static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + int i, count; + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + if (rt2x00_get_field32(reg, WLAN_EN)) + return 0; + + rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); + rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); + rt2x00_set_field32(&reg, WLAN_EN, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + + udelay(REGISTER_BUSY_DELAY); + + count = 0; + do { + /* + * Check PLL_LD & XTAL_RDY.
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); + if (rt2x00_get_field32(reg, PLL_LD) && + rt2x00_get_field32(reg, XTAL_RDY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + if (i >= REGISTER_BUSY_COUNT) { + + if (count >= 10) + return -EIO; + + rt2800_register_write(rt2x00dev, 0x58, 0x018); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x418); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x618); + udelay(REGISTER_BUSY_DELAY); + count++; + } else { + count = 0; + } + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); + rt2x00_set_field32(&reg, WLAN_RESET, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2x00_set_field32(&reg, WLAN_RESET, 0); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); + } while (count != 0); + + return 0; +} + void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1) @@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, { unsigned int i; u32 reg; + int retval; + + if (rt2x00_rt(rt2x00dev, RT3290)) { + retval = rt2800_enable_wlan_rt3290(rt2x00dev); + if (retval) + return -EBUSY; + } /* * If driver doesn't wake up firmware here, diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index 235376e9cb04..98aa426a3564 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c @@ -980,66 +980,6 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) return rt2800_validate_eeprom(rt2x00dev); } -static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) -{ - u32 reg; - int i, count; - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - if (rt2x00_get_field32(reg, WLAN_EN)) - return 0; - - rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); - rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); - rt2x00_set_field32(&reg, WLAN_EN, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - - udelay(REGISTER_BUSY_DELAY); - - count = 0; - do { - /* - * Check PLL_LD & XTAL_RDY.
- */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); - if (rt2x00_get_field32(reg, PLL_LD) && - rt2x00_get_field32(reg, XTAL_RDY)) - break; - udelay(REGISTER_BUSY_DELAY); - } - - if (i >= REGISTER_BUSY_COUNT) { - - if (count >= 10) - return -EIO; - - rt2800_register_write(rt2x00dev, 0x58, 0x018); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x418); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x618); - udelay(REGISTER_BUSY_DELAY); - count++; - } else { - count = 0; - } - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); - rt2x00_set_field32(&reg, WLAN_RESET, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2x00_set_field32(&reg, WLAN_RESET, 0); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); - } while (count != 0); - - return 0; -} static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; @@ -1063,17 +1003,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) return retval; /* - * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan - * clk for rt3290. That avoid the MCU fail in start phase. - */ - if (rt2x00_rt(rt2x00dev, RT3290)) { - retval = rt2800_enable_wlan_rt3290(rt2x00dev); - - if (retval) - return retval; - } - - /* * This device has multiple filters for control frames * and has a separate filter for PS Poll frames. */ diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index f32259686b45..3f7bc5cadf9a 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) { - struct ieee80211_conf conf = { .flags = 0 }; - struct rt2x00lib_conf libconf = { .conf = &conf }; + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf }; rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } diff --git a/drivers/pinctrl/pinctrl-imx23.c b/drivers/pinctrl/pinctrl-imx23.c index 75d3eff94296..3674d877ed7c 100644 --- a/drivers/pinctrl/pinctrl-imx23.c +++ b/drivers/pinctrl/pinctrl-imx23.c @@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void) { return platform_driver_register(&imx23_pinctrl_driver); } -arch_initcall(imx23_pinctrl_init); +postcore_initcall(imx23_pinctrl_init); static void __exit imx23_pinctrl_exit(void) { diff --git a/drivers/pinctrl/pinctrl-imx28.c b/drivers/pinctrl/pinctrl-imx28.c index b973026811a2..0f5b2122b1ba 100644 --- a/drivers/pinctrl/pinctrl-imx28.c +++ b/drivers/pinctrl/pinctrl-imx28.c @@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void) { return platform_driver_register(&imx28_pinctrl_driver); } -arch_initcall(imx28_pinctrl_init); +postcore_initcall(imx28_pinctrl_init); static void __exit imx28_pinctrl_exit(void) { diff --git a/drivers/pinctrl/pinctrl-nomadik-db8500.c b/drivers/pinctrl/pinctrl-nomadik-db8500.c index 6f99769c6733..5f3e9d0221e1 100644 --- a/drivers/pinctrl/pinctrl-nomadik-db8500.c +++ b/drivers/pinctrl/pinctrl-nomadik-db8500.c @@ -766,7 +766,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1", DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1"); DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1"); DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1"); -DB8500_FUNC_GROUPS(hsi,
"hsir1_a_1", "hsit1_a_1", "hsit_a_2"); +DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2"); DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1"); DB8500_FUNC_GROUPS(usb, "usb_a_1"); DB8500_FUNC_GROUPS(trig, "trig_b_1"); diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c index 53b0d49a7a1c..ec6ac501b23a 100644 --- a/drivers/pinctrl/pinctrl-nomadik.c +++ b/drivers/pinctrl/pinctrl-nomadik.c @@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev) for (i = 0; i < npct->soc->gpio_num_ranges; i++) { if (!nmk_gpio_chips[i]) { dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); - devm_kfree(&pdev->dev, npct); return -EPROBE_DEFER; } npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip; diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c index 2aae8a8978e9..7fca6ce5952b 100644 --- a/drivers/pinctrl/pinctrl-sirf.c +++ b/drivers/pinctrl/pinctrl-sirf.c @@ -1217,7 +1217,6 @@ out_no_rsc_remap: iounmap(spmx->gpio_virtbase); out_no_gpio_remap: platform_set_drvdata(pdev, NULL); - devm_kfree(&pdev->dev, spmx); return ret; } diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c index a7ad8c112d91..309f5b9a70ec 100644 --- a/drivers/pinctrl/pinctrl-u300.c +++ b/drivers/pinctrl/pinctrl-u300.c @@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev) upmx->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENOENT; - goto out_no_resource; - } + if (!res) + return -ENOENT; upmx->phybase = res->start; upmx->physize = resource_size(res); @@ -1165,8 +1163,6 @@ out_no_remap: platform_set_drvdata(pdev, NULL); out_no_memregion: release_mem_region(upmx->phybase, upmx->physize); -out_no_resource: - devm_kfree(&pdev->dev, upmx); return ret; } diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 2ca7dd1ab3e4..cd33add118ce 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -350,6 +350,7 @@ static void cmpc_accel_idev_init_v4(struct input_dev *inputdev) inputdev->close = cmpc_accel_close_v4; } +#ifdef CONFIG_PM_SLEEP static int cmpc_accel_suspend_v4(struct device *dev) { struct input_dev *inputdev; @@ -384,6 +385,7 @@ static int cmpc_accel_resume_v4(struct device *dev) return 0; } +#endif static int cmpc_accel_add_v4(struct acpi_device *acpi) { @@ -752,6 +754,7 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type) return cmpc_remove_acpi_notify_device(acpi); } +#ifdef CONFIG_PM_SLEEP static int cmpc_tablet_resume(struct device *dev) { struct input_dev *inputdev = dev_get_drvdata(dev); @@ -761,6 +764,7 @@ static int cmpc_tablet_resume(struct device *dev) input_report_switch(inputdev, SW_TABLET_MODE, !val); return 0; } +#endif static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume); diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c index d2e41735a47b..7acae3f85f3b 100644 --- a/drivers/platform/x86/fujitsu-tablet.c +++ b/drivers/platform/x86/fujitsu-tablet.c @@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fujitsu_resume(struct device *dev) { fujitsu_reset(); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume); diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c index 
d9ab6f64dcec..777c7e3dda51 100644 --- a/drivers/platform/x86/hdaps.c +++ b/drivers/platform/x86/hdaps.c @@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP static int hdaps_resume(struct device *dev) { return hdaps_device_init(); } +#endif static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume); diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index f4d91154ad67..6b9af989632b 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type) } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int lis3lv02d_suspend(struct device *dev) { /* make sure the device is off when we suspend */ diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index f64441844317..2111dbb7e1e3 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -85,7 +85,9 @@ #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 #define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device); +#endif static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume); #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f @@ -753,6 +755,7 @@ err_bluetooth: return retval; } +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device) { u8 data; @@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device) return 0; } +#endif static int __init msi_laptop_input_setup(void) { diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 24480074bcf0..8e8caa767d6a 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, pcc_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume); static struct acpi_driver acpi_pcc_driver = { @@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc) /* kernel module interface */ +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev) { struct pcc_acpi *pcc; @@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev) return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); } +#endif static int acpi_pcc_hotkey_add(struct acpi_device *device) { diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 9363969ad07a..daaddec68def 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " "(default: 0)"); +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void); +static void sony_nc_thermal_resume(void); +#endif static int sony_nc_kbd_backlight_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); @@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd); static int sony_nc_thermal_setup(struct platform_device *pd); static void sony_nc_thermal_cleanup(struct platform_device *pd); -static void sony_nc_thermal_resume(void); static int sony_nc_lid_resume_setup(struct platform_device *pd); static void 
sony_nc_lid_resume_cleanup(struct platform_device *pd); @@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) sony_nc_handles_cleanup(pd); } +#ifdef CONFIG_PM_SLEEP static void sony_nc_function_resume(void) { unsigned int i, result, bitmask, arg; @@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume); @@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void) { int ignore = 0; @@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void) (kbdbl_ctl->base + 0x200) | (kbdbl_ctl->timeout << 0x10), &ignore); } +#endif struct battery_care_control { struct device_attribute attrs[2]; @@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_thermal_resume(void) { unsigned int status = sony_nc_thermal_mode_get(); @@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void) if (status != th_handle->mode) sony_nc_thermal_mode_set(th_handle->mode); } +#endif /* resume on LID open */ struct snc_lid_resume_control { @@ -4287,6 +4295,7 @@ err_free_resources: return result; } +#ifdef CONFIG_PM_SLEEP static int sony_pic_suspend(struct device *dev) { if (sony_pic_disable(to_acpi_device(dev))) @@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev) spic_dev.cur_ioport, spic_dev.cur_irq); return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e7f73287636c..f28f36ccdcf4 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev; static struct mutex tpacpi_inputdev_send_mutex; static LIST_HEAD(tpacpi_all_drivers); +#ifdef CONFIG_PM_SLEEP static int tpacpi_suspend_handler(struct device *dev) { struct ibm_struct *ibm, *itmp; @@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(tpacpi_pm, tpacpi_suspend_handler, tpacpi_resume_handler); diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index c13ba5bac93f..5f1256d5e933 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int toshiba_acpi_suspend(struct device *device) { struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); @@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device) return 0; } +#endif static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm, toshiba_acpi_suspend, toshiba_acpi_resume); diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c index 715a43cb5e3c..5e5d6317d690 100644 --- a/drivers/platform/x86/toshiba_bluetooth.c +++ b/drivers/platform/x86/toshiba_bluetooth.c @@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, bt_device_ids); +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume); static struct acpi_driver toshiba_bt_rfkill_driver = { @@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 
event) toshiba_bluetooth_enable(device->handle); } +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev) { return toshiba_bluetooth_enable(to_acpi_device(dev)->handle); } +#endif static int toshiba_bt_rfkill_add(struct acpi_device *device) { diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c index 849c07c13bf6..38ba39d7ca7d 100644 --- a/drivers/platform/x86/xo15-ebook.c +++ b/drivers/platform/x86/xo15-ebook.c @@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int ebook_switch_resume(struct device *dev) { return ebook_send_state(to_acpi_device(dev)); } +#endif static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index eb415bd76494..9592b936b71b 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events) { + pm_stay_awake(rtc->dev.parent); schedule_work(&rtc->irqwork); } EXPORT_SYMBOL_GPL(rtc_update_irq); @@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work) mutex_lock(&rtc->ops_lock); again: + pm_relax(rtc->dev.parent); __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); while ((next = timerqueue_getnext(&rtc->timerqueue))) { diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 132333d75408..4267789ca995 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); - pm_wakeup_event(cmos_rtc.dev, 0); } spin_unlock(&rtc_lock); diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 6a6f76bf6e3d..b1032931a1c4 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c @@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) switch (sdias_evbuf.event_status) { case EVSTATE_ALL_STORED: TRACE("all stored\n"); + break; case EVSTATE_PART_STORED: TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); break; case EVSTATE_NO_DATA: TRACE("no data\n"); + /* fall through */ default: pr_err("Error from SCLP while copying hsa. 
" "Event status = %x\n", diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 2374468615ed..32c26d795ed0 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -324,8 +324,16 @@ int __init register_intc_controller(struct intc_desc *desc) res = irq_create_identity_mapping(d->domain, irq); if (unlikely(res)) { - pr_err("can't get irq_desc for %d\n", irq); - continue; + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, irq, irq); + if (unlikely(res)) { + pr_err("domain association failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", irq); + continue; + } } intc_irq_xlate_set(irq, vect->enum_id, d); @@ -345,8 +353,19 @@ int __init register_intc_controller(struct intc_desc *desc) */ res = irq_create_identity_mapping(d->domain, irq2); if (unlikely(res)) { - pr_err("can't get irq_desc for %d\n", irq2); - continue; + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, + irq, irq); + if (unlikely(res)) { + pr_err("domain association " + "failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", + irq); + continue; + } } vect2->enum_id = 0; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 646a7657fe62..cfa2c35dfeed 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -826,7 +826,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs; - struct device_node *slave_np, *data_np; + struct device_node *slave_np, *data_np = NULL; u32 fb_delay = 0; slave_np = spi->dev.of_node; diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c index e7d75d295988..f8b2d91851f7 100644 --- a/drivers/usb/host/ohci-omap.c +++ b/drivers/usb/host/ohci-omap.c @@ -403,8 +403,6 @@ err0: static inline void usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev) { - struct ohci_hcd *ohci = hcd_to_ohci (hcd); - usb_remove_hcd(hcd); if (!IS_ERR_OR_NULL(hcd->phy)) { (void) otg_set_host(hcd->phy->otg, 0); diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index e4e2fd1b5107..202bba6c997c 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -9,3 +9,6 @@ config VHOST_NET To compile this driver as a module, choose M here: the module will be called vhost_net. +if STAGING +source "drivers/vhost/Kconfig.tcm" +endif diff --git a/drivers/vhost/Kconfig.tcm b/drivers/vhost/Kconfig.tcm new file mode 100644 index 000000000000..a9c6f76e3208 --- /dev/null +++ b/drivers/vhost/Kconfig.tcm @@ -0,0 +1,6 @@ +config TCM_VHOST + tristate "TCM_VHOST fabric module (EXPERIMENTAL)" + depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m + default n + ---help--- + Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile index 72dd02050bb9..a27b053bc9ab 100644 --- a/drivers/vhost/Makefile +++ b/drivers/vhost/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_VHOST_NET) += vhost_net.o vhost_net-y := vhost.o net.o + +obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c new file mode 100644 index 000000000000..fb366540ed54 --- /dev/null +++ b/drivers/vhost/tcm_vhost.c @@ -0,0 +1,1628 @@ +/******************************************************************************* + * Vhost kernel TCM fabric driver for virtio SCSI initiators + * + * (C) Copyright 2010-2012 RisingTide Systems LLC. + * (C) Copyright 2010-2012 IBM Corp. 
+ * + * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * + * Authors: Nicholas A. Bellinger <nab@risingtidesystems.com> + * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + ****************************************************************************/ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <generated/utsrelease.h> +#include <linux/utsname.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/kthread.h> +#include <linux/types.h> +#include <linux/string.h> +#include <linux/configfs.h> +#include <linux/ctype.h> +#include <linux/compat.h> +#include <linux/eventfd.h> +#include <linux/vhost.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <asm/unaligned.h> +#include <scsi/scsi.h> +#include <scsi/scsi_tcq.h> +#include <target/target_core_base.h> +#include <target/target_core_fabric.h> +#include <target/target_core_fabric_configfs.h> +#include <target/target_core_configfs.h> +#include <target/configfs_macros.h> +#include <linux/vhost.h> +#include <linux/virtio_net.h> /* TODO vhost.h currently depends on this */ +#include <linux/virtio_scsi.h> + +#include "vhost.c" +#include "vhost.h" +#include "tcm_vhost.h" + +struct vhost_scsi { + atomic_t vhost_ref_cnt; + struct tcm_vhost_tpg *vs_tpg; + struct vhost_dev dev; + struct vhost_virtqueue vqs[3]; + + struct vhost_work vs_completion_work; /* cmd completion work item */ + struct list_head vs_completion_list; /* cmd completion queue */ + spinlock_t vs_completion_lock; /* protects s_completion_list */ +}; + +/* Local pointer to allocated TCM configfs fabric module */ +static struct target_fabric_configfs *tcm_vhost_fabric_configfs; + +static struct workqueue_struct *tcm_vhost_workqueue; + +/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ +static DEFINE_MUTEX(tcm_vhost_mutex); +static LIST_HEAD(tcm_vhost_list); + +static int tcm_vhost_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int tcm_vhost_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static char *tcm_vhost_get_fabric_name(void) +{ + return "vhost"; +} + +static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_FCP: + return fc_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_fabric_proto_ident(se_tpg); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_fabric_proto_ident(se_tpg); +} + +static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + return &tport->tport_name[0]; +} + +static u16 
tcm_vhost_get_tag(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + return tpg->tport_tpgt; +} + +static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 tcm_vhost_get_pr_transport_id( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); +} + +static u32 tcm_vhost_get_pr_transport_id_len( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); +} + +static char *tcm_vhost_parse_pr_out_transport_id( + struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_FCP: + return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_ISCSI: + return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); +} + +static struct se_node_acl *tcm_vhost_alloc_fabric_acl( + struct se_portal_group *se_tpg) +{ + struct tcm_vhost_nacl *nacl; + + nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); + if (!nacl) { + pr_err("Unable to alocate struct tcm_vhost_nacl\n"); + return NULL; + } + + return &nacl->se_node_acl; +} + +static void tcm_vhost_release_fabric_acl( + struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_nacl, + struct tcm_vhost_nacl, se_node_acl); + kfree(nacl); +} + +static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) +{ + 
return; +} + +static int tcm_vhost_shutdown_session(struct se_session *se_sess) +{ + return 0; +} + +static void tcm_vhost_close_session(struct se_session *se_sess) +{ + return; +} + +static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static int tcm_vhost_write_pending(struct se_cmd *se_cmd) +{ + /* Go ahead and process the write immediately */ + target_execute_cmd(se_cmd); + return 0; +} + +static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) +{ + return; +} + +static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) +{ + return 0; +} + +static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *); + +static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) +{ + struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_status(struct se_cmd *se_cmd) +{ + struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) +{ + return 0; +} + +static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd, + u32 sense_length) +{ + return 0; +} + +static u16 tcm_vhost_get_fabric_sense_len(void) +{ + return 0; +} + +static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + + /* TODO locking against target/backend threads? */ + transport_generic_free_cmd(se_cmd, 1); + + if (tv_cmd->tvc_sgl_count) { + u32 i; + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + + kfree(tv_cmd->tvc_sgl); + } + + kfree(tv_cmd); +} + +/* Dequeue a command from the completion list */ +static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion( + struct vhost_scsi *vs) +{ + struct tcm_vhost_cmd *tv_cmd = NULL; + + spin_lock_bh(&vs->vs_completion_lock); + if (list_empty(&vs->vs_completion_list)) { + spin_unlock_bh(&vs->vs_completion_lock); + return NULL; + } + + list_for_each_entry(tv_cmd, &vs->vs_completion_list, + tvc_completion_list) { + list_del(&tv_cmd->tvc_completion_list); + break; + } + spin_unlock_bh(&vs->vs_completion_lock); + return tv_cmd; +} + +/* Fill in status and signal that we are done processing this command + * + * This is scheduled in the vhost work queue so we are called with the owner + * process mm and can access the vring. + */ +static void vhost_scsi_complete_cmd_work(struct vhost_work *work) +{ + struct vhost_scsi *vs = container_of(work, struct vhost_scsi, + vs_completion_work); + struct tcm_vhost_cmd *tv_cmd; + + while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs)) != NULL) { + struct virtio_scsi_cmd_resp v_rsp; + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + int ret; + + pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, + tv_cmd, se_cmd->residual_count, se_cmd->scsi_status); + + memset(&v_rsp, 0, sizeof(v_rsp)); + v_rsp.resid = se_cmd->residual_count; + /* TODO is status_qualifier field needed? 
*/ + v_rsp.status = se_cmd->scsi_status; + v_rsp.sense_len = se_cmd->scsi_sense_length; + memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf, + v_rsp.sense_len); + ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); + if (likely(ret == 0)) + vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0); + else + pr_err("Faulted on virtio_scsi_cmd_resp\n"); + + vhost_scsi_free_cmd(tv_cmd); + } + + vhost_signal(&vs->dev, &vs->vqs[2]); +} + +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct vhost_scsi *vs = tv_cmd->tvc_vhost; + + pr_debug("%s tv_cmd %p\n", __func__, tv_cmd); + + spin_lock_bh(&vs->vs_completion_lock); + list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); + spin_unlock_bh(&vs->vs_completion_lock); + + vhost_work_queue(&vs->dev, &vs->vs_completion_work); +} + +static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( + struct tcm_vhost_tpg *tv_tpg, + struct virtio_scsi_cmd_req *v_req, + u32 exp_data_len, + int data_direction) +{ + struct tcm_vhost_cmd *tv_cmd; + struct tcm_vhost_nexus *tv_nexus; + struct se_portal_group *se_tpg = &tv_tpg->se_tpg; + struct se_session *se_sess; + struct se_cmd *se_cmd; + int sam_task_attr; + + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + pr_err("Unable to locate active struct tcm_vhost_nexus\n"); + return ERR_PTR(-EIO); + } + se_sess = tv_nexus->tvn_se_sess; + + tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); + if (!tv_cmd) { + pr_err("Unable to allocate struct tcm_vhost_cmd\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&tv_cmd->tvc_completion_list); + tv_cmd->tvc_tag = v_req->tag; + + se_cmd = &tv_cmd->tvc_se_cmd; + /* + * Locate the SAM Task Attr from virtio_scsi_cmd_req + */ + sam_task_attr = v_req->task_attr; + /* + * Initialize struct se_cmd descriptor from TCM infrastructure + */ + transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len, + data_direction, sam_task_attr, + &tv_cmd->tvc_sense_buf[0]); + +#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */ + if (bidi) + se_cmd->se_cmd_flags |= SCF_BIDI; +#endif + return tv_cmd; +} + +/* + * Map a user memory range into a scatterlist + * + * Returns the number of scatterlist entries used or -errno on error. 
+ */ +static int vhost_scsi_map_to_sgl(struct scatterlist *sgl, + unsigned int sgl_count, void __user *ptr, size_t len, int write) +{ + struct scatterlist *sg = sgl; + unsigned int npages = 0; + int ret; + + while (len > 0) { + struct page *page; + unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK; + unsigned int nbytes = min_t(unsigned int, + PAGE_SIZE - offset, len); + + if (npages == sgl_count) { + ret = -ENOBUFS; + goto err; + } + + ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page); + BUG_ON(ret == 0); /* we should either get our page or fail */ + if (ret < 0) + goto err; + + sg_set_page(sg, page, nbytes, offset); + ptr += nbytes; + len -= nbytes; + sg++; + npages++; + } + return npages; + +err: + /* Put pages that we hold */ + for (sg = sgl; sg != &sgl[npages]; sg++) + put_page(sg_page(sg)); + return ret; +} + +static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd, + struct iovec *iov, unsigned int niov, int write) +{ + int ret; + unsigned int i; + u32 sgl_count; + struct scatterlist *sg; + + /* + * Find out how long sglist needs to be + */ + sgl_count = 0; + for (i = 0; i < niov; i++) { + sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len + + PAGE_SIZE - 1) >> PAGE_SHIFT) - + ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT); + } + /* TODO overflow checking */ + + sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); + if (!sg) + return -ENOMEM; + pr_debug("%s sg %p sgl_count %u is_err %ld\n", __func__, + sg, sgl_count, IS_ERR(sg)); + sg_init_table(sg, sgl_count); + + tv_cmd->tvc_sgl = sg; + tv_cmd->tvc_sgl_count = sgl_count; + + pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); + for (i = 0; i < niov; i++) { + ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base, + iov[i].iov_len, write); + if (ret < 0) { + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + kfree(tv_cmd->tvc_sgl); + tv_cmd->tvc_sgl = NULL; + tv_cmd->tvc_sgl_count = 0; + return ret; + } + + sg += ret; + sgl_count -= ret; + } + return 0; +} + +static void tcm_vhost_submission_work(struct work_struct *work) +{ + struct tcm_vhost_cmd *tv_cmd = + container_of(work, struct tcm_vhost_cmd, work); + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; + int rc, sg_no_bidi = 0; + /* + * Locate the struct se_lun pointer based on v_req->lun, and + * attach it to struct se_cmd + */ + rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun); + if (rc < 0) { + pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun); + transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd, + tv_cmd->tvc_se_cmd.scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb); + if (rc == -ENOMEM) { + transport_send_check_condition_and_sense(se_cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } else if (rc < 0) { + if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) + tcm_vhost_queue_status(se_cmd); + else + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + if (tv_cmd->tvc_sgl_count) { + sg_ptr = tv_cmd->tvc_sgl; + /* + * For BIDI commands, pass in the extra READ buffer + * to transport_generic_map_mem_to_cmd() below.. 
+ */ +/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ +#if 0 + if (se_cmd->se_cmd_flags & SCF_BIDI) { + sg_bidi_ptr = NULL; + sg_no_bidi = 0; + } +#endif + } else { + sg_ptr = NULL; + } + + rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr, + tv_cmd->tvc_sgl_count, sg_bidi_ptr, + sg_no_bidi); + if (rc < 0) { + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + transport_handle_cdb_direct(se_cmd); +} + +static void vhost_scsi_handle_vq(struct vhost_scsi *vs) +{ + struct vhost_virtqueue *vq = &vs->vqs[2]; + struct virtio_scsi_cmd_req v_req; + struct tcm_vhost_tpg *tv_tpg; + struct tcm_vhost_cmd *tv_cmd; + u32 exp_data_len, data_first, data_num, data_direction; + unsigned out, in, i; + int head, ret; + + /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ + tv_tpg = vs->vs_tpg; + if (unlikely(!tv_tpg)) { + pr_err("%s endpoint not set\n", __func__); + return; + } + + mutex_lock(&vq->mutex); + vhost_disable_notify(&vs->dev, vq); + + for (;;) { + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov, + ARRAY_SIZE(vq->iov), &out, &in, + NULL, NULL); + pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", + head, out, in); + /* On error, stop handling until the next kick. */ + if (unlikely(head < 0)) + break; + /* Nothing new? Wait for eventfd to tell us they refilled. */ + if (head == vq->num) { + if (unlikely(vhost_enable_notify(&vs->dev, vq))) { + vhost_disable_notify(&vs->dev, vq); + continue; + } + break; + } + +/* FIXME: BIDI operation */ + if (out == 1 && in == 1) { + data_direction = DMA_NONE; + data_first = 0; + data_num = 0; + } else if (out == 1 && in > 1) { + data_direction = DMA_FROM_DEVICE; + data_first = out + 1; + data_num = in - 1; + } else if (out > 1 && in == 1) { + data_direction = DMA_TO_DEVICE; + data_first = 1; + data_num = out - 1; + } else { + vq_err(vq, "Invalid buffer layout out: %u in: %u\n", + out, in); + break; + } + + /* + * Check for a sane resp buffer so we can report errors to + * the guest. 
+ */ + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes\n", vq->iov[out].iov_len); + break; + } + + if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) { + vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu" + " bytes\n", vq->iov[0].iov_len); + break; + } + pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p," + " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req)); + ret = __copy_from_user(&v_req, vq->iov[0].iov_base, + sizeof(v_req)); + if (unlikely(ret)) { + vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); + break; + } + + exp_data_len = 0; + for (i = 0; i < data_num; i++) + exp_data_len += vq->iov[data_first + i].iov_len; + + tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req, + exp_data_len, data_direction); + if (IS_ERR(tv_cmd)) { + vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", + PTR_ERR(tv_cmd)); + break; + } + pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" + ": %d\n", tv_cmd, exp_data_len, data_direction); + + tv_cmd->tvc_vhost = vs; + + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes, out: %d, in: %d\n", + vq->iov[out].iov_len, out, in); + break; + } + + tv_cmd->tvc_resp = vq->iov[out].iov_base; + + /* + * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb + * that will be used by tcm_vhost_new_cmd_map() and down into + * target_setup_cmd_from_cdb() + */ + memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); + /* + * Check that the recieved CDB size does not exceeded our + * hardcoded max for tcm_vhost + */ + /* TODO what if cdb was too small for varlen cdb header? */ + if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) > + TCM_VHOST_MAX_CDB_SIZE)) { + vq_err(vq, "Received SCSI CDB with command_size: %d that" + " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", + scsi_command_size(tv_cmd->tvc_cdb), + TCM_VHOST_MAX_CDB_SIZE); + break; /* TODO */ + } + tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; + + pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", + tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun); + + if (data_direction != DMA_NONE) { + ret = vhost_scsi_map_iov_to_sgl(tv_cmd, + &vq->iov[data_first], data_num, + data_direction == DMA_TO_DEVICE); + if (unlikely(ret)) { + vq_err(vq, "Failed to map iov to sgl\n"); + break; /* TODO */ + } + } + + /* + * Save the descriptor from vhost_get_vq_desc() to be used to + * complete the virtio-scsi request in TCM callback context via + * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() + */ + tv_cmd->tvc_vq_desc = head; + /* + * Dispatch tv_cmd descriptor for cmwq execution in process + * context provided by tcm_vhost_workqueue. This also ensures + * tv_cmd is executed on the same kworker CPU as this vhost + * thread to gain positive L2 cache locality effects.. 
+ */ + INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work); + queue_work(tcm_vhost_workqueue, &tv_cmd->work); + } + + mutex_unlock(&vq->mutex); +} + +static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) +{ + pr_err("%s: The handling func for control queue.\n", __func__); +} + +static void vhost_scsi_evt_handle_kick(struct vhost_work *work) +{ + pr_err("%s: The handling func for event queue.\n", __func__); +} + +static void vhost_scsi_handle_kick(struct vhost_work *work) +{ + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); + + vhost_scsi_handle_vq(vs); +} + +/* + * Called from vhost_scsi_ioctl() context to walk the list of available + * tcm_vhost_tpg with an active struct tcm_vhost_nexus + */ +static int vhost_scsi_set_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. */ + for (index = 0; index < vs->dev.nvqs; ++index) { + /* Verify that ring has been setup correctly. */ + if (!vhost_vq_access_ok(&vs->vqs[index])) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + } + + if (vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return -EEXIST; + } + mutex_unlock(&vs->dev.mutex); + + mutex_lock(&tcm_vhost_mutex); + list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (!tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + if (atomic_read(&tv_tpg->tv_tpg_vhost_count)) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + tv_tport = tv_tpg->tport; + + if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && + (tv_tpg->tport_tpgt == t->vhost_tpgt)) { + atomic_inc(&tv_tpg->tv_tpg_vhost_count); + smp_mb__after_atomic_inc(); + mutex_unlock(&tv_tpg->tv_tpg_mutex); + mutex_unlock(&tcm_vhost_mutex); + + mutex_lock(&vs->dev.mutex); + vs->vs_tpg = tv_tpg; + atomic_inc(&vs->vhost_ref_cnt); + smp_mb__after_atomic_inc(); + mutex_unlock(&vs->dev.mutex); + return 0; + } + mutex_unlock(&tv_tpg->tv_tpg_mutex); + } + mutex_unlock(&tcm_vhost_mutex); + return -EINVAL; +} + +static int vhost_scsi_clear_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. 
*/ + for (index = 0; index < vs->dev.nvqs; ++index) { + if (!vhost_vq_access_ok(&vs->vqs[index])) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + } + + if (!vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + return -ENODEV; + } + tv_tpg = vs->vs_tpg; + tv_tport = tv_tpg->tport; + + if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || + (tv_tpg->tport_tpgt != t->vhost_tpgt)) { + mutex_unlock(&vs->dev.mutex); + pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" + " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", + tv_tport->tport_name, tv_tpg->tport_tpgt, + t->vhost_wwpn, t->vhost_tpgt); + return -EINVAL; + } + atomic_dec(&tv_tpg->tv_tpg_vhost_count); + vs->vs_tpg = NULL; + mutex_unlock(&vs->dev.mutex); + + return 0; +} + +static int vhost_scsi_open(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s; + int r; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); + INIT_LIST_HEAD(&s->vs_completion_list); + spin_lock_init(&s->vs_completion_lock); + + s->vqs[0].handle_kick = vhost_scsi_ctl_handle_kick; + s->vqs[1].handle_kick = vhost_scsi_evt_handle_kick; + s->vqs[2].handle_kick = vhost_scsi_handle_kick; + r = vhost_dev_init(&s->dev, s->vqs, 3); + if (r < 0) { + kfree(s); + return r; + } + + f->private_data = s; + return 0; +} + +static int vhost_scsi_release(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s = f->private_data; + + if (s->vs_tpg && s->vs_tpg->tport) { + struct vhost_scsi_target backend; + + memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name, + sizeof(backend.vhost_wwpn)); + backend.vhost_tpgt = s->vs_tpg->tport_tpgt; + vhost_scsi_clear_endpoint(s, &backend); + } + + vhost_dev_cleanup(&s->dev, false); + kfree(s); + return 0; +} + +static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) +{ + if (features & ~VHOST_FEATURES) + return -EOPNOTSUPP; + + mutex_lock(&vs->dev.mutex); + if ((features & (1 << VHOST_F_LOG_ALL)) && + !vhost_log_access_ok(&vs->dev)) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + vs->dev.acked_features = features; + /* TODO possibly smp_wmb() and flush vqs */ + mutex_unlock(&vs->dev.mutex); + return 0; +} + +static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct vhost_scsi *vs = f->private_data; + struct vhost_scsi_target backend; + void __user *argp = (void __user *)arg; + u64 __user *featurep = argp; + u64 features; + int r; + + switch (ioctl) { + case VHOST_SCSI_SET_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + + return vhost_scsi_set_endpoint(vs, &backend); + case VHOST_SCSI_CLEAR_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + + return vhost_scsi_clear_endpoint(vs, &backend); + case VHOST_SCSI_GET_ABI_VERSION: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + + backend.abi_version = VHOST_SCSI_ABI_VERSION; + + if (copy_to_user(argp, &backend, sizeof backend)) + return -EFAULT; + return 0; + case VHOST_GET_FEATURES: + features = VHOST_FEATURES; + if (copy_to_user(featurep, &features, sizeof features)) + return -EFAULT; + return 0; + case VHOST_SET_FEATURES: + if (copy_from_user(&features, featurep, sizeof features)) + return -EFAULT; + return vhost_scsi_set_features(vs, features); + default: + mutex_lock(&vs->dev.mutex); + r = vhost_dev_ioctl(&vs->dev, ioctl, arg); + mutex_unlock(&vs->dev.mutex); + return r; + } +} + +static const struct 
file_operations vhost_scsi_fops = { + .owner = THIS_MODULE, + .release = vhost_scsi_release, + .unlocked_ioctl = vhost_scsi_ioctl, + /* TODO compat ioctl? */ + .open = vhost_scsi_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vhost_scsi_misc = { + MISC_DYNAMIC_MINOR, + "vhost-scsi", + &vhost_scsi_fops, +}; + +static int __init vhost_scsi_register(void) +{ + return misc_register(&vhost_scsi_misc); +} + +static int vhost_scsi_deregister(void) +{ + return misc_deregister(&vhost_scsi_misc); +} + +static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) +{ + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +static int tcm_vhost_port_link( + struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + atomic_inc(&tv_tpg->tv_tpg_port_count); + smp_mb__after_atomic_inc(); + + return 0; +} + +static void tcm_vhost_port_unlink( + struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + atomic_dec(&tv_tpg->tv_tpg_port_count); + smp_mb__after_atomic_dec(); +} + +static struct se_node_acl *tcm_vhost_make_nodeacl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl, *se_nacl_new; + struct tcm_vhost_nacl *nacl; + u64 wwpn = 0; + u32 nexus_depth; + + /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); + if (!se_nacl_new) + return ERR_PTR(-ENOMEM); + + nexus_depth = 1; + /* + * se_nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a NodeACL from demo mode -> explict + */ + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, + name, nexus_depth); + if (IS_ERR(se_nacl)) { + tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); + return se_nacl; + } + /* + * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN + */ + nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); + nacl->iport_wwpn = wwpn; + + return se_nacl; +} + +static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_acl, + struct tcm_vhost_nacl, se_node_acl); + core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); + kfree(nacl); +} + +static int tcm_vhost_make_nexus( + struct tcm_vhost_tpg *tv_tpg, + const char *name) +{ + struct se_portal_group *se_tpg; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("tv_tpg->tpg_nexus already exists\n"); + return -EEXIST; + } + se_tpg = &tv_tpg->se_tpg; + + tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_err("Unable to allocate struct tcm_vhost_nexus\n"); + return -ENOMEM; + } + /* + * Initialize the struct se_session pointer + */ + tv_nexus->tvn_se_sess = transport_init_session(); + if (IS_ERR(tv_nexus->tvn_se_sess)) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Since we are running in 'demo mode' this call with generate a + * struct se_node_acl for the tcm_vhost struct se_portal_group with + * the SCSI Initiator port name of the passed configfs group 'name'. 
+ */ + tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( + se_tpg, (unsigned char *)name); + if (!tv_nexus->tvn_se_sess->se_node_acl) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("core_tpg_check_initiator_node_acl() failed" + " for %s\n", name); + transport_free_session(tv_nexus->tvn_se_sess); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Now register the TCM vHost virtual I_T Nexus as active with the + * call to __transport_register_session() + */ + __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, + tv_nexus->tvn_se_sess, tv_nexus); + tv_tpg->tpg_nexus = tv_nexus; + + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return 0; +} + +static int tcm_vhost_drop_nexus( + struct tcm_vhost_tpg *tpg) +{ + struct se_session *se_sess; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tpg->tv_tpg_mutex); + tv_nexus = tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + se_sess = tv_nexus->tvn_se_sess; + if (!se_sess) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + if (atomic_read(&tpg->tv_tpg_port_count)) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vHost I_T Nexus with" + " active TPG port count: %d\n", + atomic_read(&tpg->tv_tpg_port_count)); + return -EPERM; + } + + if (atomic_read(&tpg->tv_tpg_vhost_count)) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vHost I_T Nexus with" + " active TPG vhost count: %d\n", + atomic_read(&tpg->tv_tpg_vhost_count)); + return -EPERM; + } + + pr_debug("TCM_vHost_ConfigFS: Removing I_T Nexus to emulated" + " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + /* + * Release the SCSI I_T Nexus to the emulated vHost Target Port + */ + transport_deregister_session(tv_nexus->tvn_se_sess); + tpg->tpg_nexus = NULL; + mutex_unlock(&tpg->tv_tpg_mutex); + + kfree(tv_nexus); + return 0; +} + +static ssize_t tcm_vhost_tpg_show_nexus( + struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_nexus *tv_nexus; + ssize_t ret; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return -ENODEV; + } + ret = snprintf(page, PAGE_SIZE, "%s\n", + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + mutex_unlock(&tv_tpg->tv_tpg_mutex); + + return ret; +} + +static ssize_t tcm_vhost_tpg_store_nexus( + struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport_wwn = tv_tpg->tport; + unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; + int ret; + /* + * Shutdown the active I_T nexus if 'NULL' is passed.. + */ + if (!strncmp(page, "NULL", 4)) { + ret = tcm_vhost_drop_nexus(tv_tpg); + return (!ret) ? count : ret; + } + /* + * Otherwise make sure the passed virtual Initiator port WWN matches + * the fabric protocol_id set in tcm_vhost_make_tport(), and call + * tcm_vhost_make_nexus(). 
+ */ + if (strlen(page) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated NAA Sas Address: %s, exceeds" + " max: %d\n", page, TCM_VHOST_NAMELEN); + return -EINVAL; + } + snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); + + ptr = strstr(i_port, "naa."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { + pr_err("Passed SAS Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + ptr = strstr(i_port, "fc."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { + pr_err("Passed FCP Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[3]; /* Skip over "fc." */ + goto check_newline; + } + ptr = strstr(i_port, "iqn."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { + pr_err("Passed iSCSI Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + pr_err("Unable to locate prefix for emulated Initiator Port:" + " %s\n", i_port); + return -EINVAL; + /* + * Clear any trailing newline for the NAA WWN + */ +check_newline: + if (i_port[strlen(i_port)-1] == '\n') + i_port[strlen(i_port)-1] = '\0'; + + ret = tcm_vhost_make_nexus(tv_tpg, port_ptr); + if (ret < 0) + return ret; + + return count; +} + +TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { + &tcm_vhost_tpg_nexus.attr, + NULL, +}; + +static struct se_portal_group *tcm_vhost_make_tpg( + struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + struct tcm_vhost_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + return ERR_PTR(-EINVAL); + + tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_vhost_tpg"); + return ERR_PTR(-ENOMEM); + } + mutex_init(&tpg->tv_tpg_mutex); + INIT_LIST_HEAD(&tpg->tv_tpg_list); + tpg->tport = tport; + tpg->tport_tpgt = tpgt; + + ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, + &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + kfree(tpg); + return NULL; + } + mutex_lock(&tcm_vhost_mutex); + list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); + mutex_unlock(&tcm_vhost_mutex); + + return &tpg->se_tpg; +} + +static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tcm_vhost_mutex); + list_del(&tpg->tv_tpg_list); + mutex_unlock(&tcm_vhost_mutex); + /* + * Release the virtual I_T Nexus for this vHost TPG + */ + tcm_vhost_drop_nexus(tpg); + /* + * Deregister the se_tpg from TCM.. 
+ */ + core_tpg_deregister(se_tpg); + kfree(tpg); +} + +static struct se_wwn *tcm_vhost_make_tport( + struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport; + char *ptr; + u64 wwpn = 0; + int off = 0; + + /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + + tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); + if (!tport) { + pr_err("Unable to allocate struct tcm_vhost_tport"); + return ERR_PTR(-ENOMEM); + } + tport->tport_wwpn = wwpn; + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." */ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_ISCSI; + goto check_len; + } + + pr_err("Unable to locate prefix for emulated Target Port:" + " %s\n", name); + kfree(tport); + return ERR_PTR(-EINVAL); + +check_len: + if (strlen(name) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated %s Address: %s, exceeds" + " max: %d\n", name, tcm_vhost_dump_proto_id(tport), + TCM_VHOST_NAMELEN); + kfree(tport); + return ERR_PTR(-EINVAL); + } + snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); + + pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); + + return &tport->tport_wwn; +} + +static void tcm_vhost_drop_tport(struct se_wwn *wwn) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), + tport->tport_name); + + kfree(tport); +} + +static ssize_t tcm_vhost_wwn_show_attr_version( + struct target_fabric_configfs *tf, + char *page) +{ + return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" + "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); +} + +TF_WWN_ATTR_RO(tcm_vhost, version); + +static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { + &tcm_vhost_wwn_version.attr, + NULL, +}; + +static struct target_core_fabric_ops tcm_vhost_ops = { + .get_fabric_name = tcm_vhost_get_fabric_name, + .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, + .tpg_get_wwn = tcm_vhost_get_fabric_wwn, + .tpg_get_tag = tcm_vhost_get_tag, + .tpg_get_default_depth = tcm_vhost_get_default_depth, + .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, + .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, + .tpg_check_demo_mode = tcm_vhost_check_true, + .tpg_check_demo_mode_cache = tcm_vhost_check_true, + .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, + .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, + .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, + .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, + .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, + .release_cmd = tcm_vhost_release_cmd, + .shutdown_session = tcm_vhost_shutdown_session, + .close_session = tcm_vhost_close_session, + .sess_get_index = tcm_vhost_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_vhost_write_pending, + .write_pending_status = tcm_vhost_write_pending_status, + 
.set_default_node_attributes = tcm_vhost_set_default_node_attrs, + .get_task_tag = tcm_vhost_get_task_tag, + .get_cmd_state = tcm_vhost_get_cmd_state, + .queue_data_in = tcm_vhost_queue_data_in, + .queue_status = tcm_vhost_queue_status, + .queue_tm_rsp = tcm_vhost_queue_tm_rsp, + .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len, + .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len, + /* + * Setup callers for generic logic in target_core_fabric_configfs.c + */ + .fabric_make_wwn = tcm_vhost_make_tport, + .fabric_drop_wwn = tcm_vhost_drop_tport, + .fabric_make_tpg = tcm_vhost_make_tpg, + .fabric_drop_tpg = tcm_vhost_drop_tpg, + .fabric_post_link = tcm_vhost_port_link, + .fabric_pre_unlink = tcm_vhost_port_unlink, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = tcm_vhost_make_nodeacl, + .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, +}; + +static int tcm_vhost_register_configfs(void) +{ + struct target_fabric_configfs *fabric; + int ret; + + pr_debug("TCM_VHOST fabric module %s on %s/%s" + " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); + /* + * Register the top level struct config_item_type with TCM core + */ + fabric = target_fabric_configfs_init(THIS_MODULE, "vhost"); + if (IS_ERR(fabric)) { + pr_err("target_fabric_configfs_init() failed\n"); + return PTR_ERR(fabric); + } + /* + * Setup fabric->tf_ops from our local tcm_vhost_ops + */ + fabric->tf_ops = tcm_vhost_ops; + /* + * Setup default attribute lists for various fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + /* + * Register the fabric for use within TCM + */ + ret = target_fabric_configfs_register(fabric); + if (ret < 0) { + pr_err("target_fabric_configfs_register() failed" + " for TCM_VHOST\n"); + return ret; + } + /* + * Setup our local pointer to *fabric + */ + tcm_vhost_fabric_configfs = fabric; + pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); + return 0; +}; + +static void tcm_vhost_deregister_configfs(void) +{ + if (!tcm_vhost_fabric_configfs) + return; + + target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); + tcm_vhost_fabric_configfs = NULL; + pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); +}; + +static int __init tcm_vhost_init(void) +{ + int ret = -ENOMEM; + + tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); + if (!tcm_vhost_workqueue) + goto out; + + ret = vhost_scsi_register(); + if (ret < 0) + goto out_destroy_workqueue; + + ret = tcm_vhost_register_configfs(); + if (ret < 0) + goto out_vhost_scsi_deregister; + + return 0; + +out_vhost_scsi_deregister: + vhost_scsi_deregister(); +out_destroy_workqueue: + destroy_workqueue(tcm_vhost_workqueue); +out: + return ret; +}; + +static void tcm_vhost_exit(void) +{ + tcm_vhost_deregister_configfs(); + vhost_scsi_deregister(); + destroy_workqueue(tcm_vhost_workqueue); +}; + +MODULE_DESCRIPTION("TCM_VHOST series fabric driver"); +MODULE_LICENSE("GPL"); +module_init(tcm_vhost_init); +module_exit(tcm_vhost_exit); 
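For orientation, here is a minimal userspace sketch of driving the fabric module registered above. It assumes the VHOST_SCSI_* ioctl numbers and struct vhost_scsi_target have been copied from drivers/vhost/tcm_vhost.h in the next hunk (they are deliberately kept out of the exported linux/vhost.h for now), plus the TRANSPORT_IQN_LEN constant, and that a matching tcm_vhost tport and TPG have already been created through configfs. The WWPN value is hypothetical, error handling is minimal, and the generic vhost owner/vring setup that must happen before any I/O is omitted.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    /* VHOST_SCSI_SET_ENDPOINT, VHOST_SCSI_CLEAR_ENDPOINT, VHOST_SCSI_GET_ABI_VERSION
     * and struct vhost_scsi_target copied from drivers/vhost/tcm_vhost.h (assumption:
     * the program carries its own copy, since they are not exported to uapi yet). */

    int main(void)
    {
            struct vhost_scsi_target t;
            int fd = open("/dev/vhost-scsi", O_RDWR); /* misc device from vhost_scsi_register() */

            if (fd < 0)
                    return 1;

            memset(&t, 0, sizeof(t));
            if (ioctl(fd, VHOST_SCSI_GET_ABI_VERSION, &t) == 0)
                    printf("vhost-scsi ABI version %d\n", t.abi_version);

            /* VHOST_SET_OWNER plus memory/vring setup via the generic vhost ioctls
             * is omitted here. The WWPN below is hypothetical; it must match an
             * existing tcm_vhost tport name and TPG tag configured via configfs. */
            snprintf((char *)t.vhost_wwpn, sizeof(t.vhost_wwpn), "naa.60014055cf3a18e0");
            t.vhost_tpgt = 1;
            if (ioctl(fd, VHOST_SCSI_SET_ENDPOINT, &t) < 0)
                    perror("VHOST_SCSI_SET_ENDPOINT");

            ioctl(fd, VHOST_SCSI_CLEAR_ENDPOINT, &t);
            close(fd);
            return 0;
    }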
diff --git a/drivers/vhost/tcm_vhost.h b/drivers/vhost/tcm_vhost.h new file mode 100644 index 000000000000..c983ed21e413 --- /dev/null +++ b/drivers/vhost/tcm_vhost.h @@ -0,0 +1,101 @@ +#define TCM_VHOST_VERSION "v0.1" +#define TCM_VHOST_NAMELEN 256 +#define TCM_VHOST_MAX_CDB_SIZE 32 + +struct tcm_vhost_cmd { + /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ + int tvc_vq_desc; + /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ + u64 tvc_tag; + /* The number of scatterlists associated with this cmd */ + u32 tvc_sgl_count; + /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ + u32 tvc_lun; + /* Pointer to the SGL formatted memory from virtio-scsi */ + struct scatterlist *tvc_sgl; + /* Pointer to response */ + struct virtio_scsi_cmd_resp __user *tvc_resp; + /* Pointer to vhost_scsi for our device */ + struct vhost_scsi *tvc_vhost; + /* The TCM I/O descriptor that is accessed via container_of() */ + struct se_cmd tvc_se_cmd; + /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ + struct work_struct work; + /* Copy of the incoming SCSI command descriptor block (CDB) */ + unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE]; + /* Sense buffer that will be mapped into outgoing status */ + unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; + /* Completed commands list, serviced from vhost worker thread */ + struct list_head tvc_completion_list; +}; + +struct tcm_vhost_nexus { + /* Pointer to TCM session for I_T Nexus */ + struct se_session *tvn_se_sess; +}; + +struct tcm_vhost_nacl { + /* Binary World Wide unique Port Name for Vhost Initiator port */ + u64 iport_wwpn; + /* ASCII formatted WWPN for Sas Initiator port */ + char iport_name[TCM_VHOST_NAMELEN]; + /* Returned by tcm_vhost_make_nodeacl() */ + struct se_node_acl se_node_acl; +}; + +struct tcm_vhost_tpg { + /* Vhost port target portal group tag for TCM */ + u16 tport_tpgt; + /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ + atomic_t tv_tpg_port_count; + /* Used for vhost_scsi device reference to tpg_nexus */ + atomic_t tv_tpg_vhost_count; + /* list for tcm_vhost_list */ + struct list_head tv_tpg_list; + /* Used to protect access for tpg_nexus */ + struct mutex tv_tpg_mutex; + /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ + struct tcm_vhost_nexus *tpg_nexus; + /* Pointer back to tcm_vhost_tport */ + struct tcm_vhost_tport *tport; + /* Returned by tcm_vhost_make_tpg() */ + struct se_portal_group se_tpg; +}; + +struct tcm_vhost_tport { + /* SCSI protocol the tport is providing */ + u8 tport_proto_id; + /* Binary World Wide unique Port Name for Vhost Target port */ + u64 tport_wwpn; + /* ASCII formatted WWPN for Vhost Target port */ + char tport_name[TCM_VHOST_NAMELEN]; + /* Returned by tcm_vhost_make_tport() */ + struct se_wwn tport_wwn; +}; + +/* + * As per request from MST, keep TCM_VHOST related ioctl defines out of + * linux/vhost.h (user-space) for now.. + */ + +#include <linux/vhost.h> + +/* + * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. + * + * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + + * RFC-v2 vhost-scsi userspace. 
Add GET_ABI_VERSION ioctl usage + */ + +#define VHOST_SCSI_ABI_VERSION 0 + +struct vhost_scsi_target { + int abi_version; + unsigned char vhost_wwpn[TRANSPORT_IQN_LEN]; + unsigned short vhost_tpgt; +}; + +/* VHOST_SCSI specific defines */ +#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) +#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, struct vhost_scsi_target) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index bc2f6ffff3cf..7bb755677a22 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -664,10 +664,6 @@ static noinline int btrfs_mksubvol(struct path *parent, struct dentry *dentry; int error; - error = mnt_want_write(parent->mnt); - if (error) - return error; - mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); dentry = lookup_one_len(name, parent->dentry, namelen); @@ -703,7 +699,6 @@ out_dput: dput(dentry); out_unlock: mutex_unlock(&dir->i_mutex); - mnt_drop_write(parent->mnt); return error; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 93d8d6c9494d..aba15f1b7ad2 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -703,13 +703,16 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = iocb->ki_filp->f_mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); - if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) { + /* + * In auto invalidate mode, always update attributes on read. + * Otherwise, only update if we attempt to read past EOF (to ensure + * i_size is up to date). + */ + if (fc->auto_inval_data || + (pos + iov_length(iov, nr_segs) > i_size_read(inode))) { int err; - /* - * If trying to read past EOF, make sure the i_size - * attribute is up-to-date. - */ err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL); if (err) return err; @@ -1700,7 +1703,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) size_t n; u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; - for (n = 0; n < count; n++) { + for (n = 0; n < count; n++, iov++) { if (iov->iov_len > (size_t) max) return -ENOMEM; max -= iov->iov_len; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 771fb6322c07..e24dd74e3068 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -484,6 +484,9 @@ struct fuse_conn { /** Is fallocate not implemented by fs? */ unsigned no_fallocate:1; + /** Use enhanced/automatic page cache invalidation. 
*/ + unsigned auto_inval_data:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 1cd61652018c..ce0a2838ccd0 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -197,6 +197,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); loff_t oldsize; + struct timespec old_mtime; spin_lock(&fc->lock); if (attr_version != 0 && fi->attr_version > attr_version) { @@ -204,15 +205,35 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, return; } + old_mtime = inode->i_mtime; fuse_change_attributes_common(inode, attr, attr_valid); oldsize = inode->i_size; i_size_write(inode, attr->size); spin_unlock(&fc->lock); - if (S_ISREG(inode->i_mode) && oldsize != attr->size) { - truncate_pagecache(inode, oldsize, attr->size); - invalidate_inode_pages2(inode->i_mapping); + if (S_ISREG(inode->i_mode)) { + bool inval = false; + + if (oldsize != attr->size) { + truncate_pagecache(inode, oldsize, attr->size); + inval = true; + } else if (fc->auto_inval_data) { + struct timespec new_mtime = { + .tv_sec = attr->mtime, + .tv_nsec = attr->mtimensec, + }; + + /* + * Auto inval mode also checks and invalidates if mtime + * has changed. + */ + if (!timespec_equal(&old_mtime, &new_mtime)) + inval = true; + } + + if (inval) + invalidate_inode_pages2(inode->i_mapping); } } @@ -834,6 +855,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->big_writes = 1; if (arg->flags & FUSE_DONT_MASK) fc->dont_mask = 1; + if (arg->flags & FUSE_AUTO_INVAL_DATA) + fc->auto_inval_data = 1; } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; @@ -859,7 +882,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | - FUSE_FLOCK_LOCKS; + FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | + FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 7ff5c99b1638..c78bb997e2c6 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -213,9 +213,12 @@ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff 
--git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h index 58056865b8e9..dc3a8cd7db8a 100644 --- a/include/drm/radeon_drm.h +++ b/include/drm/radeon_drm.h @@ -964,6 +964,8 @@ struct drm_radeon_cs { #define RADEON_INFO_IB_VM_MAX_SIZE 0x0f /* max pipes - needed for compute shaders */ #define RADEON_INFO_MAX_PIPES 0x10 +/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */ +#define RADEON_INFO_TIMESTAMP 0x11 struct drm_radeon_info { uint32_t request; diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h index 3c80885fa829..d323a4b4143c 100644 --- a/include/linux/bcma/bcma_driver_chipcommon.h +++ b/include/linux/bcma/bcma_driver_chipcommon.h @@ -89,6 +89,12 @@ #define BCMA_CC_CHIPST_4313_OTP_PRESENT 2 #define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2 #define BCMA_CC_CHIPST_4331_OTP_PRESENT 4 +#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001 +#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002 +#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004 +#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008 +#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010 +#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020 #define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */ #define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */ #define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ diff --git a/include/linux/can.h b/include/linux/can.h index 018055efc034..e52958d7c2d1 100644 --- a/include/linux/can.h +++ b/include/linux/can.h @@ -74,20 +74,21 @@ struct can_frame { /* * defined bits for canfd_frame.flags * - * As the default for CAN FD should be to support the high data rate in the - * payload section of the frame (HDR) and to support up to 64 byte in the - * data section (EDL) the bits are only set in the non-default case. - * Btw. as long as there's no real implementation for CAN FD network driver - * these bits are only preliminary. + * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to + * be set in the CAN frame bitstream on the wire. The EDL bit switch turns + * the CAN controllers bitstream processor into the CAN FD mode which creates + * two new options within the CAN FD frame specification: * - * RX: NOHDR/NOEDL - info about received CAN FD frame - * ESI - bit from originating CAN controller - * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller - * ESI - bit is set by local CAN controller + * Bit Rate Switch - to indicate a second bitrate is/was used for the payload + * Error State Indicator - represents the error state of the transmitting node + * + * As the CANFD_ESI bit is internally generated by the transmitting CAN + * controller only the CANFD_BRS bit is relevant for real CAN controllers when + * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make + * sense for virtual CAN interfaces to test applications with echoed frames. 
*/ -#define CANFD_NOHDR 0x01 /* frame without high data rate */ -#define CANFD_NOEDL 0x02 /* frame without extended data length */ -#define CANFD_ESI 0x04 /* error state indicator */ +#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */ +#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */ /** * struct canfd_frame - CAN flexible data rate frame structure diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 9303348965fb..d8c713e148e3 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -57,6 +57,9 @@ * * 7.19 * - add FUSE_FALLOCATE + * + * 7.20 + * - add FUSE_AUTO_INVAL_DATA */ #ifndef _LINUX_FUSE_H @@ -88,7 +91,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 19 +#define FUSE_KERNEL_MINOR_VERSION 20 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -163,10 +166,19 @@ struct fuse_file_lock { /** * INIT request/reply flags * + * FUSE_ASYNC_READ: asynchronous read requests * FUSE_POSIX_LOCKS: remote locking for POSIX file locks + * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported) + * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." + * FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB * FUSE_DONT_MASK: don't apply umask to file mode on create operations + * FUSE_SPLICE_WRITE: kernel supports splice write on the device + * FUSE_SPLICE_MOVE: kernel supports splice move on the device + * FUSE_SPLICE_READ: kernel supports splice read on the device * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks + * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories + * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -175,7 +187,12 @@ struct fuse_file_lock { #define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) #define FUSE_DONT_MASK (1 << 6) +#define FUSE_SPLICE_WRITE (1 << 7) +#define FUSE_SPLICE_MOVE (1 << 8) +#define FUSE_SPLICE_READ (1 << 9) #define FUSE_FLOCK_LOCKS (1 << 10) +#define FUSE_HAS_IOCTL_DIR (1 << 11) +#define FUSE_AUTO_INVAL_DATA (1 << 12) /** * CUSE INIT request/reply flags diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h index f875b316249d..16625d799b6f 100644 --- a/include/linux/input/eeti_ts.h +++ b/include/linux/input/eeti_ts.h @@ -2,6 +2,7 @@ #define LINUX_INPUT_EETI_TS_H struct eeti_ts_platform_data { + int irq_gpio; unsigned int irq_active_high; }; diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 54d6d690073c..7e83370e6fd2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -20,6 +20,7 @@ #define __LINUX_IOMMU_H #include <linux/errno.h> +#include <linux/types.h> #define IOMMU_READ (1) #define IOMMU_WRITE (2) @@ -30,6 +31,7 @@ struct iommu_group; struct bus_type; struct device; struct iommu_domain; +struct notifier_block; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 379e433e15e0..879db26ec401 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -369,6 +369,7 @@ struct ipv6_pinfo { __u8 rcv_tclass; __u32 dst_cookie; + __u32 rx_dst_cookie; struct ipv6_mc_socklist __rcu *ipv6_mc_list; struct ipv6_ac_socklist *ipv6_ac_list; diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h index 40c372165f3e..32a1b5cfeba1 100644 --- 
a/include/linux/mfd/ezx-pcap.h +++ b/include/linux/mfd/ezx-pcap.h @@ -16,6 +16,7 @@ struct pcap_subdev { struct pcap_platform_data { unsigned int irq_base; unsigned int config; + int gpio; void (*init) (void *); /* board specific init */ int num_subdevs; struct pcap_subdev *subdevs; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eb06e58bed0b..a9db4f33407f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1300,6 +1300,8 @@ struct net_device { /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; +#define GSO_MAX_SEGS 65535 + u16 gso_max_segs; #ifdef CONFIG_DCB /* Data Center Bridging netlink ops */ diff --git a/include/linux/sched.h b/include/linux/sched.h index c147e7024f11..b8c86648a2f9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -334,14 +334,6 @@ static inline void lockup_detector_init(void) } #endif -#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND) -void lockup_detector_bootcpu_resume(void); -#else -static inline void lockup_detector_bootcpu_resume(void) -{ -} -#endif - #ifdef CONFIG_DETECT_HUNG_TASK extern unsigned int sysctl_hung_task_panic; extern unsigned long sysctl_hung_task_check_count; diff --git a/include/linux/security.h b/include/linux/security.h index 4e5a73cdbbef..3dea6a9d568f 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -1242,8 +1242,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself * to the @parent process for tracing. - * The parent process will still have to undergo the ptrace_access_check - * checks before it is allowed to trace this one. * @parent contains the task_struct structure for debugger process. * Return 0 if permission is granted. * @capget: diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 493fa0c79005..3d254e10ff30 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -96,6 +96,7 @@ enum ieee80211_band { * is not permitted. * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel * is not permitted. + * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel. */ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, @@ -104,6 +105,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_RADAR = 1<<3, IEEE80211_CHAN_NO_HT40PLUS = 1<<4, IEEE80211_CHAN_NO_HT40MINUS = 1<<5, + IEEE80211_CHAN_NO_OFDM = 1<<6, }; #define IEEE80211_CHAN_NO_HT40 \ diff --git a/include/net/codel.h b/include/net/codel.h index 550debfc2403..389cf621161d 100644 --- a/include/net/codel.h +++ b/include/net/codel.h @@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, } } } else if (drop) { + u32 delta; + if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { @@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, * assume that the drop rate that controlled the queue on the * last cycle is a good starting point to control it now. */ - if (codel_time_before(now - vars->drop_next, + delta = vars->count - vars->lastcount; + if (delta > 1 && + codel_time_before(now - vars->drop_next, 16 * params->interval)) { - vars->count = (vars->count - vars->lastcount) | 1; + vars->count = delta; /* we dont care if rec_inv_sqrt approximation * is not very precise : * Next Newton steps will correct it quadratically. 
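For context on the codel hunk above: vars->count feeds CoDel's control law, which schedules each successive drop interval/sqrt(count) after the previous one, so carrying a large count into a new dropping cycle makes the drop rate ramp up quickly. The fix reuses the previous cycle's delta only when more than one drop actually happened there, rather than the old (count - lastcount) | 1 form. Below is a floating-point sketch of the standard control law from the CoDel algorithm, for illustration only; the kernel implementation instead keeps a fixed-point reciprocal square root (rec_inv_sqrt) refined by the Newton steps mentioned in the comment.

    #include <math.h>
    #include <stdio.h>

    /* Standard CoDel control law: the next drop is scheduled interval/sqrt(count)
     * after the current time, so a larger count means a higher drop rate. */
    static double codel_control_law(double now, double interval, unsigned int count)
    {
            return now + interval / sqrt((double)count);
    }

    int main(void)
    {
            double interval = 100.0; /* ms, CoDel's usual default interval */
            unsigned int count;

            for (count = 1; count <= 16; count *= 2)
                    printf("count=%2u -> next drop in %.1f ms\n",
                           count, codel_control_law(0.0, interval, count));
            return 0;
    }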
diff --git a/include/net/dst.h b/include/net/dst.h index baf597890064..621e3513ef5e 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -110,7 +110,7 @@ struct dst_entry { }; extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); -extern const u32 dst_default_metrics[RTAX_MAX]; +extern const u32 dst_default_metrics[]; #define DST_METRICS_READ_ONLY 0x1UL #define __DST_METRICS_PTR(Y) \ diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index 5ee66f517b4f..ba1d3615acbb 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -39,6 +39,7 @@ struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl); void (*send_check)(struct sock *sk, struct sk_buff *skb); int (*rebuild_header)(struct sock *sk); + void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, struct request_sock *req, diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 83b567fe1941..613cfa401672 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -249,13 +249,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) return flags; } -static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - - dst_hold(dst); - sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; -} - #endif /* _INET_SOCK_H */ diff --git a/include/net/ip.h b/include/net/ip.h index bd5e444a19ce..5a5d84d3d2c6 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -120,7 +120,7 @@ extern struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, struct inet_cork *cork); -extern int ip_send_skb(struct sk_buff *skb); +extern int ip_send_skb(struct net *net, struct sk_buff *skb); extern int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4); extern void ip_flush_pending_frames(struct sock *sk); extern struct sk_buff *ip_make_skb(struct sock *sk, diff --git a/include/net/sock.h b/include/net/sock.h index b3730239bf18..72132aef53fc 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -218,6 +218,7 @@ struct cg_proto; * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) * @sk_gso_type: GSO type (e.g. 
%SKB_GSO_TCPV4) * @sk_gso_max_size: Maximum GSO segment size to build + * @sk_gso_max_segs: Maximum number of GSO segments * @sk_lingertime: %SO_LINGER l_linger setting * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct @@ -338,6 +339,7 @@ struct sock { netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; + u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; diff --git a/include/net/tcp.h b/include/net/tcp.h index e19124b84cd2..1f000ffe7075 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -464,6 +464,7 @@ extern int tcp_disconnect(struct sock *sk, int flags); void tcp_connect_init(struct sock *sk); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); /* From syncookies.c */ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; diff --git a/include/net/xfrm.h b/include/net/xfrm.h index d9509eb29b80..62b619e82a90 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -213,6 +213,9 @@ struct xfrm_state { struct xfrm_lifetime_cur curlft; struct tasklet_hrtimer mtimer; + /* used to fix curlft->add_time when changing date */ + long saved_tmo; + /* Last used time */ unsigned long lastused; @@ -238,6 +241,7 @@ static inline struct net *xs_net(struct xfrm_state *x) /* xflags - make enum if more show up */ #define XFRM_TIME_DEFER 1 +#define XFRM_SOFT_EXPIRE 2 enum { XFRM_STATE_VOID, diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 1da39ea248fd..c8b7446b27df 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -178,9 +178,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) arch_suspend_enable_irqs(); BUG_ON(irqs_disabled()); - /* Kick the lockup detector */ - lockup_detector_bootcpu_resume(); - Enable_cpus: enable_nonboot_cpus(); diff --git a/kernel/printk.c b/kernel/printk.c index 6a76ab9d4476..66a2ea37b576 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -1034,6 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) struct log *msg = log_from_idx(idx); len += msg_print_text(msg, prev, true, NULL, 0); + prev = msg->flags; idx = log_next(idx); seq++; } @@ -1046,6 +1047,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) struct log *msg = log_from_idx(idx); len -= msg_print_text(msg, prev, true, NULL, 0); + prev = msg->flags; idx = log_next(idx); seq++; } diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 69add8a9da68..4b1dfba70f7c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -575,7 +575,7 @@ out: /* * Create/destroy watchdog threads as CPUs come and go: */ -static int +static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; @@ -610,27 +610,10 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) return NOTIFY_OK; } -static struct notifier_block cpu_nfb = { +static struct notifier_block __cpuinitdata cpu_nfb = { .notifier_call = cpu_callback }; -#ifdef CONFIG_SUSPEND -/* - * On exit from suspend we force an offline->online transition on the boot CPU - * so that the PMU state that was lost while in suspended state gets set up - * properly for the boot CPU. This information is required for restarting the - * NMI watchdog. 
- */ -void lockup_detector_bootcpu_resume(void) -{ - void *cpu = (void *)(long)smp_processor_id(); - - cpu_callback(&cpu_nfb, CPU_DEAD_FROZEN, cpu); - cpu_callback(&cpu_nfb, CPU_UP_PREPARE_FROZEN, cpu); - cpu_callback(&cpu_nfb, CPU_ONLINE_FROZEN, cpu); -} -#endif - void __init lockup_detector_init(void) { void *cpu = (void *)(long)smp_processor_id(); diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index b421cc49d2cd..fc866f2e4528 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c @@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv) if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) goto out; - if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect)) - goto out; - curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw) + goto out; + next_gw = batadv_gw_get_best_gw_node(bat_priv); if (curr_gw == next_gw) diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index a438f4b582fc..99dd8f75b3ff 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, del: list_del(&entry->list); kfree(entry); + kfree(tt_change_node); event_removed = true; goto unlock; } diff --git a/net/core/dev.c b/net/core/dev.c index 0cb3fe8d8e72..a39354ee1432 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1055,6 +1055,8 @@ rollback: */ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) { + char *new_ifalias; + ASSERT_RTNL(); if (len >= IFALIASZ) @@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) return 0; } - dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); - if (!dev->ifalias) + new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); + if (!new_ifalias) return -ENOMEM; + dev->ifalias = new_ifalias; strlcpy(dev->ifalias, alias, len+1); return len; @@ -2134,6 +2137,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) __be16 protocol = skb->protocol; netdev_features_t features = skb->dev->features; + if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) + features &= ~NETIF_F_GSO_MASK; + if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; @@ -5986,6 +5992,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev_net_set(dev, &init_net); dev->gso_max_size = GSO_MAX_SIZE; + dev->gso_max_segs = GSO_MAX_SEGS; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); diff --git a/net/core/dst.c b/net/core/dst.c index 069d51d29414..56d63612e1e4 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb) } EXPORT_SYMBOL(dst_discard); -const u32 dst_default_metrics[RTAX_MAX]; +const u32 dst_default_metrics[RTAX_MAX + 1] = { + /* This initializer is needed to force linker to place this variable + * into const section. Otherwise it might end into bss section. + * We really want to avoid false sharing on this variable, and catch + * any writes on it. 
+ */ + [RTAX_MAX] = 0xdeadbeef, +}; + void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, int initial_obsolete, unsigned short flags) diff --git a/net/core/sock.c b/net/core/sock.c index 6b654b3ddfda..8f67ced8d6a8 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1458,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; + sk->sk_gso_max_segs = dst->dev->gso_max_segs; } } } diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index f0cdb30921c0..57bd978483e1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -367,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head) static inline void free_leaf(struct leaf *l) { - call_rcu_bh(&l->rcu, __leaf_free_rcu); + call_rcu(&l->rcu, __leaf_free_rcu); } static inline void free_leaf_info(struct leaf_info *leaf) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ba39a52d18c1..147ccc3e93db 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) neigh = __ipv4_neigh_lookup_noref(dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); - if (neigh) { + if (!IS_ERR(neigh)) { int res = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); @@ -1366,9 +1366,8 @@ out: return skb; } -int ip_send_skb(struct sk_buff *skb) +int ip_send_skb(struct net *net, struct sk_buff *skb) { - struct net *net = sock_net(skb->sk); int err; err = ip_local_out(skb); @@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4) return 0; /* Netfilter gets whole the not fragmented skb. */ - return ip_send_skb(skb); + return ip_send_skb(sock_net(sk), skb); } /* @@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; + skb_orphan(nskb); skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); ip_push_pending_frames(sk, &fl4); } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index c035251beb07..e4ba974f143c 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -70,7 +70,6 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/mm.h> -#include <linux/bootmem.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> @@ -80,7 +79,6 @@ #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/init.h> -#include <linux/workqueue.h> #include <linux/skbuff.h> #include <linux/inetdevice.h> #include <linux/igmp.h> @@ -88,11 +86,9 @@ #include <linux/mroute.h> #include <linux/netfilter_ipv4.h> #include <linux/random.h> -#include <linux/jhash.h> #include <linux/rcupdate.h> #include <linux/times.h> #include <linux/slab.h> -#include <linux/prefetch.h> #include <net/dst.h> #include <net/net_namespace.h> #include <net/protocol.h> diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e7e6eeae49c0..2109ff4a1daf 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, old_size_goal + mss_now > xmit_size_goal)) { xmit_size_goal = old_size_goal; } else { - tp->xmit_size_goal_segs = xmit_size_goal / mss_now; + tp->xmit_size_goal_segs = + min_t(u16, xmit_size_goal / mss_now, + sk->sk_gso_max_segs); xmit_size_goal = tp->xmit_size_goal_segs * mss_now; } } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c 
index 4d4db16e336e..1432cdb0644c 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) left = tp->snd_cwnd - in_flight; if (sk_can_gso(sk) && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && - left * tp->mss_cache < sk->sk_gso_max_size) + left * tp->mss_cache < sk->sk_gso_max_size && + left < sk->sk_gso_max_segs) return true; return left <= tcp_max_tso_deferred_mss(tp); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 2fd2bc9e3c64..85308b90df80 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -5392,6 +5392,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, { struct tcp_sock *tp = tcp_sk(sk); + if (unlikely(sk->sk_rx_dst == NULL)) + inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. * The code loosely follows the one in the famous @@ -5605,7 +5607,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) tcp_set_state(sk, TCP_ESTABLISHED); if (skb != NULL) { - inet_sk_rx_dst_set(sk, skb); + icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 42b2a6a73092..767823764016 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1627,9 +1627,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) sk->sk_rx_dst = NULL; } } - if (unlikely(sk->sk_rx_dst == NULL)) - inet_sk_rx_dst_set(sk, skb); - if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; goto reset; @@ -1872,10 +1869,21 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_destructor= tcp_twsk_destructor, }; +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; +} +EXPORT_SYMBOL(inet_sk_rx_dst_set); + const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, + .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v4_conn_request, .syn_recv_sock = tcp_v4_syn_recv_sock, .net_header_len = sizeof(struct iphdr), diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 2288a6399e1e..0abe67bb4d3a 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net) static void __net_exit tcp_net_metrics_exit(struct net *net) { + unsigned int i; + + for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) { + struct tcp_metrics_block *tm, *next; + + tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1); + while (tm) { + next = rcu_dereference_protected(tm->tcpm_next, 1); + kfree(tm); + tm = next; + } + } kfree(net->ipv4.tcp_metrics_hash); } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 232a90c3ec86..d9c9dcef2de3 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -387,7 +387,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct tcp_sock *oldtp = tcp_sk(sk); struct tcp_cookie_values *oldcvp = oldtp->cookie_values; - inet_sk_rx_dst_set(newsk, skb); + newicsk->icsk_af_ops->sk_rx_dst_set(newsk, skb); /* TCP Cookie Transactions require space for the cookie pair, * as it differs for each connection. 
There is no need to diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 3f1bcff0b10b..20dfd892c86f 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -940,7 +940,7 @@ void __init tcp_tasklet_init(void) * We cant xmit new skbs from this context, as we might already * hold qdisc lock. */ -void tcp_wfree(struct sk_buff *skb) +static void tcp_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct tcp_sock *tp = tcp_sk(sk); @@ -1522,21 +1522,21 @@ static void tcp_cwnd_validate(struct sock *sk) * when we would be allowed to send the split-due-to-Nagle skb fully. */ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, - unsigned int mss_now, unsigned int cwnd) + unsigned int mss_now, unsigned int max_segs) { const struct tcp_sock *tp = tcp_sk(sk); - u32 needed, window, cwnd_len; + u32 needed, window, max_len; window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; - cwnd_len = mss_now * cwnd; + max_len = mss_now * max_segs; - if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) - return cwnd_len; + if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) + return max_len; needed = min(skb->len, window); - if (cwnd_len <= needed) - return cwnd_len; + if (max_len <= needed) + return max_len; return needed - needed % mss_now; } @@ -1765,7 +1765,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) limit = min(send_win, cong_win); /* If a full-sized TSO skb can be sent, do it. */ - if (limit >= sk->sk_gso_max_size) + if (limit >= min_t(unsigned int, sk->sk_gso_max_size, + sk->sk_gso_max_segs * tp->mss_cache)) goto send_now; /* Middle in queue won't get any more data, full sendable already? */ @@ -1999,7 +2000,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, limit = mss_now; if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, - cwnd_quota); + min_t(unsigned int, + cwnd_quota, + sk->sk_gso_max_segs)); if (skb->len > limit && unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index b4c3582a991f..6f6d1aca3c3d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) uh->check = CSUM_MANGLED_0; send: - err = ip_send_skb(skb); + err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { UDP_INC_STATS_USER(sock_net(sk), diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index c66b90f71c9b..bb9ce2b2f377 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1447,7 +1447,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ + struct dst_entry *dst = sk->sk_rx_dst; + sock_rps_save_rxhash(sk, skb); + if (dst) { + if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || + dst->ops->check(dst, np->rx_dst_cookie) == NULL) { + dst_release(dst); + sk->sk_rx_dst = NULL; + } + } + if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) goto reset; if (opt_skb) @@ -1705,9 +1715,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb) struct dst_entry *dst = sk->sk_rx_dst; struct inet_sock *icsk = inet_sk(sk); if (dst) - dst = dst_check(dst, 0); + dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); if (dst && - icsk->rx_dst_ifindex == inet6_iif(skb)) + icsk->rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } @@ -1719,10 +1729,23 @@ static struct 
timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_destructor= tcp_twsk_destructor, }; +static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + const struct rt6_info *rt = (const struct rt6_info *)dst; + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + if (rt->rt6i_node) + inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; +} + static const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, + .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct ipv6hdr), @@ -1754,6 +1777,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, + .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct iphdr), diff --git a/net/llc/llc_station.c b/net/llc/llc_station.c index 39a8d8924b9c..6828e39ec2ec 100644 --- a/net/llc/llc_station.c +++ b/net/llc/llc_station.c @@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 6fac18c0423f..85572353a7e3 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) del_timer_sync(&sdata->u.mesh.housekeeping_timer); del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); + del_timer_sync(&sdata->u.mesh.mesh_path_timer); /* * If the timer fired while we waited for it, it will have * requeued the work. 
Now the work will be running again @@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) local->fif_other_bss--; atomic_dec(&local->iff_allmultis); ieee80211_configure_filter(local); + + sdata->u.mesh.timers_running = 0; } static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index cef0c9e79aba..a4a5acdbaa4d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.bcn_mon_timer); del_timer_sync(&sdata->u.mgd.timer); del_timer_sync(&sdata->u.mgd.chswitch_timer); + + sdata->u.mgd.timers_running = 0; } void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index bcaee5d12839..839dd9737989 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, if (local->scan_req != local->int_scan_req) cfg80211_scan_done(local->scan_req, aborted); local->scan_req = NULL; - local->scan_sdata = NULL; + rcu_assign_pointer(local->scan_sdata, NULL); local->scanning = 0; local->scan_channel = NULL; @@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata) kfree(local->sched_scan_ies.ie[i]); drv_sched_scan_stop(local, sdata); - rcu_assign_pointer(local->sched_scan_sdata, NULL); } out: mutex_unlock(&local->mtx); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ceaca7c134a0..8ac890a1a4c0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po, default: WARN(1, "TPACKET version not supported\n"); BUG(); - return 0; + return NULL; } } @@ -1936,7 +1936,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb) if (likely(po->tx_ring.pg_vec)) { ph = skb_shinfo(skb)->destructor_arg; - BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); BUG_ON(atomic_read(&po->tx_ring.pending) == 0); atomic_dec(&po->tx_ring.pending); __packet_set_status(po, ph, TP_STATUS_AVAILABLE); diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c index f10fb8256442..05d60859d8e3 100644 --- a/net/sched/act_gact.c +++ b/net/sched/act_gact.c @@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, struct tcf_common *pc; int ret = 0; int err; +#ifdef CONFIG_GACT_PROB + struct tc_gact_p *p_parm = NULL; +#endif if (nla == NULL) return -EINVAL; @@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, #ifndef CONFIG_GACT_PROB if (tb[TCA_GACT_PROB] != NULL) return -EOPNOTSUPP; +#else + if (tb[TCA_GACT_PROB]) { + p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm->ptype >= MAX_RAND) + return -EINVAL; + } #endif pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info); @@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, spin_lock_bh(&gact->tcf_lock); gact->tcf_action = parm->action; #ifdef CONFIG_GACT_PROB - if (tb[TCA_GACT_PROB] != NULL) { - struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm) { gact->tcfg_paction = p_parm->paction; gact->tcfg_pval = p_parm->pval; gact->tcfg_ptype = p_parm->ptype; @@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a, spin_lock(&gact->tcf_lock); #ifdef CONFIG_GACT_PROB - if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL) + if 
(gact->tcfg_ptype) action = gact_rand[gact->tcfg_ptype](gact); else action = gact->tcf_action; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 60e281ad0f07..58fb3c7aab9e 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -185,7 +185,12 @@ err3: err2: kfree(tname); err1: - kfree(pc); + if (ret == ACT_P_CREATED) { + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); + } return err; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 26aa2f6ce257..45c53ab067a6 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, p = to_pedit(pc); keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) { - kfree(pc); + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); return -ENOMEM; } ret = ACT_P_CREATED; diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c index 3922f2a2821b..3714f60f0b3c 100644 --- a/net/sched/act_simple.c +++ b/net/sched/act_simple.c @@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, d = to_defact(pc); ret = alloc_defdata(d, defdata); if (ret < 0) { - kfree(pc); + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); return ret; } d->tcf_action = parm->action; diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 9af01f3df18c..e4723d31fdd5 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -203,6 +203,34 @@ out: return index; } +/* Length of the next packet (0 if the queue is empty). */ +static unsigned int qdisc_peek_len(struct Qdisc *sch) +{ + struct sk_buff *skb; + + skb = sch->ops->peek(sch); + return skb ? qdisc_pkt_len(skb) : 0; +} + +static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *); +static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, + unsigned int len); + +static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl, + u32 lmax, u32 inv_w, int delta_w) +{ + int i; + + /* update qfq-specific data */ + cl->lmax = lmax; + cl->inv_w = inv_w; + i = qfq_calc_index(cl->inv_w, cl->lmax); + + cl->grp = &q->groups[i]; + + q->wsum += delta_w; +} + static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { @@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, lmax = 1UL << QFQ_MTU_SHIFT; if (cl != NULL) { + bool need_reactivation = false; + if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), @@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } - if (inv_w != cl->inv_w) { - sch_tree_lock(sch); - q->wsum += delta_w; - cl->inv_w = inv_w; - sch_tree_unlock(sch); + if (lmax == cl->lmax && inv_w == cl->inv_w) + return 0; /* nothing to update */ + + i = qfq_calc_index(inv_w, lmax); + sch_tree_lock(sch); + if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { + /* + * shift cl->F back, to not charge the + * class for the not-yet-served head + * packet + */ + cl->F = cl->S; + /* remove class from its slot in the old group */ + qfq_deactivate_class(q, cl); + need_reactivation = true; } + + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); + + if (need_reactivation) /* activate in new group */ + qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); + sch_tree_unlock(sch); + return 0; } @@ -273,11 +320,8 
@@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, cl->refcnt = 1; cl->common.classid = classid; - cl->lmax = lmax; - cl->inv_w = inv_w; - i = qfq_calc_index(cl->inv_w, cl->lmax); - cl->grp = &q->groups[i]; + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); @@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } } - q->wsum += weight; sch_tree_lock(sch); qdisc_class_hash_insert(&q->clhash, &cl->common); @@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V) } } -/* What is length of next packet in queue (0 if queue is empty) */ -static unsigned int qdisc_peek_len(struct Qdisc *sch) -{ - struct sk_buff *skb; - - skb = sch->ops->peek(sch); - return skb ? qdisc_pkt_len(skb) : 0; -} - /* * Updates the class, returns true if also the group needs to be updated. */ @@ -843,11 +877,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); - struct qfq_group *grp; struct qfq_class *cl; int err; - u64 roundedS; - int s; cl = qfq_classify(skb, sch, &err); if (cl == NULL) { @@ -876,11 +907,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) return err; /* If reach this point, queue q was idle */ - grp = cl->grp; + qfq_activate_class(q, cl, qdisc_pkt_len(skb)); + + return err; +} + +/* + * Handle class switch from idle to backlogged. + */ +static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, + unsigned int pkt_len) +{ + struct qfq_group *grp = cl->grp; + u64 roundedS; + int s; + qfq_update_start(q, cl); /* compute new finish time and rounded start. */ - cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w; + cl->F = cl->S + (u64)pkt_len * cl->inv_w; roundedS = qfq_round_down(cl->S, grp->slot_shift); /* @@ -917,8 +962,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) skip_update: qfq_slot_insert(grp, cl, roundedS); - - return err; } diff --git a/net/wireless/core.c b/net/wireless/core.c index 31b40cc4a9c3..dcd64d5b07aa 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -952,6 +952,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, */ synchronize_rcu(); INIT_LIST_HEAD(&wdev->list); + /* + * Ensure that all events have been processed and + * freed. 
+ */ + cfg80211_process_wdev_events(wdev); break; case NETDEV_PRE_UP: if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) diff --git a/net/wireless/core.h b/net/wireless/core.h index 5206c6844fd7..bc7430b54771 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, u32 *flags, struct vif_params *params); void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); +void cfg80211_process_wdev_events(struct wireless_dev *wdev); int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 2303ee73b50a..2ded3c7fad06 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -680,6 +680,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_IBSS; if (rd_flags & NL80211_RRF_DFS) channel_flags |= IEEE80211_CHAN_RADAR; + if (rd_flags & NL80211_RRF_NO_OFDM) + channel_flags |= IEEE80211_CHAN_NO_OFDM; return channel_flags; } @@ -901,7 +903,21 @@ static void handle_channel(struct wiphy *wiphy, chan->max_antenna_gain = min(chan->orig_mag, (int) MBI_TO_DBI(power_rule->max_antenna_gain)); chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); - chan->max_power = min(chan->max_power, chan->max_reg_power); + if (chan->orig_mpwr) { + /* + * Devices that have their own custom regulatory domain + * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the + * passed country IE power settings. + */ + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY && + wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) + chan->max_power = chan->max_reg_power; + else + chan->max_power = min(chan->orig_mpwr, + chan->max_reg_power); + } else + chan->max_power = chan->max_reg_power; } static void handle_band(struct wiphy *wiphy, @@ -1885,6 +1901,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy) chan->flags = chan->orig_flags; chan->max_antenna_gain = chan->orig_mag; chan->max_power = chan->orig_mpwr; + chan->beacon_found = false; } } } diff --git a/net/wireless/util.c b/net/wireless/util.c index 26f8cd30f712..994e2f0cc7a8 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -735,7 +735,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) wdev->connect_keys = NULL; } -static void cfg80211_process_wdev_events(struct wireless_dev *wdev) +void cfg80211_process_wdev_events(struct wireless_dev *wdev) { struct cfg80211_event *ev; unsigned long flags; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 5b228f97d4b3..87cd0e4d4282 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) if (x->lft.hard_add_expires_seconds) { long tmo = x->lft.hard_add_expires_seconds + x->curlft.add_time - now; - if (tmo <= 0) - goto expired; + if (tmo <= 0) { + if (x->xflags & XFRM_SOFT_EXPIRE) { + /* enter hard expire without soft expire first?! + * setting a new date could trigger this. 
+ * workarbound: fix x->curflt.add_time by below: + */ + x->curlft.add_time = now - x->saved_tmo - 1; + tmo = x->lft.hard_add_expires_seconds - x->saved_tmo; + } else + goto expired; + } if (tmo < next) next = tmo; } @@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) if (x->lft.soft_add_expires_seconds) { long tmo = x->lft.soft_add_expires_seconds + x->curlft.add_time - now; - if (tmo <= 0) + if (tmo <= 0) { warn = 1; - else if (tmo < next) + x->xflags &= ~XFRM_SOFT_EXPIRE; + } else if (tmo < next) { next = tmo; + x->xflags |= XFRM_SOFT_EXPIRE; + x->saved_tmo = tmo; + } } if (x->lft.soft_use_expires_seconds) { long tmo = x->lft.soft_use_expires_seconds + diff --git a/scripts/decodecode b/scripts/decodecode index 18ba881c3415..4f8248d5a11f 100755 --- a/scripts/decodecode +++ b/scripts/decodecode @@ -89,7 +89,7 @@ echo $code >> $T.s disas $T cat $T.dis >> $T.aa -faultline=`cat $T.dis | head -1 | cut -d":" -f2` +faultline=`cat $T.dis | head -1 | cut -d":" -f2-` faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` cat $T.oo | sed -e "s/\($faultline\)/\*\1 <-- trapping instruction/g" diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index 83554ee8a587..d51b7c76c37d 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -290,10 +290,51 @@ static int yama_ptrace_access_check(struct task_struct *child, return rc; } +/** + * yama_ptrace_traceme - validate PTRACE_TRACEME calls + * @parent: task that will become the ptracer of the current task + * + * Returns 0 if following the ptrace is allowed, -ve on error. + */ +static int yama_ptrace_traceme(struct task_struct *parent) +{ + int rc; + + /* If standard caps disallows it, so does Yama. We should + * only tighten restrictions further. + */ + rc = cap_ptrace_traceme(parent); + if (rc) + return rc; + + /* Only disallow PTRACE_TRACEME on more aggressive settings. */ + switch (ptrace_scope) { + case YAMA_SCOPE_CAPABILITY: + if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE)) + rc = -EPERM; + break; + case YAMA_SCOPE_NO_ATTACH: + rc = -EPERM; + break; + } + + if (rc) { + char name[sizeof(current->comm)]; + printk_ratelimited(KERN_NOTICE + "ptraceme of pid %d was attempted by: %s (pid %d)\n", + current->pid, + get_task_comm(name, parent), + parent->pid); + } + + return rc; +} + static struct security_operations yama_ops = { .name = "yama", .ptrace_access_check = yama_ptrace_access_check, + .ptrace_traceme = yama_ptrace_traceme, .task_prctl = yama_task_prctl, .task_free = yama_task_free, }; diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c index 4e7ec2b49873..d0f00356fc11 100644 --- a/sound/core/sgbuf.c +++ b/sound/core/sgbuf.c @@ -101,7 +101,7 @@ void *snd_malloc_sgbuf_pages(struct device *device, if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device, chunk, &tmpb) < 0) { if (!sgbuf->pages) - return NULL; + goto _failed; if (!res_size) goto _failed; size = sgbuf->pages * PAGE_SIZE; diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c index 4f502a2bdc3c..0a436626182b 100644 --- a/sound/pci/emu10k1/memory.c +++ b/sound/pci/emu10k1/memory.c @@ -326,7 +326,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst for (page = blk->first_page; page <= blk->last_page; page++, idx++) { unsigned long ofs = idx << PAGE_SHIFT; dma_addr_t addr; - addr = snd_pcm_sgbuf_get_addr(substream, ofs); + if (ofs >= runtime->dma_bytes) + addr = emu->silent_page.addr; + else + addr = snd_pcm_sgbuf_get_addr(substream, ofs); if (! 
is_valid_page(emu, addr)) { printk(KERN_ERR "emu: failure page = %d\n", idx); mutex_unlock(&hdr->block_mutex); diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 647218d69f68..4f7d2dfcef7b 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -332,13 +332,12 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec, if (cfg->dig_outs) snd_printd(" dig-out=0x%x/0x%x\n", cfg->dig_out_pins[0], cfg->dig_out_pins[1]); - snd_printd(" inputs:"); + snd_printd(" inputs:\n"); for (i = 0; i < cfg->num_inputs; i++) { - snd_printd(" %s=0x%x", + snd_printd(" %s=0x%x\n", hda_get_autocfg_input_label(codec, cfg, i), cfg->inputs[i].pin); } - snd_printd("\n"); if (cfg->dig_in_pin) snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 14361184ae1e..5e22a8f43d2e 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -2967,12 +2967,10 @@ static const char * const cxt5066_models[CXT5066_MODELS] = { }; static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { - SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO), SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), - SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), @@ -2988,14 +2986,10 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), - SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO), - SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO), SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), - SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO), - SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO), {} }; diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 69b928449789..8f23374fa642 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -877,8 +877,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, struct hdmi_eld *eld; struct hdmi_spec_per_cvt *per_cvt = NULL; - hinfo->nid = 0; /* clear the leftover value */ - /* Validate hinfo */ pin_idx = hinfo_to_pin_index(spec, hinfo); if (snd_BUG_ON(pin_idx < 0)) @@ -1163,6 +1161,14 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format); } +static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) +{ + snd_hda_codec_cleanup_stream(codec, hinfo->nid); + return 0; +} + static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) @@ -1202,6 +1208,7 @@ static const struct 
hda_pcm_ops generic_ops = { .open = hdmi_pcm_open, .close = hdmi_pcm_close, .prepare = generic_hdmi_playback_pcm_prepare, + .cleanup = generic_hdmi_playback_pcm_cleanup, }; static int generic_hdmi_build_pcms(struct hda_codec *codec) @@ -1220,7 +1227,6 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec) pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK]; pstr->substreams = 1; pstr->ops = generic_ops; - pstr->nid = 1; /* FIXME: just for avoiding a debug WARNING */ /* other pstr fields are set in open */ } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b9a5c4503336..4f81dd44c837 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6099,6 +6099,8 @@ static const struct alc_fixup alc269_fixups[] = { [ALC269_FIXUP_PCM_44K] = { .type = ALC_FIXUP_FUNC, .v.func = alc269_fixup_pcm_44k, + .chained = true, + .chain_id = ALC269_FIXUP_QUANTA_MUTE }, [ALC269_FIXUP_STEREO_DMIC] = { .type = ALC_FIXUP_FUNC, @@ -6207,9 +6209,10 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE), - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K), + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), #if 0 |