author    | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2015-07-27 11:15:16 -0700
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2015-07-27 11:15:16 -0700
commit    | 722b262c966a388a1f8bfcdd100cd19c4ce59ab9 (patch)
tree      | 34e755b07dbca16f65ad7eb5bb1fade06032cebf
parent    | 4e02bea82b4e6c1eebeac6e6f4ecb93e9c2aa8c0 (diff)
parent    | cbfe8fa6cd672011c755c3cd85c9ffd4e2d10a6f (diff)
download  | linux-722b262c966a388a1f8bfcdd100cd19c4ce59ab9.tar.bz2
Merge 4.2-rc4 into usb-next
We want the USB fixes that went into that release in this branch as
well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
274 files changed, 2107 insertions, 1580 deletions
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
 
 NOTE: this only applies to the SMMU itself, not masters connected
 upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+    : Avoid sending CMD_PREFETCH_* commands to the SMMU.
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 5d0376b8f202..211e7785f4d2 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,7 +17,6 @@ Required properties:
 	"fsl,imx6sx-usdhc"
 
 Optional properties:
-- fsl,cd-controller : Indicate to use controller internal card detection
 - fsl,wp-controller : Indicate to use controller internal write protection
 - fsl,delay-line : Specify the number of delay cells for override mode.
   This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
 	compatible = "fsl,imx51-esdhc";
 	reg = <0x70004000 0x4000>;
 	interrupts = <1>;
-	fsl,cd-controller;
 	fsl,wp-controller;
 };
diff --git a/MAINTAINERS b/MAINTAINERS
index a2264167791a..9289ecb57b68 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5899,7 +5899,6 @@ S:	Supported
 F:	Documentation/s390/kvm.txt
 F:	arch/s390/include/asm/kvm*
 F:	arch/s390/kvm/
-F:	drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:	Christoffer Dall <christoffer.dall@linaro.org>
@@ -6839,6 +6838,12 @@ T:	git git://linuxtv.org/anttip/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/msi2500/
 
+MSYSTEMS DISKONCHIP G3 MTD DRIVER
+M:	Robert Jarzmik <robert.jarzmik@free.fr>
+L:	linux-mtd@lists.infradead.org
+S:	Maintained
+F:	drivers/mtd/devices/docg3*
+
 MT9M032 APTINA SENSOR DRIVER
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:	linux-media@vger.kernel.org
@@ -10896,6 +10901,15 @@ F:	drivers/block/virtio_blk.c
 F:	include/linux/virtio_*.h
 F:	include/uapi/linux/virtio_*.h
 
+VIRTIO DRIVERS FOR S390
+M:	Christian Borntraeger <borntraeger@de.ibm.com>
+M:	Cornelia Huck <cornelia.huck@de.ibm.com>
+L:	linux-s390@vger.kernel.org
+L:	virtualization@lists.linux-foundation.org
+L:	kvm@vger.kernel.org
+S:	Supported
+F:	drivers/s390/virtio/
+
 VIRTIO GPU DRIVER
 M:	David Airlie <airlied@linux.ie>
 M:	Gerd Hoffmann <kraxel@redhat.com>
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include "imx25.dtsi"
 
@@ -114,8 +115,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio2 1 0>;
-	wp-gpios = <&gpio2 0 0>;
+	cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
 	bus-width = <4>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio1 1 0>;
-	wp-gpios = <&gpio1 9 0>;
+	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio1 1 0>;
-	wp-gpios = <&gpio1 9 0>;
+	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index ab4ba39f2ed9..b0d5542ac829 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
 &esdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc3>;
-	cd-gpios = <&gpio3 11 0>;
-	wp-gpios = <&gpio3 12 0>;
+	cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
 	bus-width = <8>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio3 13 0>;
-	wp-gpios = <&gpio4 11 0>;
+	cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
 	pinctrl-0 = <&pinctrl_esdhc2>,
 		    <&pinctrl_esdhc2_cdwp>;
 	vmmc-supply = <&reg_3p3v>;
-	wp-gpios = <&gpio1 2 0>;
-	cd-gpios = <&gpio1 4 0>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
 };
 
 &esdhc1 {
-	cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
 };
 
 &esdhc2 {
-	cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
 &esdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc2>;
-	cd-gpios = <&gpio3 25 0>;
-	wp-gpios = <&gpio2 19 0>;
+	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 43cb3fd76be7..5111f5170d53 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -305,8 +305,8 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio1 4 0>;
-	wp-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -314,8 +314,8 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
-	wp-gpios = <&gpio7 1 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 78df05e9d1ce..d6515f7a56c4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -11,6 +11,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
@@ -196,8 +197,8 @@
 };
 
 &usdhc3 {
-	cd-gpios = <&gpio6 11 0>;
-	wp-gpios = <&gpio6 14 0>;
+	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3
diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
index 703539cf36d3..00bd63e63d0c 100644
--- a/arch/arm/boot/dts/imx6q-gk802.dts
+++ b/arch/arm/boot/dts/imx6q-gk802.dts
@@ -7,6 +7,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
@@ -161,7 +162,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
-	cd-gpios = <&gpio6 11 0>;
+	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index a43abfa21e33..5645d52850a7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -251,7 +251,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
-	cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -260,7 +260,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
-	cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index e6d9195a1da7..f4d6ae564ead 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -173,7 +173,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
@@ -181,7 +181,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 1d85de2befb3..a47a0399a172 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -392,7 +392,7 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 	no-1-8-v;
 	status = "okay";
 };
@@ -400,7 +400,7 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
 	no-1-8-v;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 59e5d15e3ec4..ff41f83551de 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -258,6 +258,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio1 4 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index 2c253d6d20bd..45e7c39e80d5 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -1,3 +1,5 @@
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	regulators {
 		compatible = "simple-bus";
@@ -181,7 +183,7 @@
 &usdhc2 { /* module slot */
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio2 2 0>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b5756c21ea1d..4493f6e99330 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -318,7 +318,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 86f03c1b147c..a857d1294609 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -324,7 +324,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 4a8d97f47759..1afe3385e2d2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -417,7 +417,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62a82f3eba88..6dd0b764e036 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -299,6 +299,6 @@
 		&pinctrl_hummingboard_usdhc2
 	>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio1 4 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 3af16dfe417b..d7fe6672d00c 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -453,7 +453,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -461,7 +461,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio2 6 0>;
+	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1ce6133b67f5..9e6ecd99b472 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -409,8 +409,8 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio1 4 0>;
-	wp-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 	status = "disabled";
 };
@@ -418,7 +418,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3
 		     &pinctrl_usdhc3_cdwp>;
-	cd-gpios = <&gpio1 27 0>;
-	wp-gpios = <&gpio1 29 0>;
+	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
 	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..3373fd958e95 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -342,7 +342,7 @@
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
 	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
@@ -351,6 +351,6 @@
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
 	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 3b24b12651b2..e329ca5c3322 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -467,8 +467,8 @@
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-	cd-gpios = <&gpio6 15 0>;
-	wp-gpios = <&gpio1 13 0>;
+	cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index e00c44f6a0df..782379320517 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -448,8 +448,8 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
-	wp-gpios = <&gpio7 1 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -457,7 +457,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio2 6 0>;
+	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a626e6dd8022..944eb81cb2b8 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -562,8 +562,8 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <8>;
-	cd-gpios = <&gpio2 2 0>;
-	wp-gpios = <&gpio2 3 0>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
@@ -571,8 +571,8 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <8>;
-	cd-gpios = <&gpio2 0 0>;
-	wp-gpios = <&gpio2 1 0>;
+	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index f02b80b41d4f..da08de324e9e 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -680,7 +680,7 @@
 	pinctrl-0 = <&pinctrl_usdhc1>;
 	bus-width = <4>;
 	no-1-8-v;
-	cd-gpios = <&gpio7 2 0>;
+	cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	status = "okay";
 };
@@ -690,7 +690,7 @@
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
 	no-1-8-v;
-	cd-gpios = <&gpio7 3 0>;
+	cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -9,6 +9,8 @@
  *
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	regulators {
 		compatible = "simple-bus";
@@ -250,13 +252,13 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio3 9 0>;
+	cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 945887d3fdb3..b84dff2e94ea 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -617,8 +617,8 @@
 	pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio4 7 0>;
-	wp-gpios = <&gpio4 6 0>;
+	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
@@ -627,8 +627,8 @@
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
-	cd-gpios = <&gpio5 0 0>;
-	wp-gpios = <&gpio4 29 0>;
+	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
@@ -637,6 +637,6 @@
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-	cd-gpios = <&gpio3 22 0>;
+	cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index e3c0b63c2205..115f3fd78971 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -49,7 +49,7 @@
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
 	keep-power-in-suspend;
 	enable-sdio-wakeup;
@@ -61,7 +61,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
 	bus-width = <8>;
-	cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
 	no-1-8-v;
 	keep-power-in-suspend;
 	enable-sdio-wakup;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index cef04cef3a80..ac88c3467078 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -293,7 +293,7 @@
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
 	keep-power-in-suspend;
 	enable-sdio-wakeup;
@@ -304,7 +304,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 4d1a4b977d84..fdd1d7c9a5cc 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -234,8 +234,8 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio5 0 0>;
-	wp-gpios = <&gpio5 1 0>;
+	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
 	enable-sdio-wakeup;
 	keep-power-in-suspend;
 	status = "okay";
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+				  unsigned int size)
+{
+	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+	if (!ptr)
+		return -EFAULT;
+	memcpy(ret, ptr, size);
+	return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
 	u8 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 1);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 1);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 1);
 
 	return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
 	u16 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 2);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 2);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 2);
 
 	return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
 	u32 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 4);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 4);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 4);
 
 	return (u64)err << 32 | ntohl(ret);
 }
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
 		case BPF_LD | BPF_B | BPF_ABS:
 			load_order = 0;
 load:
-			/* the interpreter will deal with the negative K */
-			if ((int)k < 0)
-				return -ENOTSUPP;
 			emit_mov_i(r_off, k, ctx);
 load_common:
 			ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
 				emit(ARM_SUB_I(r_scratch, r_skb_hl, 1 << load_order), ctx);
 				emit(ARM_CMP_R(r_scratch, r_off), ctx);
-				condt = ARM_COND_HS;
+				condt = ARM_COND_GE;
 			} else {
 				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 				condt = ARM_COND_HI;
 			}
 
+			/*
+			 * test for negative offset, only if we are
+			 * currently scheduled to take the fast
+			 * path. this will update the flags so that
+			 * the slowpath instruction are ignored if the
+			 * offset is negative.
+			 *
+			 * for loard_order == 0 the HI condition will
+			 * make loads at offset 0 take the slow path too.
+			 */
+			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
+			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), ctx);
@@ -860,9 +889,11 @@ b_epilogue:
 			off = offsetof(struct sk_buff, vlan_tci);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-			else
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+			else {
+				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+			}
 			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454a..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
  *
  */
 ENTRY(cpu_switch_to)
-	add	x8, x0, #THREAD_CPU_CONTEXT
+	mov	x10, #THREAD_CPU_CONTEXT
+	add	x8, x0, x10
 	mov	x9, sp
 	stp	x19, x20, [x8], #16		// store callee-saved registers
 	stp	x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
 	stp	x27, x28, [x8], #16
 	stp	x29, x9, [x8], #16
 	str	lr, [x8]
-	add	x8, x1, #THREAD_CPU_CONTEXT
+	add	x8, x1, x10
 	ldp	x19, x20, [x8], #16		// restore callee-saved registers
 	ldp	x21, x22, [x8], #16
 	ldp	x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
 static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = d->affinity;
+	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
 	struct irq_chip *c;
 	bool ret = false;
 
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(d->affinity, affinity);
+		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
 
 	return ret;
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
 #include <mach/pm.h>
 
+static bool disable_cpu_idle_poll;
 
 static cycle_t read_cycle_count(struct clocksource *cs)
 {
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
 	return 0;
 }
 
-static void comparator_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evdev)
+static int comparator_shutdown(struct clock_event_device *evdev)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_debug("%s: start\n", evdev->name);
-		/* FALLTHROUGH */
-	case CLOCK_EVT_MODE_RESUME:
+	pr_debug("%s: %s\n", __func__, evdev->name);
+	sysreg_write(COMPARE, 0);
+
+	if (disable_cpu_idle_poll) {
+		disable_cpu_idle_poll = false;
 		/*
-		 * If we're using the COUNT and COMPARE registers we
-		 * need to force idle poll.
+		 * Only disable idle poll if we have forced that
+		 * in a previous call.
 		 */
-		cpu_idle_poll_ctrl(true);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		sysreg_write(COMPARE, 0);
-		pr_debug("%s: stop\n", evdev->name);
-		if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
-		    evdev->mode == CLOCK_EVT_MODE_RESUME) {
-			/*
-			 * Only disable idle poll if we have forced that
-			 * in a previous call.
-			 */
-			cpu_idle_poll_ctrl(false);
-		}
-		break;
-	default:
-		BUG();
+		cpu_idle_poll_ctrl(false);
 	}
+	return 0;
+}
+
+static int comparator_set_oneshot(struct clock_event_device *evdev)
+{
+	pr_debug("%s: %s\n", __func__, evdev->name);
+
+	disable_cpu_idle_poll = true;
+	/*
+	 * If we're using the COUNT and COMPARE registers we
+	 * need to force idle poll.
+	 */
+	cpu_idle_poll_ctrl(true);
+
+	return 0;
 }
 
 static struct clock_event_device comparator = {
-	.name		= "avr32_comparator",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.shift		= 16,
-	.rating		= 50,
-	.set_next_event	= comparator_next_event,
-	.set_mode	= comparator_mode,
+	.name			= "avr32_comparator",
+	.features		= CLOCK_EVT_FEAT_ONESHOT,
+	.shift			= 16,
+	.rating			= 50,
+	.set_next_event		= comparator_next_event,
+	.set_state_shutdown	= comparator_shutdown,
+	.set_state_oneshot	= comparator_set_oneshot,
+	.tick_resume		= comparator_set_oneshot,
 };
 
 void read_persistent_clock(struct timespec *ts)
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 0c3f25ee3381..f8de767ce2bc 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define iowrite16 writew
 #define iowrite32 writel
 
+#define ioread16be(addr)	be16_to_cpu(readw(addr))
+#define ioread32be(addr)	be32_to_cpu(readl(addr))
+#define iowrite16be(v, addr)	writew(cpu_to_be16(v), (addr))
+#define iowrite32be(v, addr)	writel(cpu_to_be32(v), (addr))
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
 int main(void)
 {
-	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
-	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
-	BLANK();
+	DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+	DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
-	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
-	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+	DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
-	DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 	DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
 	DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
  */
 ENTRY(__switch_to)
 	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
-	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
-	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
-	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
+	lgr	%r1,%r2
+	aghi	%r1,__TASK_thread		# thread_struct of prev task
+	lg	%r4,__TASK_thread_info(%r2)	# get thread_info of prev
+	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
+	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
+	lgr	%r1,%r3
+	aghi	%r1,__TASK_thread		# thread_struct of next task
 	lgr	%r15,%r5
 	aghi	%r15,STACK_INIT			# end of kernel stack of next
 	stg	%r3,__LC_CURRENT		# store task struct of next
 	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
+	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
 	LAST_BREAK %r14
 	lg	%r15,__LC_KERNEL_STACK
 	lg	%r14,__TI_task(%r12)
+	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
 	jz	2f
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
 	vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
 
 	location = get_trap_ip(regs);
 
-	asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
 	/* Check for vector register enablement */
 	if (MACHINE_HAS_VX && !current->thread.vxrs &&
 	    (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e018..6b755d125783 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-	free_bootmem(__pa(begin), end - begin);
+	free_bootmem_late(__pa(begin), end - begin);
 }
 
 static int __init setup_initrd(char *str)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..5a1844765a7a 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
 	movl	RDX(%rsp), %edx		/* arg3 */
 	movl	RSI(%rsp), %ecx		/* arg4 */
 	movl	RDI(%rsp), %r8d		/* arg5 */
-	movl	%ebp, %r9d		/* arg6 */
 	.endm
 
 	.macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
 
 sysenter_auditsys:
 	auditsys_entry_common
+	movl	%ebp, %r9d		/* reload 6th syscall arg */
 	jmp	sysenter_dispatch
 
 sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
 	 * 32-bit zero extended:
 	 */
 	ASM_STAC
-1:	movl	(%r8), %ebp
+1:	movl	(%r8), %r9d
 	_ASM_EXTABLE(1b, ia32_badarg)
 	ASM_CLAC
 	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
 cstar_do_call:
 	/* 32-bit syscall -> 64-bit C ABI argument conversion */
 	movl	%edi, %r8d		/* arg5 */
-	movl	%ebp, %r9d		/* arg6 */
+	/* r9 already loaded */		/* arg6 */
 	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
 	movl	%ebx, %edi		/* arg1 */
 	movl	%edx, %edx		/* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
 	call	*ia32_sys_call_table(, %rax, 8)
 	movq	%rax, RAX(%rsp)
 1:
-	movl	RCX(%rsp), %ebp
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:
 
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
+	movl	%r9d, R9(%rsp)		/* register to be clobbered by call */
 	auditsys_entry_common
+	movl	R9(%rsp), %r9d		/* reload 6th syscall arg */
 	jmp	cstar_dispatch
 
 sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
 	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 	jz	cstar_auditsys
 #endif
+	xchgl	%r9d, %ebp
 	SAVE_EXTRA_REGS
 	xorl	%eax, %eax		/* Do not leak kernel information */
 	movq	%rax, R11(%rsp)
 	movq	%rax, R10(%rsp)
-	movq	%rax, R9(%rsp)
+	movq	%r9, R9(%rsp)
 	movq	%rax, R8(%rsp)
 	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
 	call	syscall_trace_enter
+	movl	R9(%rsp), %r9d
 
 	/* Reload arg registers from stack. (see sysenter_tracesys) */
 	movl	RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
 	movl	%eax, %eax		/* zero extension */
 
 	RESTORE_EXTRA_REGS
+	xchgl	%ebp, %r9d
 	jmp	cstar_do_call
 END(entry_SYSCALL_compat)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
 struct kvm_sync_regs {
 };
 
-#define KVM_QUIRK_LINT0_REENABLED	(1 << 0)
-#define KVM_QUIRK_CD_NW_CLEARED		(1 << 1)
+#define KVM_X86_QUIRK_LINT0_REENABLED	(1 << 0)
+#define KVM_X86_QUIRK_CD_NW_CLEARED	(1 << 1)
 
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 188076161c1b..63eb68b73589 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 		return 0;
 
 	/*
+	 * Getting up-to-date values requires an SMP IPI which is not
+	 * possible if we're being called in interrupt context. Return
+	 * the cached values instead.
+	 */
+	if (unlikely(in_interrupt()))
+		goto out;
+
+	/*
 	 * Notice that we don't perform the reading of an RMID
 	 * atomically, because we can't hold a spin lock across the
 	 * IPIs.
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 0b39173dd971..1e173f6285c7 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
 	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
 	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+	setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
 	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
 	setup_clear_cpu_cap(X86_FEATURE_AVX);
 	setup_clear_cpu_cap(X86_FEATURE_AVX2);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512F);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
+	setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
+	setup_clear_cpu_cap(X86_FEATURE_MPX);
 
 	return 1;
 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	for (i = 0; i < APIC_LVT_NUM; i++)
 		apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
 	apic_update_lvtt(apic);
-	if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED))
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
 		apic_set_reg(apic, APIC_LVT0,
 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
 	apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..dc0a84a6f309 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
 	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }
 
+static u8 mtrr_disabled_type(void)
+{
+	/*
+	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
+	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
+	 * memory type is applied to all of physical memory.
+	 */
+	return MTRR_TYPE_UNCACHABLE;
+}
+
 /*
 * Three terms are used in the following code:
 * - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
 	/* output fields. */
 	int mem_type;
+	/* mtrr is completely disabled? */
+	bool mtrr_disabled;
 	/* [start, end) is not fully covered in MTRRs? */
 	bool partial_map;
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
 static void mtrr_lookup_start(struct mtrr_iter *iter)
 {
 	if (!mtrr_is_enabled(iter->mtrr_state)) {
-		iter->partial_map = true;
+		iter->mtrr_disabled = true;
 		return;
 	}
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
 	iter->mtrr_state = mtrr_state;
 	iter->start = start;
 	iter->end = end;
+	iter->mtrr_disabled = false;
 	iter->partial_map = false;
 	iter->fixed = false;
 	iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 		return MTRR_TYPE_WRBACK;
 	}
 
-	/* It is not covered by MTRRs. */
-	if (iter.partial_map) {
-		/*
-		 * We just check one page, partially covered by MTRRs is
-		 * impossible.
-		 */
-		WARN_ON(type != -1);
-		type = mtrr_default_type(mtrr_state);
-	}
+	if (iter.mtrr_disabled)
+		return mtrr_disabled_type();
+
+	/*
+	 * We just check one page, partially covered by MTRRs is
+	 * impossible.
+	 */
+	WARN_ON(iter.partial_map);
+
+	/* not contained in any MTRRs. */
+	if (type == -1)
+		return mtrr_default_type(mtrr_state);
+
 	return type;
 }
 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 			return false;
 	}
 
+	if (iter.mtrr_disabled)
+		return true;
+
 	if (!iter.partial_map)
 		return true;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bbc678a66b18..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	 * does not do it - this results in some delay at
 	 * reboot
 	 */
-	if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED))
+	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
 		cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
 	mark_dirty(svm->vmcb, VMCB_CR);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b4e9384717a..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 
 	if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
 		ipat = VMX_EPT_IPAT_BIT;
-		cache = MTRR_TYPE_UNCACHABLE;
+		if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
+			cache = MTRR_TYPE_WRBACK;
+		else
+			cache = MTRR_TYPE_UNCACHABLE;
 		goto exit;
 	}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
 	return kvm_register_write(vcpu, reg, val);
 }
 
+static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
+{
+	return !(kvm->arch.disabled_quirks & quirk);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cc5ccc415cc0..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
 		    !PageReserved(pfn_to_page(start_pfn + i)))
 			return 1;
 
-	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
-
 	return 0;
 }
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	pgprot_t prot;
 	int retval;
 	void __iomem *ret_addr;
-	int ram_region;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
-	/* First check if whole region can be identified as RAM or not */
-	ram_region = region_is_ram(phys_addr, size);
-	if (ram_region > 0) {
-		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
-			  (unsigned long int)phys_addr,
-			  (unsigned long int)last_addr);
+	pfn = phys_addr >> PAGE_SHIFT;
+	last_pfn = last_addr >> PAGE_SHIFT;
+	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+				  __ioremap_check_ram) == 1) {
+		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+			  &phys_addr, &last_addr);
 		return NULL;
 	}
-	/* If could not be identified(-1), check page by page */
-	if (ram_region < 0) {
-		pfn = phys_addr >> PAGE_SHIFT;
-		last_pfn = last_addr >> PAGE_SHIFT;
-		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
-					  __ioremap_check_ram) == 1)
-			return NULL;
-	}
 	/*
 	 * Mappings have to be page-aligned
 	 */
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 	}
 }
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_flags & VM_MPX)
+		return "[mpx]";
+	return NULL;
+}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7a657f58bbea..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -20,20 +20,6 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/mpx.h>
 
-static const char *mpx_mapping_name(struct vm_area_struct *vma)
-{
-	return "[mpx]";
-}
-
-static struct vm_operations_struct mpx_vma_ops = {
-	.name = mpx_mapping_name,
-};
-
-static int is_mpx_vma(struct vm_area_struct *vma)
-{
-	return (vma->vm_ops == &mpx_vma_ops);
-}
-
 static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
 {
 	if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
 /*
 * This is really a simplified "vm_mmap". it only handles MPX
 * bounds tables (the bounds directory is user-allocated).
- *
- * Later on, we use the vma->vm_ops to uniquely identify these
- * VMAs.
 */
 static unsigned long mpx_mmap(unsigned long len)
 {
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
 		ret = -ENOMEM;
 		goto out;
 	}
-	vma->vm_ops = &mpx_vma_ops;
 
 	if (vm_flags & VM_LOCKED) {
 		up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
 		 * so stop immediately and return an error. This
 		 * probably results in a SIGSEGV.
 		 */
-		if (!is_mpx_vma(vma))
+		if (!(vma->vm_flags & VM_MPX))
 			return -EINVAL;
 
 		len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
 	 * lots of tables even though we have no actual table
 	 * entries in use.
 	 */
-	while (next && is_mpx_vma(next))
+	while (next && (next->vm_flags & VM_MPX))
 		next = next->vm_next;
-	while (prev && is_mpx_vma(prev))
+	while (prev && (prev->vm_flags & VM_MPX))
 		prev = prev->vm_prev;
 	/*
 	 * We know 'start' and 'end' lie within an area controlled
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
 	} else {
 		unsigned long addr;
 		unsigned long nr_pages =
-			f->flush_end - f->flush_start / PAGE_SIZE;
+			(f->flush_end - f->flush_start) / PAGE_SIZE;
 		addr = f->flush_start;
 		while (addr < f->flush_end) {
 			__flush_tlb_single(addr);
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..d6e5ba3399f0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
- * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
- * responsibility to ensure that @bio is not freed before the split.
+ * Unless this is a discard request the newly allocated bio will point
+ * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
+ * @bio is not freed before the split.
 */
 struct bio *bio_split(struct bio *bio, int sectors,
		       gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	BUG_ON(sectors <= 0);
 	BUG_ON(sectors >= bio_sectors(bio));
 
-	split = bio_clone_fast(bio, gfp, bs);
+	/*
+	 * Discards need a mutable bio_vec to accommodate the payload
+	 * required by the DSM TRIM and UNMAP commands.
+	 */
+	if (bio->bi_rw & REQ_DISCARD)
+		split = bio_clone_bioset(bio, gfp, bs);
+	else
+		split = bio_clone_fast(bio, gfp, bs);
+
 	if (!split)
 		return NULL;
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
 	bio->bi_css = blkcg_css;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
 
 /**
 * bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
 	bio->bi_css = task_get_css(current, blkio_cgrp_id);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_current);
 
 /**
 * bio_disassociate_task - undo bio_associate_current()
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9da02c021ebe..d6283b3f5db5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		return -EINVAL;
 
 	disk = get_gendisk(MKDEV(major, minor), &part);
-	if (!disk || part)
+	if (!disk)
 		return -EINVAL;
+	if (part) {
+		put_disk(disk);
+		return -EINVAL;
+	}
 
 	rcu_read_lock();
 	spin_lock_irq(disk->queue->queue_lock);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..db5d9f79a247 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
 		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
 					 dev->max_sectors);
 
+	if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
+		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
+					 dev->max_sectors);
+
 	if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
 		dev->max_sectors = ATA_MAX_SECTORS_LBA48;
@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
 	{ "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
 
+	/*
+	 * Causes silent data corruption with higher max sects.
+	 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
+	 */
+	{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
+
 	/* Devices we expect to fail diagnostics */
 
 	/* Devices where NCQ should be avoided */
@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
 						ATA_HORKAGE_FIRMWARE_WARN },
 
-	/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
 	{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
 	{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+	{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
 
 	/* Blacklist entries taken from Silicon Image 3124/3132
 	   Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
 						ATA_HORKAGE_ZERO_AFTER_TRIM, },
-	{ "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
+	{ "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 	{ "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
						ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
+	/* devices that don't properly handle TRIM commands */
+	{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+
 	/*
	 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
	 * (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
 	else /* In the ancient relic department - skip all of this */
 		return 0;
 
-	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
+	/* On some disks, this command causes spin-up, so we need longer timeout */
+	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
 				       ATA_LFLAG_NO_SRST |
 				       ATA_LFLAG_ASSUME_ATA;
 		}
+	} else if (vendor == 0x11ab && devid == 0x4140) {
+		/* Marvell 4140 quirks */
+		ata_for_each_link(link, ap, EDGE) {
+			/* port 4 is for SEMB device and it doesn't like SRST */
+			if (link->pmp == 4)
+				link->flags |= ATA_LFLAG_DISABLED;
+		}
 	}
 }
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..641a61a59e89 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
 		rbuf[14] = (lowest_aligned >> 8) & 0x3f;
 		rbuf[15] = lowest_aligned;
 
-		if (ata_id_has_trim(args->id)) {
+		if (ata_id_has_trim(args->id) &&
+		    !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
 			rbuf[14] |= 0x80; /* LBPME */
 
 			if (ata_id_has_zero_after_trim(args->id) &&
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
 
 	if (!ata_id_has_trim(ata_dev->id))
 		mode = "unsupported";
+	else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
+		mode = "forced_unsupported";
 	else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
 		mode = "forced_unqueued";
 	else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..3177b245d2bd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
 	while ((entry = llist_del_all(&cq->list)) != NULL) {
 		entry = llist_reverse_order(entry);
 		do {
+			struct request_queue *q = NULL;
+
 			cmd = container_of(entry, struct nullb_cmd, ll_list);
 			entry = entry->next;
+			if (cmd->rq)
+				q = cmd->rq->q;
 			end_cmd(cmd);
 
-			if (cmd->rq) {
-				struct request_queue *q = cmd->rq->q;
-
-				if (!q->mq_ops && blk_queue_stopped(q)) {
-					spin_lock(q->queue_lock);
-					if (blk_queue_stopped(q))
-						blk_start_queue(q);
-					spin_unlock(q->queue_lock);
-				}
+			if (q && !q->mq_ops && blk_queue_stopped(q)) {
+				spin_lock(q->queue_lock);
+				if (blk_queue_stopped(q))
+					blk_start_queue(q);
+				spin_unlock(q->queue_lock);
 			}
 		} while (entry);
 	}
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..9ceb8ac68fdc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
 
 	/* Read Verbose Config Version Info */
 	skb = btbcm_read_verbose_config(hdev);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
-
-	BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
-		get_unaligned_le16(skb->data + 5));
-	kfree_skb(skb);
+	if (!IS_ERR(skb)) {
+		BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
+			get_unaligned_le16(skb->data + 5));
+		kfree_skb(skb);
+	}
 
 	set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
 	return ret;
 }
 
-static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem)
+static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
+	int len)
 {
 	struct cper_mem_err_compact cmem;
 
+	/* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
+	if (len == sizeof(struct cper_sec_mem_err_old) &&
+	    (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
+		pr_err(FW_WARN "valid bits set for fields beyond structure\n");
+		return;
+	}
 	if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
 		printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
 	if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
 	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
 		struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
 		printk("%s""section_type: memory error\n", newpfx);
-		if (gdata->error_data_length >= sizeof(*mem_err))
-			cper_print_mem(newpfx, mem_err);
+		if (gdata->error_data_length >=
+		    sizeof(struct cper_sec_mem_err_old))
+			cper_print_mem(newpfx, mem_err,
+				       gdata->error_data_length);
 		else
 			goto err_section_too_small;
 	} else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 01657830b470..e9fde72cf038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1614,6 +1614,9 @@ struct amdgpu_uvd {
 #define AMDGPU_MAX_VCE_HANDLES	16
 #define AMDGPU_VCE_FIRMWARE_OFFSET	256
 
+#define AMDGPU_VCE_HARVEST_VCE0	(1 << 0)
+#define AMDGPU_VCE_HARVEST_VCE1	(1 << 1)
+
 struct amdgpu_vce {
 	struct amdgpu_bo	*vcpu_bo;
 	uint64_t		gpu_addr;
@@ -1626,6 +1629,7 @@ struct amdgpu_vce {
 	const struct firmware	*fw;	/* VCE firmware */
 	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
 	struct amdgpu_irq_src	irq;
+	unsigned		harvest_config;
 };
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 5533434c7a8f..31ad444c6386 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
 		dev_info.vram_type = adev->mc.vram_type;
 		dev_info.vram_bit_width = adev->mc.vram_width;
+		dev_info.vce_harvest_config = adev->vce.harvest_config;
 
 		return copy_to_user(out, &dev_info,
 				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 1a2d419cbf16..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
 	amdgpu_free_extended_power_table(adev);
 }
 
+#define ixSMUSVI_NB_CURRENTVID 0xD8230044
+#define CURRENT_NB_VID_MASK 0xff000000
+#define CURRENT_NB_VID__SHIFT 24
+#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
+#define CURRENT_GFX_VID_MASK 0xff000000
+#define CURRENT_GFX_VID__SHIFT 24
+
 static void
 cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
 					       struct seq_file *m)
 {
+	struct cz_power_info *pi = cz_get_pi(adev);
 	struct amdgpu_clock_voltage_dependency_table *table =
 		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
-	u32 current_index =
-		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
-		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
-		TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
-	u32 sclk, tmp;
-	u16 vddc;
-
-	if (current_index >= NUM_SCLK_LEVELS) {
-		seq_printf(m, "invalid dpm profile %d\n", current_index);
+	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
+		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
+		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+	u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
+				       TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
+	u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+				      TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
+	u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
+				      TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
+	u32 sclk, vclk, dclk, ecclk, tmp;
+	u16 vddnb, vddgfx;
+
+	if (sclk_index >= NUM_SCLK_LEVELS) {
+		seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
 	} else {
-		sclk = table->entries[current_index].clk;
-		tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
-		SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
-		SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
-		vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
-		seq_printf(m, "power level %d sclk: %u vddc: %u\n",
-			   current_index, sclk, vddc);
+		sclk = table->entries[sclk_index].clk;
+		seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
+	}
+
+	tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
+	       CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
+	vddnb
= cz_convert_8bit_index_to_voltage(adev, (u16)tmp); + tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) & + CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; + vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); + seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx); + + seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); + if (!pi->uvd_power_gated) { + if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "invalid uvd dpm level %d\n", uvd_index); + } else { + vclk = uvd_table->entries[uvd_index].vclk; + dclk = uvd_table->entries[uvd_index].dclk; + seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk); + } + } + + seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); + if (!pi->vce_power_gated) { + if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { + seq_printf(m, "invalid vce dpm level %d\n", vce_index); + } else { + ecclk = vce_table->entries[vce_index].ecclk; + seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk); + } } } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 6e77964f1b64..e70a26f587a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v10_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v10_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v10_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7f7abb0e0be5..dcb402ee048a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) struct drm_device *dev = crtc->dev; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + unsigned type; switch (mode) { case DRM_MODE_DPMS_ON: @@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) dce_v11_0_vga_enable(crtc, true); amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); dce_v11_0_vga_enable(crtc, false); + /* Make sure VBLANK interrupt is still enabled */ + type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); + amdgpu_irq_update(adev, &adev->crtc_irq, type); drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); dce_v11_0_crtc_load_lut(crtc); break; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index d62c4002e39c..d1064ca3670e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -35,6 +35,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" #include "gca/gfx_8_0_d.h" +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 @@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (idx = 0; idx < 2; ++idx) { + 
+ if (adev->vce.harvest_config & (1 << idx)) + continue; + if(idx == 0) WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); @@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev) return 0; } +#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074 +#define VCE_HARVEST_FUSE_MACRO__SHIFT 27 +#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000 + +static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) +{ + u32 tmp; + unsigned ret; + + if (adev->flags & AMDGPU_IS_APU) + tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & + VCE_HARVEST_FUSE_MACRO__MASK) >> + VCE_HARVEST_FUSE_MACRO__SHIFT; + else + tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) & + CC_HARVEST_FUSES__VCE_DISABLE_MASK) >> + CC_HARVEST_FUSES__VCE_DISABLE__SHIFT; + + switch (tmp) { + case 1: + ret = AMDGPU_VCE_HARVEST_VCE0; + break; + case 2: + ret = AMDGPU_VCE_HARVEST_VCE1; + break; + case 3: + ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; + break; + default: + ret = 0; + } + + return ret; +} + static int vce_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); + + if ((adev->vce.harvest_config & + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) == + (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) + return -ENOENT; + vce_v3_0_set_ring_funcs(adev); vce_v3_0_set_irq_funcs(adev); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index f69b92535505..5ae5c6923128 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev) planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); + drm_crtc_vblank_reset(&crtc->base); dc->crtc = &crtc->base; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 60b0c13d7ff5..6fad1f9648f3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) pm_runtime_enable(dev->dev); - ret = atmel_hlcdc_dc_modeset_init(dev); + ret = drm_vblank_init(dev, 1); if (ret < 0) { - dev_err(dev->dev, "failed to initialize mode setting\n"); + dev_err(dev->dev, "failed to initialize vblank\n"); goto err_periph_clk_disable; } - drm_mode_config_reset(dev); - - ret = drm_vblank_init(dev, 1); + ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { - dev_err(dev->dev, "failed to initialize vblank\n"); + dev_err(dev->dev, "failed to initialize mode setting\n"); goto err_periph_clk_disable; } + drm_mode_config_reset(dev); + pm_runtime_get_sync(dev->dev); ret = drm_irq_install(dev, dc->hlcdc->irq); pm_runtime_put_sync(dev->dev); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 357bd04a173b..fed748311b92 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - connector->status = connector_status_unknown; - + list_for_each_entry(connector, &dev->mode_config.connector_list, head) if (connector->funcs->reset) connector->funcs->reset(connector); - } } EXPORT_SYMBOL(drm_mode_config_reset); diff --git 
a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index a6d8a3ee7750..260389acfb77 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_reg_read *reg = data; struct register_whitelist const *entry = whitelist; + unsigned size; + u64 offset; int i, ret = 0; for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { - if (entry->offset == reg->offset && + if (entry->offset == (reg->offset & -entry->size) && (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) break; } @@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev, if (i == ARRAY_SIZE(whitelist)) return -EINVAL; + /* We use the low bits to encode extra flags as the register should + * be naturally aligned (and those that are not so aligned merely + * limit the available flags for that register). + */ + offset = entry->offset; + size = entry->size; + size |= reg->offset ^ offset; + intel_runtime_pm_get(dev_priv); - switch (entry->size) { + switch (size) { + case 8 | 1: + reg->val = I915_READ64_2x32(offset, offset+4); + break; case 8: - reg->val = I915_READ64(reg->offset); + reg->val = I915_READ64(offset); break; case 4: - reg->val = I915_READ(reg->offset); + reg->val = I915_READ(offset); break; case 2: - reg->val = I915_READ16(reg->offset); + reg->val = I915_READ16(offset); break; case 1: - reg->val = I915_READ8(reg->offset); + reg->val = I915_READ8(offset); break; default: - MISSING_CASE(entry->size); ret = -EINVAL; goto out; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 882cccdad272..ac6fe40b99f7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) else if (boot_cpu_data.x86 > 3) tmp = pgprot_noncached(tmp); #endif -#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) +#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ + defined(__powerpc__) if (caching_flags & TTM_PL_FLAG_WC) tmp = pgprot_writecombine(tmp); else diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 3318de690e00..a2dbbbe0d8d7 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) struct cp2112_force_read_report report; int ret; + if (size > sizeof(dev->read_data)) + size = sizeof(dev->read_data); report.report = CP2112_DATA_READ_FORCE_SEND; report.length = cpu_to_be16(size); diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 6a9b05b328a9..7c811252c1ce 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, /* * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" * for the stylus. + * The check for mt_report_id ensures we don't process + * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical + * collection, but within the report ID. 
*/ if (field->physical == HID_DG_STYLUS) return 0; + else if ((field->physical == 0) && + (field->report->id != td->mt_report_id) && + (td->mt_report_id != -1)) + return 0; if (field->application == HID_DG_TOUCHSCREEN || field->application == HID_DG_TOUCHPAD) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 53e7de7cb9e2..20f9a653444c 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -87,6 +87,9 @@ static const struct hid_blacklist { { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 4c0ffca97bef..44958d79d598 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -1271,11 +1271,13 @@ fail_leds: pad_input_dev = NULL; wacom_wac->pad_registered = false; fail_register_pad_input: - input_unregister_device(touch_input_dev); + if (touch_input_dev) + input_unregister_device(touch_input_dev); wacom_wac->touch_input = NULL; wacom_wac->touch_registered = false; fail_register_touch_input: - input_unregister_device(pen_input_dev); + if (pen_input_dev) + input_unregister_device(pen_input_dev); wacom_wac->pen_input = NULL; wacom_wac->pen_registered = false; fail_register_pen_input: diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 232da89f4e88..0d244239e55d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom) features->x_max = 4096; features->y_max = 4096; } + else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { + features->device_type |= WACOM_DEVICETYPE_PAD; + } } /* diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index e8e2077c7244..13ea1ea23328 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev) if (src & MMA8452_TRANSIENT_SRC_XTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); if (src & MMA8452_TRANSIENT_SRC_YTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, - IIO_EV_TYPE_THRESH, + IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING), ts); } @@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev, static const struct iio_event_spec mma8452_transient_event[] = { { - .type = IIO_EV_TYPE_THRESH, + .type = IIO_EV_TYPE_MAG, .dir = IIO_EV_DIR_RISING, .mask_separate = BIT(IIO_EV_INFO_ENABLE), .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 
8d9c9b9215dd..d819823f7257 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c @@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi) indio_dev->channels = chip_info->channels; indio_dev->num_channels = chip_info->num_channels; + adc->chip_info = chip_info; + adc->transfer[0].tx_buf = &adc->tx_buf; adc->transfer[0].len = sizeof(adc->tx_buf); adc->transfer[1].rx_buf = adc->rx_buf; diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 480f335a0f9f..819632bf1fda 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c @@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev, struct vf610_adc *info = iio_priv(indio_dev); if ((readval == NULL) || - (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) + ((reg % 4) || (reg > VF610_REG_ADC_PCTL))) return -EINVAL; *readval = readl(info->regs + reg); diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index c1a218236be5..11a027adc204 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c @@ -200,7 +200,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, int *val, int *val2) { u8 reg; - u16 buf; + __be16 buf; int ret; struct stk3310_data *data = iio_priv(indio_dev); @@ -222,7 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, dev_err(&data->client->dev, "register read failed\n"); return ret; } - *val = swab16(buf); + *val = be16_to_cpu(buf); return IIO_VAL_INT; } @@ -235,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev, int val, int val2) { u8 reg; - u16 buf; + __be16 buf; int ret; unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); @@ -252,7 +252,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev, else return -EINVAL; - buf = swab16(val); + buf = cpu_to_be16(val); ret = regmap_bulk_write(data->regmap, reg, &buf, 2); if (ret < 0) dev_err(&client->dev, "failed to set PS threshold!\n"); @@ -301,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, int *val, int *val2, long mask) { u8 reg; - u16 buf; + __be16 buf; int ret; unsigned int index; struct stk3310_data *data = iio_priv(indio_dev); @@ -322,7 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, mutex_unlock(&data->lock); return ret; } - *val = swab16(buf); + *val = be16_to_cpu(buf); mutex_unlock(&data->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_INT_TIME: @@ -608,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client, if (ret < 0) return ret; - ret = iio_device_register(indio_dev); - if (ret < 0) { - dev_err(&client->dev, "device_register failed\n"); - stk3310_set_state(data, STK3310_STATE_STANDBY); - } - - if (client->irq <= 0) + if (client->irq < 0) client->irq = stk3310_gpio_probe(client); if (client->irq >= 0) { @@ -629,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client, client->irq); } + ret = iio_device_register(indio_dev); + if (ret < 0) { + dev_err(&client->dev, "device_register failed\n"); + stk3310_set_state(data, STK3310_STATE_STANDBY); + } + return ret; } diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index dcadfc4f0661..efb9350b0d76 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig @@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS config BMC150_MAGN tristate "Bosch BMC150 Magnetometer Driver" depends on I2C + select REGMAP_I2C select IIO_BUFFER select IIO_TRIGGERED_BUFFER help diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index 
d4c178869991..1347a1f2e46f 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c @@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data) goto err_poweroff; } if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { - dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); + dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id); ret = -ENODEV; goto err_poweroff; } - dev_dbg(&data->client->dev, "Chip id %x\n", ret); + dev_dbg(&data->client->dev, "Chip id %x\n", chip_id); preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; ret = bmc150_magn_set_odr(data, preset.odr); diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c index d927397a6ef7..706ebfd6297f 100644 --- a/drivers/iio/magnetometer/mmc35240.c +++ b/drivers/iio/magnetometer/mmc35240.c @@ -202,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set) coil_bit = MMC35240_CTRL0_RESET_BIT; return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, - MMC35240_CTRL0_REFILL_BIT, - coil_bit); + coil_bit, coil_bit); + } static int mmc35240_init(struct mmc35240_data *data) @@ -222,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data) /* * make sure we restore sensor characteristics, by doing - * a RESET/SET sequence + * a SET/RESET sequence, the axis polarity being naturally + * aligned after RESET */ - ret = mmc35240_hw_set(data, false); + ret = mmc35240_hw_set(data, true); if (ret < 0) return ret; usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); - ret = mmc35240_hw_set(data, true); + ret = mmc35240_hw_set(data, false); if (ret < 0) return ret; @@ -503,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client, } data = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); data->client = client; data->regmap = regmap; data->res = MMC35240_16_BITS_SLOW; diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c index cb2e8ad8bfdc..7a2b639eaa96 100644 --- a/drivers/iio/temperature/mlx90614.c +++ b/drivers/iio/temperature/mlx90614.c @@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev, *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: - *val = 13657; + *val = -13657; *val2 = 500000; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 2d7e503d13cb..871dbe56216a 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -31,6 +31,8 @@ * SOFTWARE. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/idr.h> @@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) u32 bar0 = 0, bar1 = 0; #ifdef CONFIG_X86_64 - if (WARN(pat_enabled(), - "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { + if (pat_enabled()) { + pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n"); ret = -ENODEV; goto bail; } diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c index 074a65ed17bb..766bf2660116 100644 --- a/drivers/input/input-leds.c +++ b/drivers/input/input-leds.c @@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type, { } +static int input_leds_get_count(struct input_dev *dev) +{ + unsigned int led_code; + int count = 0; + + for_each_set_bit(led_code, dev->ledbit, LED_CNT) + if (input_led_info[led_code].name) + count++; + + return count; +} + static int input_leds_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) @@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler, int led_no; int error; - num_leds = bitmap_weight(dev->ledbit, LED_CNT); + num_leds = input_leds_get_count(dev); if (!num_leds) return -ENXIO; @@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler, led->handle = &leds->handle; led->code = led_code; - if (WARN_ON(!input_led_info[led_code].name)) + if (!input_led_info[led_code].name) continue; led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index ce3d40004458..22b9ca901f4e 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) struct input_dev *dev = psmouse->dev; struct elantech_data *etd = psmouse->private; unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; - unsigned int x_res = 0, y_res = 0; + unsigned int x_res = 31, y_res = 31; if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) return -1; @@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) /* For X to recognize me as touchpad. */ input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); - input_abs_set_res(dev, ABS_X, x_res); - input_abs_set_res(dev, ABS_Y, y_res); /* * range of pressure and width is the same as v2, * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. 
@@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); - input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); - input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, ETP_PMAX_V2, 0, 0); /* @@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse) break; } + input_abs_set_res(dev, ABS_X, x_res); + input_abs_set_res(dev, ABS_Y, y_res); + if (etd->hw_version > 1) { + input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); + input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); + } + etd->y_max = y_max; etd->width = width; diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index b4d12e29abff..e36162b28c2a 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -15,6 +15,7 @@ */ #include <linux/kernel.h> +#include <linux/dmi.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/input/mt.h> @@ -34,6 +35,7 @@ struct goodix_ts_data { int abs_y_max; unsigned int max_touch_num; unsigned int int_trigger_type; + bool rotated_screen; }; #define GOODIX_MAX_HEIGHT 4096 @@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = { IRQ_TYPE_LEVEL_HIGH, }; +/* + * Those tablets have their coordinates origin at the bottom right + * of the tablet, as if rotated 180 degrees + */ +static const struct dmi_system_id rotated_screen[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "WinBook TW100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW100") + } + }, + { + .ident = "WinBook TW700", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), + DMI_MATCH(DMI_PRODUCT_NAME, "TW700") + }, + }, +#endif + {} +}; + /** * goodix_i2c_read - read data from a register of the i2c slave device. 
* @@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) int input_y = get_unaligned_le16(&coor_data[3]); int input_w = get_unaligned_le16(&coor_data[5]); + if (ts->rotated_screen) { + input_x = ts->abs_x_max - input_x; + input_y = ts->abs_y_max - input_y; + } + input_mt_slot(ts->input_dev, id); input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); @@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts) ts->abs_y_max = GOODIX_MAX_HEIGHT; ts->max_touch_num = GOODIX_MAX_CONTACTS; } + + ts->rotated_screen = dmi_check_system(rotated_screen); + if (ts->rotated_screen) + dev_dbg(&ts->client->dev, + "Applying '180 degrees rotated screen' quirk\n"); } /** diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index f2c6c352c55a..2c41107240de 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c @@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) goto err_out; } + /* TSC-25 data sheet specifies a delay after the RESET command */ + msleep(150); + /* set coordinate output rate */ buf[0] = buf[1] = 0xFF; ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index f58a196521a9..80285c71786e 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -429,7 +429,7 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf) goto unlock; } - if (buf[PAYLOAD_LENGTH] == 0) { + if (buf[PAYLOAD_LENGTH] == 0 || buf[PAYLOAD_LENGTH] > FRAME_MAXSIZE) { dev_err(&client->dev, "invalid payload length: %d\n", buf[PAYLOAD_LENGTH]); ret = -EIO; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8e9ec81ce4bb..da902baaa794 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -199,9 +199,10 @@ * Stream table. 
* * Linear: Enough to cover 1 << IDR1.SIDSIZE entries - * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) + * 2lvl: 128k L1 entries, + * 256 lazy entries per table (each table covers a PCI bus) */ -#define STRTAB_L1_SZ_SHIFT 16 +#define STRTAB_L1_SZ_SHIFT 20 #define STRTAB_SPLIT 8 #define STRTAB_L1_DESC_DWORDS 1 @@ -269,10 +270,10 @@ #define ARM64_TCR_TG0_SHIFT 14 #define ARM64_TCR_TG0_MASK 0x3UL #define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 -#define ARM64_TCR_IRGN0_SHIFT 24 +#define ARM64_TCR_IRGN0_SHIFT 8 #define ARM64_TCR_IRGN0_MASK 0x3UL #define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 -#define ARM64_TCR_ORGN0_SHIFT 26 +#define ARM64_TCR_ORGN0_SHIFT 10 #define ARM64_TCR_ORGN0_MASK 0x3UL #define CTXDESC_CD_0_TCR_SH0_SHIFT 12 #define ARM64_TCR_SH0_SHIFT 12 @@ -542,6 +543,9 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_HYP (1 << 12) u32 features; +#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) + u32 options; + struct arm_smmu_cmdq cmdq; struct arm_smmu_evtq evtq; struct arm_smmu_priq priq; @@ -602,11 +606,35 @@ struct arm_smmu_domain { static DEFINE_SPINLOCK(arm_smmu_devices_lock); static LIST_HEAD(arm_smmu_devices); +struct arm_smmu_option_prop { + u32 opt; + const char *prop; +}; + +static struct arm_smmu_option_prop arm_smmu_options[] = { + { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, + { 0, NULL}, +}; + static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) { return container_of(dom, struct arm_smmu_domain, domain); } +static void parse_driver_options(struct arm_smmu_device *smmu) +{ + int i = 0; + + do { + if (of_property_read_bool(smmu->dev->of_node, + arm_smmu_options[i].prop)) { + smmu->options |= arm_smmu_options[i].opt; + dev_notice(smmu->dev, "option %s\n", + arm_smmu_options[i].prop); + } + } while (arm_smmu_options[++i].opt); +} + /* Low-level queue manipulation functions */ static bool queue_full(struct arm_smmu_queue *q) { @@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, arm_smmu_sync_ste_for_sid(smmu, sid); /* It's likely that we'll want to use the new STE soon */ - arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); + if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) + arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); } static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) @@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) return 0; size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); - strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; + strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; desc->span = STRTAB_SPLIT + 1; desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, @@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) { void *strtab; u64 reg; - u32 size; + u32 size, l1size; int ret; struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; /* Calculate the L1 size, capped to the SIDSIZE */ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); size = min(size, smmu->sid_bits - STRTAB_SPLIT); - if (size + STRTAB_SPLIT < smmu->sid_bits) + cfg->num_l1_ents = 1 << size; + + size += STRTAB_SPLIT; + if (size < smmu->sid_bits) dev_warn(smmu->dev, "2-level strtab only covers %u/%u bits of SID\n", - size + STRTAB_SPLIT, smmu->sid_bits); + size, smmu->sid_bits); - cfg->num_l1_ents = 1 << size; - size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); - strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma, + l1size = 
cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); + strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, GFP_KERNEL); if (!strtab) { dev_err(smmu->dev, @@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) ret = arm_smmu_init_l1_strtab(smmu); if (ret) dma_free_coherent(smmu->dev, - cfg->num_l1_ents * - (STRTAB_L1_DESC_DWORDS << 3), + l1size, strtab, cfg->strtab_dma); return ret; @@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) if (irq > 0) smmu->gerr_irq = irq; + parse_driver_options(smmu); + /* Probe the h/w */ ret = arm_smmu_device_probe(smmu); if (ret) diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a98a7b27aca1..0649b94f5958 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width) static void domain_exit(struct dmar_domain *domain) { + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; struct page *freelist = NULL; - int i; /* Domain 0 is reserved, so dont process it */ if (!domain) @@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain) /* clear attached or cached domains */ rcu_read_lock(); - for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) - iommu_detach_domain(domain, g_iommus[i]); + for_each_active_iommu(iommu, drhd) + if (domain_type_is_vm(domain) || + test_bit(iommu->seq_id, domain->iommu_bmp)) + iommu_detach_domain(domain, iommu); rcu_read_unlock(); dma_free_pagelist(freelist); diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8c91fd5eb6fd..375be509e95f 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c @@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty) cs->hw.ser->tty = tty; atomic_set(&cs->hw.ser->refcnt, 1); init_completion(&cs->hw.ser->dead_cmp); - tty->disc_data = cs; + /* Set the amount of data we're willing to receive per call + * from the hardware driver to half of the input buffer size + * to leave some reserve. + * Note: We don't do flow control towards the hardware driver. + * If more data is received than will fit into the input buffer, + * it will be dropped and an error will be logged. This should + * never happen as the device is slow and the buffer size ample. + */ + tty->receive_room = RBUFSIZE/2; + /* OK.. Initialization of the datastructures and the HW is done.. Now * startup system and notify the LL that we are ready to run */ @@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty) } /* - * Read on the tty. - * Unused, received data goes only to the Gigaset driver. - */ -static ssize_t -gigaset_tty_read(struct tty_struct *tty, struct file *file, - unsigned char __user *buf, size_t count) -{ - return -EAGAIN; -} - -/* - * Write on the tty. - * Unused, transmit data comes only from the Gigaset driver. - */ -static ssize_t -gigaset_tty_write(struct tty_struct *tty, struct file *file, - const unsigned char *buf, size_t count) -{ - return -EAGAIN; -} - -/* * Ioctl on the tty. * Called in process context only. * May be re-entered by multiple ioctl calling threads. 
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = { .open = gigaset_tty_open, .close = gigaset_tty_close, .hangup = gigaset_tty_hangup, - .read = gigaset_tty_read, - .write = gigaset_tty_write, .ioctl = gigaset_tty_ioctl, .receive_buf = gigaset_tty_receive, .write_wakeup = gigaset_tty_wakeup, diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ed2346ddf4c9..e51de52eeb94 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) bitmap_super_t *sb; unsigned long chunksize, daemon_sleep, write_behind; - bitmap->storage.sb_page = alloc_page(GFP_KERNEL); + bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (bitmap->storage.sb_page == NULL) return -ENOMEM; bitmap->storage.sb_page->index = 0; @@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) sb->state = cpu_to_le32(bitmap->flags); bitmap->events_cleared = bitmap->mddev->events; sb->events_cleared = cpu_to_le64(bitmap->mddev->events); + bitmap->mddev->bitmap_info.nodes = 0; kunmap_atomic(sb); @@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) unsigned long sectors_reserved = 0; int err = -EINVAL; struct page *sb_page; + loff_t offset = bitmap->mddev->bitmap_info.offset; if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { chunksize = 128 * 1024 * 1024; @@ -584,9 +586,9 @@ re_read: bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); /* to 4k blocks */ bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); - bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); + offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, - bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); + bitmap->cluster_slot, offset); } if (bitmap->storage.file) { @@ -597,7 +599,7 @@ re_read: bitmap, bytes, sb_page); } else { err = read_sb_page(bitmap->mddev, - bitmap->mddev->bitmap_info.offset, + offset, sb_page, 0, sizeof(bitmap_super_t)); } @@ -611,8 +613,16 @@ re_read: daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; write_behind = le32_to_cpu(sb->write_behind); sectors_reserved = le32_to_cpu(sb->sectors_reserved); - nodes = le32_to_cpu(sb->nodes); - strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); + /* XXX: This is a hack to ensure that we don't use clustering + * in case: + * - dm-raid is in use and + * - the nodes written in bitmap_sb is erroneous. 
+ */ + if (!bitmap->mddev->sync_super) { + nodes = le32_to_cpu(sb->nodes); + strlcpy(bitmap->mddev->bitmap_info.cluster_name, + sb->cluster_name, 64); + } /* verify that the bitmap-specific fields are valid */ if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) @@ -671,7 +681,7 @@ out: kunmap_atomic(sb); /* Assiging chunksize is required for "re_read" */ bitmap->mddev->bitmap_info.chunksize = chunksize; - if (nodes && (bitmap->cluster_slot < 0)) { + if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { err = md_setup_cluster(bitmap->mddev, nodes); if (err) { pr_err("%s: Could not setup cluster service (%d)\n", @@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot, if (IS_ERR(bitmap)) return PTR_ERR(bitmap); - rv = bitmap_read_sb(bitmap); - if (rv) - goto err; - rv = bitmap_init_from_disk(bitmap, 0); if (rv) goto err; diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index fcfc4b9b2672..0072190515e0 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -44,6 +44,7 @@ struct resync_info { /* md_cluster_info flags */ #define MD_CLUSTER_WAITING_FOR_NEWDISK 1 +#define MD_CLUSTER_SUSPEND_READ_BALANCING 2 struct md_cluster_info { @@ -275,6 +276,9 @@ clear_bit: static void recover_prep(void *arg) { + struct mddev *mddev = arg; + struct md_cluster_info *cinfo = mddev->cluster_info; + set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); } static void recover_slot(void *arg, struct dlm_slot *slot) @@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots, cinfo->slot_number = our_slot; complete(&cinfo->completion); + clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); } static const struct dlm_lockspace_ops md_ls_ops = { @@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev) resync_send(mddev, RESYNCING, 0, 0); } -static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) +static int area_resyncing(struct mddev *mddev, int direction, + sector_t lo, sector_t hi) { struct md_cluster_info *cinfo = mddev->cluster_info; int ret = 0; struct suspend_info *s; + if ((direction == READ) && + test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state)) + return 1; + spin_lock_irq(&cinfo->suspend_lock); if (list_empty(&cinfo->suspend_list)) goto out; diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index 6817ee00e053..00defe2badbc 100644 --- a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h @@ -18,7 +18,7 @@ struct md_cluster_operations { int (*metadata_update_start)(struct mddev *mddev); int (*metadata_update_finish)(struct mddev *mddev); int (*metadata_update_cancel)(struct mddev *mddev); - int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); + int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); int (*add_new_disk_finish)(struct mddev *mddev); int (*new_disk_ack)(struct mddev *mddev, bool ack); diff --git a/drivers/md/md.c b/drivers/md/md.c index d429c30cd514..0c2a4e8b873c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev) { struct md_personality *pers = mddev->pers; mddev_detach(mddev); + /* Ensure ->event_work is done */ + flush_workqueue(md_misc_wq); spin_lock(&mddev->lock); mddev->ready = 0; mddev->pers = NULL; @@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes) err = request_module("md-cluster"); if (err) { pr_err("md-cluster module not found.\n"); - return err; + return 
-ENOENT; } spin_lock(&pers_lock); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f80f1af61ce7..94f5b55069e0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error) spin_lock_irqsave(&conf->device_lock, flags); if (r1_bio->mddev->degraded == conf->raid_disks || (r1_bio->mddev->degraded == conf->raid_disks-1 && - !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) + test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) uptodate = 1; spin_unlock_irqrestore(&conf->device_lock, flags); } @@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if ((conf->mddev->recovery_cp < this_sector + sectors) || (mddev_is_clustered(conf->mddev) && - md_cluster_ops->area_resyncing(conf->mddev, this_sector, + md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, this_sector + sectors))) choose_first = 1; else @@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) ((bio_end_sector(bio) > mddev->suspend_lo && bio->bi_iter.bi_sector < mddev->suspend_hi) || (mddev_is_clustered(mddev) && - md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { + md_cluster_ops->area_resyncing(mddev, WRITE, + bio->bi_iter.bi_sector, bio_end_sector(bio))))) { /* As the suspend_* range is controlled by * userspace, we want an interruptible * wait. @@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) if (bio_end_sector(bio) <= mddev->suspend_lo || bio->bi_iter.bi_sector >= mddev->suspend_hi || (mddev_is_clustered(mddev) && - !md_cluster_ops->area_resyncing(mddev, + !md_cluster_ops->area_resyncing(mddev, WRITE, bio->bi_iter.bi_sector, bio_end_sector(bio)))) break; schedule(); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 940f2f365461..38c58e19cfce 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) /* far_copies must be 1 */ conf->prev.stride = conf->dev_sectors; } + conf->reshape_safe = conf->reshape_progress; spin_lock_init(&conf->device_lock); INIT_LIST_HEAD(&conf->retry_list); @@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev) } conf->offset_diff = min_offset_diff; - conf->reshape_safe = conf->reshape_progress; clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); @@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev) conf->reshape_progress = size; } else conf->reshape_progress = 0; + conf->reshape_safe = conf->reshape_progress; spin_unlock_irq(&conf->device_lock); if (mddev->delta_disks && mddev->bitmap) { @@ -4170,6 +4171,7 @@ abort: rdev->new_data_offset = rdev->data_offset; smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; mddev->reshape_position = MaxSector; spin_unlock_irq(&conf->device_lock); return ret; @@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf) md_finish_reshape(conf->mddev); smp_wmb(); conf->reshape_progress = MaxSector; + conf->reshape_safe = MaxSector; spin_unlock_irq(&conf->device_lock); /* read-ahead size must cover two whole stripes, which is diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 59e44e99eef3..643d217bfa13 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize) if (!sc) return -ENOMEM; + /* Need to 
ensure auto-resizing doesn't interfere */ + mutex_lock(&conf->cache_size_mutex); + for (i = conf->max_nr_stripes; i; i--) { nsh = alloc_stripe(sc, GFP_KERNEL); if (!nsh) @@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) kmem_cache_free(sc, nsh); } kmem_cache_destroy(sc); + mutex_unlock(&conf->cache_size_mutex); return -ENOMEM; } /* Step 2 - Must use GFP_NOIO now. @@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) } else err = -ENOMEM; + mutex_unlock(&conf->cache_size_mutex); /* Step 4, return new stripes to service */ while(!list_empty(&newstripes)) { nsh = list_entry(newstripes.next, struct stripe_head, lru); @@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) &first_bad, &bad_sectors)) set_bit(R5_ReadRepl, &dev->flags); else { - if (rdev) + if (rdev && !test_bit(Faulty, &rdev->flags)) set_bit(R5_NeedReplace, &dev->flags); + else + clear_bit(R5_NeedReplace, &dev->flags); rdev = rcu_dereference(conf->disks[i].rdev); clear_bit(R5_ReadRepl, &dev->flags); } @@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread) pr_debug("%d stripes handled\n", handled); spin_unlock_irq(&conf->device_lock); - if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { + if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && + mutex_trylock(&conf->cache_size_mutex)) { grow_one_stripe(conf, __GFP_NOWARN); /* Set flag even if allocation failed. This helps * slow down allocation requests when mem is short */ set_bit(R5_DID_ALLOC, &conf->cache_state); + mutex_unlock(&conf->cache_size_mutex); } async_tx_issue_pending_all(); @@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size) return -EINVAL; conf->min_nr_stripes = size; + mutex_lock(&conf->cache_size_mutex); while (size < conf->max_nr_stripes && drop_one_stripe(conf)) ; + mutex_unlock(&conf->cache_size_mutex); err = md_allow_write(mddev); if (err) return err; + mutex_lock(&conf->cache_size_mutex); while (size > conf->max_nr_stripes) if (!grow_one_stripe(conf, GFP_KERNEL)) break; + mutex_unlock(&conf->cache_size_mutex); return 0; } @@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, struct shrink_control *sc) { struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); - int ret = 0; - while (ret < sc->nr_to_scan) { - if (drop_one_stripe(conf) == 0) - return SHRINK_STOP; - ret++; + unsigned long ret = SHRINK_STOP; + + if (mutex_trylock(&conf->cache_size_mutex)) { + ret= 0; + while (ret < sc->nr_to_scan) { + if (drop_one_stripe(conf) == 0) { + ret = SHRINK_STOP; + break; + } + ret++; + } + mutex_unlock(&conf->cache_size_mutex); } return ret; } @@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) goto abort; spin_lock_init(&conf->device_lock); seqcount_init(&conf->gen_lock); + mutex_init(&conf->cache_size_mutex); init_waitqueue_head(&conf->wait_for_quiescent); for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { init_waitqueue_head(&conf->wait_for_stripe[i]); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 02c3bf8fbfe7..d05144278690 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -482,7 +482,8 @@ struct r5conf { */ int active_name; char cache_name[2][32]; - struct kmem_cache *slab_cache; /* for allocating stripes */ + struct kmem_cache *slab_cache; /* for allocating stripes */ + struct mutex cache_size_mutex; /* Protect changes to cache size */ int seq_flush, seq_write; int quiesce; diff --git a/drivers/media/pci/ivtv/ivtvfb.c 
b/drivers/media/pci/ivtv/ivtvfb.c index 4cb365d4ffdc..8b95eefb610b 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c @@ -38,6 +38,8 @@ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/kernel.h> #include <linux/fb.h> @@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv) { int rc; +#ifdef CONFIG_X86_64 + if (pat_enabled()) { + pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n"); + return -ENODEV; + } +#endif + if (itv->osd_info) { IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); return -EBUSY; @@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void) int registered = 0; int err; -#ifdef CONFIG_X86_64 - if (WARN(pat_enabled(), - "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) { - return -ENODEV; - } -#endif if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 8eb0a9500a90..e9513d651cd3 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent) /* Fill in the data structures */ devno = MKDEV(MAJOR(mei_devt), dev->minor); cdev_init(&dev->cdev, &mei_fops); - dev->cdev.owner = mei_fops.owner; + dev->cdev.owner = parent->driver->owner; /* Add the device */ ret = cdev_add(&dev->cdev, devno, 1); diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c index 41e3bdb10061..6dfdae3452d6 100644 --- a/drivers/misc/mic/scif/scif_nodeqp.c +++ b/drivers/misc/mic/scif/scif_nodeqp.c @@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg) } static struct scatterlist * -scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) +scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt) { struct scatterlist *sg; struct page *page; @@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) return NULL; sg_init_table(sg, page_cnt); for (i = 0; i < page_cnt; i++) { - page = vmalloc_to_page((void __force *)va); - if (!page) - goto p2p_sg_err; + page = pfn_to_page(pa >> PAGE_SHIFT); sg_set_page(&sg[i], page, page_size, 0); - va += page_size; + pa += page_size; } return sg; -p2p_sg_err: - kfree(sg); - return NULL; } /* Init p2p mappings required to access peerdev from scifdev */ @@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev) p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); if (!p2p) return NULL; - p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, + p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa, PAGE_SIZE, num_mmio_pages); if (!p2p->ppi_sg[SCIF_PPI_MMIO]) goto free_p2p; p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); - p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, + p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa, 1 << sg_page_shift, num_aper_chunks); p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c9c3d20b784b..a1b820fcb2a6 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev, ret = 
snprintf(buf, PAGE_SIZE, "%d\n", locked); + mmc_blk_put(md); + return ret; } diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index fd9a58e216a5..6a0f9c79be26 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI config MMC_MTK tristate "MediaTek SD/MMC Card Interface support" + depends on HAS_DMA help This selects the MediaTek(R) Secure digital and Multimedia card Interface. If you have a machine with a integrated SD/MMC card reader, say Y or M here. diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index b2b411da297b..4d1203236890 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) if (status & (CTO_EN | CCRC_EN)) end_cmd = 1; + if (host->data || host->response_busy) { + end_trans = !end_cmd; + host->response_busy = 0; + } if (status & (CTO_EN | DTO_EN)) hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); - else if (status & (CCRC_EN | DCRC_EN)) + else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | + BADA_EN)) hsmmc_command_incomplete(host, -EILSEQ, end_cmd); if (status & ACE_EN) { @@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) } dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); } - if (host->data || host->response_busy) { - end_trans = !end_cmd; - host->response_busy = 0; - } } OMAP_HSMMC_WRITE(host->base, STAT, status); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index faf0cb910c96..c6b9f6492e1a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct pltfm_imx_data *imx_data = pltfm_host->priv; - struct esdhc_platform_data *boarddata = &imx_data->boarddata; - if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) - return boarddata->f_max; - else - return pltfm_host->clock; + return pltfm_host->clock; } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) @@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { static int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, - struct esdhc_platform_data *boarddata) + struct pltfm_imx_data *imx_data) { struct device_node *np = pdev->dev.of_node; - - if (!np) - return -ENODEV; - - if (of_get_property(np, "non-removable", NULL)) - boarddata->cd_type = ESDHC_CD_PERMANENT; - - if (of_get_property(np, "fsl,cd-controller", NULL)) - boarddata->cd_type = ESDHC_CD_CONTROLLER; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + int ret; if (of_get_property(np, "fsl,wp-controller", NULL)) boarddata->wp_type = ESDHC_WP_CONTROLLER; - boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); - if (gpio_is_valid(boarddata->cd_gpio)) - boarddata->cd_type = ESDHC_CD_GPIO; - boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); if (gpio_is_valid(boarddata->wp_gpio)) boarddata->wp_type = ESDHC_WP_GPIO; - of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); - - of_property_read_u32(np, "max-frequency", &boarddata->f_max); - if (of_find_property(np, "no-1-8-v", NULL)) boarddata->support_vsel = false; else @@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct 
platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); + /* sdr50 and sdr104 needs work on 1.8v signal voltage */ + if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && + !IS_ERR(imx_data->pins_default)) { + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_100MHZ); + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_200MHZ); + if (IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) { + dev_warn(mmc_dev(host->mmc), + "could not get ultra high speed state, work on normal mode\n"); + /* + * fall back to not support uhs by specify no 1.8v quirk + */ + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + } else { + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + /* call to generic mmc_of_parse to support additional capabilities */ - return mmc_of_parse(host->mmc); + ret = mmc_of_parse(host->mmc); + if (ret) + return ret; + + if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + return 0; } #else static inline int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct sdhci_host *host, - struct esdhc_platform_data *boarddata) + struct pltfm_imx_data *imx_data) { return -ENODEV; } #endif +static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, + struct sdhci_host *host, + struct pltfm_imx_data *imx_data) +{ + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + int err; + + if (!host->mmc->parent->platform_data) { + dev_err(mmc_dev(host->mmc), "no board data!\n"); + return -EINVAL; + } + + imx_data->boarddata = *((struct esdhc_platform_data *) + host->mmc->parent->platform_data); + /* write_protect */ + if (boarddata->wp_type == ESDHC_WP_GPIO) { + err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request write-protect gpio!\n"); + return err; + } + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + } + + /* card_detect */ + switch (boarddata->cd_type) { + case ESDHC_CD_GPIO: + err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); + if (err) { + dev_err(mmc_dev(host->mmc), + "failed to request card-detect gpio!\n"); + return err; + } + /* fall through */ + + case ESDHC_CD_CONTROLLER: + /* we have a working card_detect back */ + host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; + break; + + case ESDHC_CD_PERMANENT: + host->mmc->caps |= MMC_CAP_NONREMOVABLE; + break; + + case ESDHC_CD_NONE: + break; + } + + switch (boarddata->max_bus_width) { + case 8: + host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; + break; + case 4: + host->mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + case 1: + default: + host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + break; + } + + return 0; +} + static int sdhci_esdhc_imx_probe(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(imx_esdhc_dt_ids, &pdev->dev); struct sdhci_pltfm_host *pltfm_host; struct sdhci_host *host; - struct esdhc_platform_data *boarddata; int err; struct pltfm_imx_data *imx_data; - bool dt = true; host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); if (IS_ERR(host)) @@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - boarddata = &imx_data->boarddata; - if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { - if (!host->mmc->parent->platform_data) { - dev_err(mmc_dev(host->mmc), "no board data!\n"); - err = -EINVAL; - goto 
disable_clk; - } - imx_data->boarddata = *((struct esdhc_platform_data *) - host->mmc->parent->platform_data); - dt = false; - } - /* write_protect */ - if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) { - err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request write-protect gpio!\n"); - goto disable_clk; - } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - } - - /* card_detect */ - switch (boarddata->cd_type) { - case ESDHC_CD_GPIO: - if (dt) - break; - err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); - if (err) { - dev_err(mmc_dev(host->mmc), - "failed to request card-detect gpio!\n"); - goto disable_clk; - } - /* fall through */ - - case ESDHC_CD_CONTROLLER: - /* we have a working card_detect back */ - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; - break; - - case ESDHC_CD_PERMANENT: - host->mmc->caps |= MMC_CAP_NONREMOVABLE; - break; - - case ESDHC_CD_NONE: - break; - } - - switch (boarddata->max_bus_width) { - case 8: - host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; - break; - case 4: - host->mmc->caps |= MMC_CAP_4_BIT_DATA; - break; - case 1: - default: - host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; - break; - } - - /* sdr50 and sdr104 needs work on 1.8v signal voltage */ - if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && - !IS_ERR(imx_data->pins_default)) { - imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, - ESDHC_PINCTRL_STATE_100MHZ); - imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, - ESDHC_PINCTRL_STATE_200MHZ); - if (IS_ERR(imx_data->pins_100mhz) || - IS_ERR(imx_data->pins_200mhz)) { - dev_warn(mmc_dev(host->mmc), - "could not get ultra high speed state, work on normal mode\n"); - /* fall back to not support uhs by specify no 1.8v quirk */ - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } - } else { - host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; - } + if (of_id) + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + else + err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data); + if (err) + goto disable_clk; err = sdhci_add_host(host); if (err) diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index 3497cfaf683c..a870c42731d7 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -45,6 +45,6 @@ #define ESDHC_DMA_SYSCTL 0x40c #define ESDHC_DMA_SNOOP 0x00000040 -#define ESDHC_HOST_CONTROL_RES 0x05 +#define ESDHC_HOST_CONTROL_RES 0x01 #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 9cd5fc62f130..946d37f94a31 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) goto err_of_parse; sdhci_get_of_property(pdev); pdata = pxav3_get_mmc_pdata(dev); + pdev->dev.platform_data = pdata; } else if (pdata) { /* on-chip device */ if (pdata->flags & PXA_FLAG_CARD_PERMANENT) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index bc1445238fb3..1dbe93232030 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host) u32 max_current_caps; unsigned int ocr_avail; unsigned int override_timeout_clk; + u32 max_clk; int ret; WARN_ON(host == NULL); @@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host) GFP_KERNEL); host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); if (!host->adma_table || !host->align_buffer) { - 
dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, - host->adma_table, host->adma_addr); + if (host->adma_table) + dma_free_coherent(mmc_dev(mmc), + host->adma_table_sz, + host->adma_table, + host->adma_addr); kfree(host->align_buffer); pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", mmc_hostname(mmc)); @@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host) * Set host parameters. */ mmc->ops = &sdhci_ops; - mmc->f_max = host->max_clk; + max_clk = host->max_clk; + if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else if (host->version >= SDHCI_SPEC_300) { if (host->clk_mul) { mmc->f_min = (host->max_clk * host->clk_mul) / 1024; - mmc->f_max = host->max_clk * host->clk_mul; + max_clk = host->max_clk * host->clk_mul; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; + if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk))) + mmc->f_max = max_clk; + if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 317a49480475..e1ccefce9a9d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev, call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); } +static struct slave *bond_get_old_active(struct bonding *bond, + struct slave *new_active) +{ + struct slave *slave; + struct list_head *iter; + + bond_for_each_slave(bond, slave, iter) { + if (slave == new_active) + continue; + + if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) + return slave; + } + + return NULL; +} + /* bond_do_fail_over_mac * * Perform special MAC address swapping for fail_over_mac settings @@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond, if (!new_active) return; + if (!old_active) + old_active = bond_get_old_active(bond, new_active); + if (old_active) { ether_addr_copy(tmp_mac, new_active->dev->dev_addr); ether_addr_copy(saddr.sa_data, @@ -1725,9 +1745,16 @@ err_free: err_undo_flags: /* Enslave of first slave has failed and we need to fix master's mac */ - if (!bond_has_slaves(bond) && - ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) - eth_hw_addr_random(bond_dev); + if (!bond_has_slaves(bond)) { + if (ether_addr_equal_64bits(bond_dev->dev_addr, + slave_dev->dev_addr)) + eth_hw_addr_random(bond_dev); + if (bond_dev->type != ARPHRD_ETHER) { + ether_setup(bond_dev); + bond_dev->flags |= IFF_MASTER; + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; + } + } return res; } @@ -1916,6 +1943,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond %s\n", bond_dev->name); + bond_remove_proc_entry(bond); unregister_netdevice(bond_dev); } return ret; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f4e40aa4d2a2..945c0955a967 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev) cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); } /** @@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) } 
at91_read_mb(dev, mb, cf); - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); } @@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) return 0; at91_poll_err_frame(dev, cf, reg_sr); - netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev) return; at91_irq_err_state(dev, cf, new_state); - netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_rx(skb); priv->can.state = new_state; } diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 27ad312e7abf..57dadd52b428 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c @@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc) cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) @@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) priv->can.state = state; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index c11d44984036..70a8cbb29e75 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) for (i = 0; i < cf->can_dlc; i++) cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static int cc770_err(struct net_device *dev, u8 status) @@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status) } } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 6201c5a1a884..b1e8d729851c 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) return 0; do_bus_err(dev, cf, reg_esr); - netif_receive_skb(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); - netif_receive_skb(skb); - dev->stats.rx_packets++; dev->stats.rx_bytes += cf->can_dlc; + netif_receive_skb(skb); return 1; } @@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev) } flexcan_read_fifo(dev, cf); - netif_receive_skb(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); can_led_event(dev, CAN_LED_EVENT_RX); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index e3d7e22a4fa0..db9538d4b358 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget) cf->data[i] = (u8)(slot[j] >> shift); } } - netif_receive_skb(skb); /* Update statistics and read pointer */ stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_receive_skb(skb); + rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); } diff --git 
a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 32bd7f451aa4..7b92e911a616 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev) /* release receive buffer */ sja1000_write_cmdreg(priv, CMD_RRB); - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); can_led_event(dev, CAN_LED_EVENT_RX); } @@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) can_bus_off(dev); } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index a23a7af8eb9a..9a3f15cb7ef4 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl) memcpy(skb_put(skb, sizeof(struct can_frame)), &cf, sizeof(struct can_frame)); - netif_rx_ni(skb); sl->dev->stats.rx_packets++; sl->dev->stats.rx_bytes += cf.can_dlc; + netif_rx_ni(skb); } /* parse tty input stream */ diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index c1a95a34d62e..b7e83c212023 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi) if (ret) goto out_clk; - priv->power = devm_regulator_get(&spi->dev, "vdd"); - priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); + priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); + priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { ret = -EPROBE_DEFER; @@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev) struct spi_device *spi = to_spi_device(dev); struct mcp251x_priv *priv = spi_get_drvdata(spi); - if (priv->after_suspend & AFTER_SUSPEND_POWER) { + if (priv->after_suspend & AFTER_SUSPEND_POWER) mcp251x_power_enable(priv->power, 1); + + if (priv->after_suspend & AFTER_SUSPEND_UP) { + mcp251x_power_enable(priv->transceiver, 1); queue_work(priv->wq, &priv->restart_work); } else { - if (priv->after_suspend & AFTER_SUSPEND_UP) { - mcp251x_power_enable(priv->transceiver, 1); - queue_work(priv->wq, &priv->restart_work); - } else { - priv->after_suspend = 0; - } + priv->after_suspend = 0; } + priv->force_quit = 0; enable_irq(spi->irq); return 0; diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index e95a9e1a889f..cf345cbfe819 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, } } - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 866bac0ae7e9..2d390384ef3b 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) cf->data[i] = msg->msg.can_msg.msg[i]; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) @@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) stats->rx_errors++; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + 
netif_rx(skb); } /* diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 411c1af92c62..0e5a4493ba4f 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, cf->data[7] = rxerr; } - netif_rx(skb); - priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } } @@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, cf->data[i] = msg->msg.rx.data[i]; } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } return; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 72427f21edff..6b94007ae052 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c @@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, hwts->hwtstamp = timeval_to_ktime(tv); } - netif_rx(skb); mc->netdev->stats.rx_packets++; mc->netdev->stats.rx_bytes += cf->can_dlc; + netif_rx(skb); return 0; } @@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - /* push the skb */ - netif_rx(skb); - /* update statistics */ mc->netdev->stats.rx_packets++; mc->netdev->stats.rx_bytes += cf->can_dlc; + /* push the skb */ + netif_rx(skb); return 0; diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index dec51717635e..7d61b3279798 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; + netif_rx(skb); return 0; } @@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); hwts = skb_hwtstamps(skb); hwts->hwtstamp = timeval_to_ktime(tv); - netif_rx(skb); netdev->stats.rx_packets++; netdev->stats.rx_bytes += can_frame->can_dlc; + netif_rx(skb); return 0; } diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index dd52c7a4c80d..de95b1ccba3e 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } /* Read data and status frames */ @@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, else memcpy(cf->data, msg->data, cf->can_dlc); - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); can_led_event(priv->netdev, CAN_LED_EVENT_RX); } else { diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 972982f8bea7..079897b3a955 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) } /* Include the pseudo-PHY address and the broadcast PHY address to - * divert reads towards our workaround + * divert reads towards our workaround. 
This is only required for + * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such + * that we can use the regular SWITCH_MDIO master controller instead. + * + * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask + * to have a 1:1 mapping between Port address and PHY address in order + * to utilize the slave_mii_bus instance to read from Port PHYs. This is + * not what we want here, so we initialize phys_mii_mask 0 to always + * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. */ - ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + if (of_machine_is_compatible("brcm,bcm7445d0")) + ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); + else + ds->phys_mii_mask = 0; rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index fd8547c2b79d..561342466076 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) newfid = __ffs(ps->fid_mask); ps->fid[port] = newfid; - ps->fid_mask &= (1 << newfid); + ps->fid_mask &= ~(1 << newfid); ps->bridge_mask[fid] &= ~(1 << port); ps->bridge_mask[newfid] = 1 << port; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 42e20e5385ac..1f89c59b4353 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -24,7 +24,6 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> -#include <linux/pm_runtime.h> #include <linux/ptrace.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -78,7 +77,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); #define FEC_ENET_RAEM_V 0x8 #define FEC_ENET_RAFL_V 0x8 #define FEC_ENET_OPD_V 0xFFF0 -#define FEC_MDIO_PM_TIMEOUT 100 /* ms */ static struct platform_device_id fec_devtype[] = { { @@ -1769,13 +1767,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct fec_enet_private *fep = bus->priv; - struct device *dev = &fep->pdev->dev; unsigned long time_left; - int ret = 0; - - ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) - return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1791,30 +1783,18 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); - ret = -ETIMEDOUT; - goto out; + return -ETIMEDOUT; } - ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); - -out: - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); - - return ret; + /* return value */ + return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); } static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct fec_enet_private *fep = bus->priv; - struct device *dev = &fep->pdev->dev; unsigned long time_left; - int ret = 0; - - ret = pm_runtime_get_sync(dev); - if (IS_ERR_VALUE(ret)) - return ret; fep->mii_timeout = 0; init_completion(&fep->mdio_done); @@ -1831,13 +1811,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, if (time_left == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); - ret = -ETIMEDOUT; + return -ETIMEDOUT; } - pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); 
- - return ret; + return 0; } static int fec_enet_clk_enable(struct net_device *ndev, bool enable) @@ -1849,6 +1826,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) ret = clk_prepare_enable(fep->clk_ahb); if (ret) return ret; + ret = clk_prepare_enable(fep->clk_ipg); + if (ret) + goto failed_clk_ipg; if (fep->clk_enet_out) { ret = clk_prepare_enable(fep->clk_enet_out); if (ret) @@ -1872,6 +1852,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) } } else { clk_disable_unprepare(fep->clk_ahb); + clk_disable_unprepare(fep->clk_ipg); if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); if (fep->clk_ptp) { @@ -1893,6 +1874,8 @@ failed_clk_ptp: if (fep->clk_enet_out) clk_disable_unprepare(fep->clk_enet_out); failed_clk_enet_out: + clk_disable_unprepare(fep->clk_ipg); +failed_clk_ipg: clk_disable_unprepare(fep->clk_ahb); return ret; @@ -2864,14 +2847,10 @@ fec_enet_open(struct net_device *ndev) struct fec_enet_private *fep = netdev_priv(ndev); int ret; - ret = pm_runtime_get_sync(&fep->pdev->dev); - if (IS_ERR_VALUE(ret)) - return ret; - pinctrl_pm_select_default_state(&fep->pdev->dev); ret = fec_enet_clk_enable(ndev, true); if (ret) - goto clk_enable; + return ret; /* I should reset the ring buffers here, but I don't yet know * a simple way to do that. @@ -2902,9 +2881,6 @@ err_enet_mii_probe: fec_enet_free_buffers(ndev); err_enet_alloc: fec_enet_clk_enable(ndev, false); -clk_enable: - pm_runtime_mark_last_busy(&fep->pdev->dev); - pm_runtime_put_autosuspend(&fep->pdev->dev); pinctrl_pm_select_sleep_state(&fep->pdev->dev); return ret; } @@ -2927,9 +2903,6 @@ fec_enet_close(struct net_device *ndev) fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&fep->pdev->dev); - pm_runtime_mark_last_busy(&fep->pdev->dev); - pm_runtime_put_autosuspend(&fep->pdev->dev); - fec_enet_free_buffers(ndev); return 0; @@ -3415,10 +3388,6 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_clk; - ret = clk_prepare_enable(fep->clk_ipg); - if (ret) - goto failed_clk_ipg; - fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); if (!IS_ERR(fep->reg_phy)) { ret = regulator_enable(fep->reg_phy); @@ -3465,8 +3434,6 @@ fec_probe(struct platform_device *pdev) netif_carrier_off(ndev); fec_enet_clk_enable(ndev, false); pinctrl_pm_select_sleep_state(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); ret = register_netdev(ndev); if (ret) @@ -3480,12 +3447,6 @@ fec_probe(struct platform_device *pdev) fep->rx_copybreak = COPYBREAK_DEFAULT; INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); - - pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_mark_last_busy(&pdev->dev); - pm_runtime_put_autosuspend(&pdev->dev); - return 0; failed_register: @@ -3496,8 +3457,6 @@ failed_init: if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: - clk_disable_unprepare(fep->clk_ipg); -failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: failed_phy: @@ -3609,28 +3568,7 @@ failed_clk: return ret; } -static int __maybe_unused fec_runtime_suspend(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct fec_enet_private *fep = netdev_priv(ndev); - - clk_disable_unprepare(fep->clk_ipg); - - return 0; -} - -static int __maybe_unused fec_runtime_resume(struct device *dev) -{ - struct net_device *ndev = dev_get_drvdata(dev); - struct fec_enet_private *fep = netdev_priv(ndev); - - return clk_prepare_enable(fep->clk_ipg); 
-} - -static const struct dev_pm_ops fec_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) - SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) -}; +static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); static struct platform_driver fec_driver = { .driver = { diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 370e20ed224c..62e48bc0cb23 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, struct mvneta_rx_queue *rxq) { struct net_device *dev = pp->dev; - int rx_done, rx_filled; + int rx_done; u32 rcvd_pkts = 0; u32 rcvd_bytes = 0; @@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, rx_todo = rx_done; rx_done = 0; - rx_filled = 0; /* Fairness NAPI loop */ while (rx_done < rx_todo) { @@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, int rx_bytes, err; rx_done++; - rx_filled++; rx_status = rx_desc->status; rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); data = (unsigned char *)rx_desc->buf_cookie; @@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, continue; } + /* Refill processing */ + err = mvneta_rx_refill(pp, rx_desc); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; + goto err_drop_frame; + } + skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); if (!skb) goto err_drop_frame; @@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, mvneta_rx_csum(pp, rx_status, skb); napi_gro_receive(&pp->napi, skb); - - /* Refill processing */ - err = mvneta_rx_refill(pp, rx_desc); - if (err) { - netdev_err(dev, "Linux processing - Can't refill\n"); - rxq->missed++; - rx_filled--; - } } if (rcvd_pkts) { @@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, } /* Update rxq management counters */ - mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); + mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); return rx_done; } diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index fd9745714d90..78849dd4ef8e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) struct ravb_desc *desc = NULL; int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; - struct sk_buff *skb; dma_addr_t dma_addr; - void *buffer; int i; priv->cur_rx[q] = 0; @@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q) memset(priv->rx_ring[q], 0, rx_ring_size); /* Build RX ring buffer */ for (i = 0; i < priv->num_rx_ring[q]; i++) { - priv->rx_skb[q][i] = NULL; - skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); - if (!skb) - break; - ravb_set_buffer_align(skb); /* RX descriptor */ rx_desc = &priv->rx_ring[q][i]; /* The size of the buffer should be on 16-byte boundary. 
*/ rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); - dma_addr = dma_map_single(&ndev->dev, skb->data, + dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data, ALIGN(PKT_BUF_SZ, 16), DMA_FROM_DEVICE); - if (dma_mapping_error(&ndev->dev, dma_addr)) { - dev_kfree_skb(skb); - break; - } - priv->rx_skb[q][i] = skb; + /* We just set the data size to 0 for a failed mapping which + * should prevent DMA from happening... + */ + if (dma_mapping_error(&ndev->dev, dma_addr)) + rx_desc->ds_cc = cpu_to_le16(0); rx_desc->dptr = cpu_to_le32(dma_addr); rx_desc->die_dt = DT_FEMPTY; } rx_desc = &priv->rx_ring[q][i]; rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); rx_desc->die_dt = DT_LINKFIX; /* type */ - priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ for (i = 0; i < priv->num_tx_ring[q]; i++) { - priv->tx_skb[q][i] = NULL; - priv->tx_buffers[q][i] = NULL; - buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); - if (!buffer) - break; - /* Aligned TX buffer */ - priv->tx_buffers[q][i] = buffer; tx_desc = &priv->tx_ring[q][i]; tx_desc->die_dt = DT_EEMPTY; } @@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q) static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + struct sk_buff *skb; int ring_size; + void *buffer; + int i; /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q) if (!priv->rx_skb[q] || !priv->tx_skb[q]) goto error; + for (i = 0; i < priv->num_rx_ring[q]; i++) { + skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); + if (!skb) + goto error; + ravb_set_buffer_align(skb); + priv->rx_skb[q][i] = skb; + } + /* Allocate rings for the aligned buffers */ priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], sizeof(*priv->tx_buffers[q]), GFP_KERNEL); if (!priv->tx_buffers[q]) goto error; + for (i = 0; i < priv->num_tx_ring[q]; i++) { + buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); + if (!buffer) + goto error; + /* Aligned TX buffer */ + priv->tx_buffers[q][i] = buffer; + } + /* Allocate all RX descriptors. */ ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, @@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) if (--boguscnt < 0) break; + /* We use 0-byte descriptors to mark the DMA mapping errors */ + if (!pkt_len) + continue; + if (desc_status & MSC_MC) stats->multicast++; @@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) skb = priv->rx_skb[q][entry]; priv->rx_skb[q][entry] = NULL; - dma_sync_single_for_cpu(&ndev->dev, - le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), - DMA_FROM_DEVICE); + dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), + ALIGN(PKT_BUF_SZ, 16), + DMA_FROM_DEVICE); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; @@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) if (!skb) break; /* Better luck next round. 
*/ ravb_set_buffer_align(skb); - dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), - ALIGN(PKT_BUF_SZ, 16), - DMA_FROM_DEVICE); dma_addr = dma_map_single(&ndev->dev, skb->data, le16_to_cpu(desc->ds_cc), DMA_FROM_DEVICE); skb_checksum_none_assert(skb); - if (dma_mapping_error(&ndev->dev, dma_addr)) { - dev_kfree_skb_any(skb); - break; - } + /* We just set the data size to 0 for a failed mapping + * which should prevent DMA from happening... + */ + if (dma_mapping_error(&ndev->dev, dma_addr)) + desc->ds_cc = cpu_to_le16(0); desc->dptr = cpu_to_le32(dma_addr); priv->rx_skb[q][entry] = skb; } @@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) u32 dma_addr; void *buffer; u32 entry; - u32 tccr; spin_lock_irqsave(&priv->lock, flags); if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { @@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) dma_wmb(); desc->die_dt = DT_FSINGLE; - tccr = ravb_read(ndev, TCCR); - if (!(tccr & (TCCR_TSRQ0 << q))) - ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR); + ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); priv->cur_tx[q]++; if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 50f7a7a26821..864b476f7fd5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device, if (res->mac) memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); - dev_set_drvdata(device, priv); + dev_set_drvdata(device, priv->dev); /* Verify driver arguments */ stmmac_verify_args(); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f335bf119ab5..d155bf2573cd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) static int cpsw_poll(struct napi_struct *napi, int budget) { struct cpsw_priv *priv = napi_to_priv(napi); - int num_tx, num_rx; - - num_tx = cpdma_chan_process(priv->txch, 128); + int num_rx; num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { @@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget) } } - if (num_rx || num_tx) - cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", - num_rx, num_tx); + if (num_rx) + cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); return num_rx; } diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 5ec4ed3f6c8d..ec8ed30196f3 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1617,11 +1617,11 @@ static int netcp_ndo_open(struct net_device *ndev) } mutex_unlock(&netcp_modules_lock); - netcp_rxpool_refill(netcp); napi_enable(&netcp->rx_napi); napi_enable(&netcp->tx_napi); knav_queue_enable_notify(netcp->tx_compl_q); knav_queue_enable_notify(netcp->rx_queue); + netcp_rxpool_refill(netcp); netif_tx_wake_all_queues(ndev); dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); return 0; diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 953a97492fab..9542b7bac61a 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -67,8 +67,6 @@ struct ipvl_dev { struct ipvl_port *port; struct net_device *phy_dev; struct list_head addrs; - int ipv4cnt; - int ipv6cnt; 
struct ipvl_pcpu_stats __percpu *pcpu_stats; DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); netdev_features_t sfeatures; @@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) return rcu_dereference(d->rx_handler_data); } +static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d) +{ + return rcu_dereference_bh(d->rx_handler_data); +} + static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) { return rtnl_dereference(d->rx_handler_data); @@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, const void *iaddr, bool is_v6); -void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); +void ipvlan_ht_addr_del(struct ipvl_addr *addr); #endif /* __IPVLAN_H */ diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8afbedad620d..207f62e8de9a 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); } -void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) +void ipvlan_ht_addr_del(struct ipvl_addr *addr) { hlist_del_init_rcu(&addr->hlnode); - if (sync) - synchronize_rcu(); } struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, @@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { struct ipvl_dev *ipvlan = netdev_priv(dev); - struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); + struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); if (!port) goto out; diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1acc283160d9..20b58bdecf75 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev) else dev->flags &= ~IFF_NOARP; - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry(addr, &ipvlan->addrs, anode) - ipvlan_ht_addr_add(ipvlan, addr); - } + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_add(ipvlan, addr); + return dev_uc_add(phy_dev, phy_dev->dev_addr); } @@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev) dev_uc_del(phy_dev, phy_dev->dev_addr); - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry(addr, &ipvlan->addrs, anode) - ipvlan_ht_addr_del(addr, !dev->dismantle); - } + list_for_each_entry(addr, &ipvlan->addrs, anode) + ipvlan_ht_addr_del(addr); + return 0; } @@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, ipvlan->port = port; ipvlan->sfeatures = IPVLAN_FEATURES; INIT_LIST_HEAD(&ipvlan->addrs); - ipvlan->ipv4cnt = 0; - ipvlan->ipv6cnt = 0; /* TODO Probably put random address here to be presented to the * world but keep using the physical-dev address for the outgoing @@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) struct ipvl_dev *ipvlan = netdev_priv(dev); struct ipvl_addr *addr, *next; - if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { - list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { - ipvlan_ht_addr_del(addr, !dev->dismantle); - list_del(&addr->anode); - } + 
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { + ipvlan_ht_addr_del(addr); + list_del(&addr->anode); + kfree_rcu(addr, rcu); } + list_del_rcu(&ipvlan->pnode); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(ipvlan->phy_dev, dev); @@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); addr->atype = IPVL_IPV6; list_add_tail(&addr->anode, &ipvlan->addrs); - ipvlan->ipv6cnt++; + /* If the interface is not up, the address will be added to the hash * list by ipvlan_open. */ @@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) if (!addr) return; - ipvlan_ht_addr_del(addr, true); + ipvlan_ht_addr_del(addr); list_del(&addr->anode); - ipvlan->ipv6cnt--; - WARN_ON(ipvlan->ipv6cnt < 0); kfree_rcu(addr, rcu); return; @@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused, struct net_device *dev = (struct net_device *)if6->idev->dev; struct ipvl_dev *ipvlan = netdev_priv(dev); + /* FIXME IPv6 autoconf calls us from bh without RTNL */ + if (in_softirq()) + return NOTIFY_DONE; + if (!netif_is_ipvlan(dev)) return NOTIFY_DONE; @@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); addr->atype = IPVL_IPV4; list_add_tail(&addr->anode, &ipvlan->addrs); - ipvlan->ipv4cnt++; + /* If the interface is not up, the address will be added to the hash * list by ipvlan_open. */ @@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) if (!addr) return; - ipvlan_ht_addr_del(addr, true); + ipvlan_ht_addr_del(addr); list_del(&addr->anode); - ipvlan->ipv4cnt--; - WARN_ON(ipvlan->ipv4cnt < 0); kfree_rcu(addr, rcu); return; diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index c7a12e2e07b7..8a3bf5469892 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev) return ret; } - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || + if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, DP83867_DEVADDR, phydev->addr); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 095ef3fe369a..46a14cbb0215 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { struct phy_device *phydev = to_phy_device(dev); struct phy_driver *phydrv = to_phy_driver(drv); + const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); + int i; if (of_driver_match_device(dev, drv)) return 1; @@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) if (phydrv->match_phy_device) return phydrv->match_phy_device(phydev); - return (phydrv->phy_id & phydrv->phy_id_mask) == - (phydev->phy_id & phydrv->phy_id_mask); + if (phydev->is_c45) { + for (i = 1; i < num_ids; i++) { + if (!(phydev->c45_ids.devices_in_package & (1 << i))) + continue; + + if ((phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->c45_ids.device_ids[i] & + phydrv->phy_id_mask)) + return 1; + } + return 0; + } else { + return (phydrv->phy_id & phydrv->phy_id_mask) == + (phydev->phy_id & phydrv->phy_id_mask); + } } #ifdef CONFIG_PM diff --git 
a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f603f362504b..9d43460ce3c7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -757,6 +757,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ + {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 63c7810e1545..7fbca37a1adf 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev) else vi->hdr_len = sizeof(struct virtio_net_hdr); - if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) vi->any_header_sg = true; if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 5e15e8e10ed3..a31a6804dc34 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah) return; case AR9300_DEVID_QCA956X: ah->hw_version.macVersion = AR_SREV_VERSION_9561; + return; } val = REG_READ(ah, AR_SREV) & AR_SREV_ID; diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d56064861a9c..d45dc021cda2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) #define RX_QUEUE_MASK 255 #define RX_QUEUE_SIZE_LOG 8 +/* + * RX related structures and functions + */ +#define RX_FREE_BUFFERS 64 +#define RX_LOW_WATERMARK 8 + /** * struct iwl_rb_status - reserve buffer status * host memory mapped FH registers diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 80fefe7d7b8c..3b8e85e51002 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c @@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev, hw_addr = (const u8 *)(mac_override + MAC_ADDRESS_OVERRIDE_FAMILY_8000); - /* The byte order is little endian 16 bit, meaning 214365 */ - data->hw_addr[0] = hw_addr[1]; - data->hw_addr[1] = hw_addr[0]; - data->hw_addr[2] = hw_addr[3]; - data->hw_addr[3] = hw_addr[2]; - data->hw_addr[4] = hw_addr[5]; - data->hw_addr[5] = hw_addr[4]; + /* + * Store the MAC address from MAO section. + * No byte swapping is required in MAO section + */ + memcpy(data->hw_addr, hw_addr, ETH_ALEN); /* * Force the use of the OTP MAC address in case of reserved MAC diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 5e4cbdb44c60..737774a01c74 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -660,7 +660,8 @@ struct iwl_scan_config { * iwl_umac_scan_flags *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request * can be preempted by other scan requests with higher priority. 
- * The low priority scan is aborted. + * The low priority scan will be resumed when the higher proirity scan is + * completed. *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver * when scan starts. */ diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5de144968723..5000bfcded61 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cmd->uid = cpu_to_le32(uid); cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); + if (type == IWL_MVM_SCAN_SCHED) + cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); + if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index d68dc697a4a0..26f076e82149 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); u8 sta_id; int ret; + static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; lockdep_assert_held(&mvm->mutex); @@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, end: IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", keyconf->cipher, keyconf->keylen, keyconf->keyidx, - sta->addr, ret); + sta ? sta->addr : zero_addr, ret); return ret; } diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index d24b6a83e68c..e472729e5f14 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c @@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, { lockdep_assert_held(&mvm->time_event_lock); - if (te_data->id == TE_MAX) + if (!te_data->vif) return; list_del(&te_data->list); diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 7ba7a118ff5c..89116864d2a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, if (info->band == IEEE80211_BAND_2GHZ && !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; + rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; else rate_flags = BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2ed1e4d2774d..9f65c1cff1b1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = { /* 3165 Series */ {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, + {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, 
iwl7265_2ac_cfg)}, @@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, - {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 31f72a61cc3f..376b84e54ad7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -44,15 +44,6 @@ #include "iwl-io.h" #include "iwl-op-mode.h" -/* - * RX related structures and functions - */ -#define RX_NUM_QUEUES 1 -#define RX_POST_REQ_ALLOC 2 -#define RX_CLAIM_REQ_ALLOC 8 -#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) -#define RX_LOW_WATERMARK 8 - struct iwl_host_cmd; /*This file includes the declaration that are internal to the @@ -86,29 +77,29 @@ struct isr_statistics { * struct iwl_rxq - Rx queue * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) + * @pool: + * @queue: * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free - * @used_count: Number of RBDs handled to allocator to use for allocation * @write_actual: - * @rx_free: list of RBDs with allocated RB ready for use - * @rx_used: list of RBDs with no RB attached + * @rx_free: list of free SKBs for use + * @rx_used: List of Rx buffers with no SKB * @need_update: flag to indicate we need to update read/write index * @rb_stts: driver's pointer to receive buffer status * @rb_stts_dma: bus address of receive buffer status * @lock: - * @pool: initial pool of iwl_rx_mem_buffer for the queue - * @queue: actual rx queue * * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers */ struct iwl_rxq { __le32 *bd; dma_addr_t bd_dma; + struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; + struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; u32 read; u32 write; u32 free_count; - u32 used_count; u32 write_actual; struct list_head rx_free; struct list_head rx_used; @@ -116,32 +107,6 @@ struct iwl_rxq { struct iwl_rb_status *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; - struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; - struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; -}; - -/** - * struct iwl_rb_allocator - Rx allocator - * @pool: initial pool of allocator - * @req_pending: number of requests the allcator had not processed yet - * @req_ready: number of requests honored and ready for claiming - * @rbd_allocated: RBDs with pages allocated and ready to be handled to - * the queue. This is a list of &struct iwl_rx_mem_buffer - * @rbd_empty: RBDs with no page attached for allocator use. 
This is a list - * of &struct iwl_rx_mem_buffer - * @lock: protects the rbd_allocated and rbd_empty lists - * @alloc_wq: work queue for background calls - * @rx_alloc: work struct for background calls - */ -struct iwl_rb_allocator { - struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; - atomic_t req_pending; - atomic_t req_ready; - struct list_head rbd_allocated; - struct list_head rbd_empty; - spinlock_t lock; - struct workqueue_struct *alloc_wq; - struct work_struct rx_alloc; }; struct iwl_dma_ptr { @@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data - * @rba: allocator for RX replenishing + * @rx_replenish: work that will be called when buffers need to be allocated * @drv - pointer to iwl_drv * @trans: pointer to the generic transport area * @scd_base_addr: scheduler sram base address in SRAM @@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) */ struct iwl_trans_pcie { struct iwl_rxq rxq; - struct iwl_rb_allocator rba; + struct work_struct rx_replenish; struct iwl_trans *trans; struct iwl_drv *drv; diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index a3fbaa0ef5e0..adad8d0fae7f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -1,7 +1,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -74,29 +74,16 @@ * resets the Rx queue buffers with new memory. * * The management in the driver is as follows: - * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. - * When the interrupt handler is called, the request is processed. - * The page is either stolen - transferred to the upper layer - * or reused - added immediately to the iwl->rxq->rx_free list. - * + When the page is stolen - the driver updates the matching queue's used - * count, detaches the RBD and transfers it to the queue used list. - * When there are two used RBDs - they are transferred to the allocator empty - * list. Work is then scheduled for the allocator to start allocating - * eight buffers. - * When there are another 6 used RBDs - they are transferred to the allocator - * empty list and the driver tries to claim the pre-allocated buffers and - * add them to iwl->rxq->rx_free. If it fails - it continues to claim them - * until ready. - * When there are 8+ buffers in the free list - either from allocation or from - * 8 reused unstolen pages - restock is called to update the FW and indexes. - * + In order to make sure the allocator always has RBDs to use for allocation - * the allocator has initial pool in the size of num_queues*(8-2) - the - * maximum missing RBDs per allocation request (request posted with 2 - * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). - * The queues supplies the recycle of the rest of the RBDs. + * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When + * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled + * to replenish the iwl->rxq->rx_free. 
+ * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the + * iwl->rxq is replenished and the READ INDEX is updated (updating the + * 'processed' and 'read' driver indexes as well) * + A received packet is processed and handed to the kernel network stack, * detached from the iwl->rxq. The driver 'processed' index is updated. - * + If there are no allocated buffers in iwl->rxq->rx_free, + * + The Host/Firmware iwl->rxq is replenished at irq thread time from the + * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. * If there were enough free buffers and RX_STALLED is set it is cleared. * @@ -105,32 +92,18 @@ * * iwl_rxq_alloc() Allocates rx_free * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls - * iwl_pcie_rxq_restock. - * Used only during initialization. + * iwl_pcie_rxq_restock * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx * queue, updates firmware pointers, and updates - * the WRITE index. - * iwl_pcie_rx_allocator() Background work for allocating pages. + * the WRITE index. If insufficient rx_free buffers + * are available, schedules iwl_pcie_rx_replenish * * -- enable interrupts -- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the * READ INDEX, detaching the SKB from the pool. * Moves the packet buffer from queue to rx_used. - * Posts and claims requests to the allocator. * Calls iwl_pcie_rxq_restock to refill any empty * slots. - * - * RBD life-cycle: - * - * Init: - * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue - * - * Regular Receive interrupt: - * Page Stolen: - * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> - * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue - * Page not Stolen: - * rxq.queue -> rxq.rx_free -> rxq.queue * ... * */ @@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) rxq->free_count--; } spin_unlock(&rxq->lock); + /* If the pre-allocated buffer pool is dropping low, schedule to + * refill it */ + if (rxq->free_count <= RX_LOW_WATERMARK) + schedule_work(&trans_pcie->rx_replenish); /* If we've added more space for the firmware to place data, tell it. * Increment device's write pointer in multiples of 8. */ @@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) } /* - * iwl_pcie_rx_alloc_page - allocates and returns a page. - * - */ -static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rxq *rxq = &trans_pcie->rxq; - struct page *page; - gfp_t gfp_mask = GFP_KERNEL; - - if (rxq->free_count > RX_LOW_WATERMARK) - gfp_mask |= __GFP_NOWARN; - - if (trans_pcie->rx_page_order > 0) - gfp_mask |= __GFP_COMP; - - /* Alloc a new receive buffer */ - page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); - if (!page) { - if (net_ratelimit()) - IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", - trans_pcie->rx_page_order); - /* Issue an error if the hardware has consumed more than half - * of its free buffer list and we don't have enough - * pre-allocated buffers. -` */ - if (rxq->free_count <= RX_LOW_WATERMARK && - iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && - net_ratelimit()) - IWL_CRIT(trans, - "Failed to alloc_pages with GFP_KERNEL. 
Only %u free buffers remaining.\n", - rxq->free_count); - return NULL; - } - return page; -} - -/* * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD * * A used RBD is an Rx buffer that has been given to the stack. To use it again @@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. */ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) +static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; struct iwl_rx_mem_buffer *rxb; struct page *page; + gfp_t gfp_mask = priority; while (1) { spin_lock(&rxq->lock); @@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) } spin_unlock(&rxq->lock); + if (rxq->free_count > RX_LOW_WATERMARK) + gfp_mask |= __GFP_NOWARN; + + if (trans_pcie->rx_page_order > 0) + gfp_mask |= __GFP_COMP; + /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans); - if (!page) + page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); + if (!page) { + if (net_ratelimit()) + IWL_DEBUG_INFO(trans, "alloc_pages failed, " + "order: %d\n", + trans_pcie->rx_page_order); + + if ((rxq->free_count <= RX_LOW_WATERMARK) && + net_ratelimit()) + IWL_CRIT(trans, "Failed to alloc_pages with %s." + "Only %u free buffers remaining.\n", + priority == GFP_ATOMIC ? + "GFP_ATOMIC" : "GFP_KERNEL", + rxq->free_count); + /* We don't reschedule replenish work here -- we will + * call the restock method and if it still needs + * more buffers it will schedule replenish */ return; + } spin_lock(&rxq->lock); @@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) lockdep_assert_held(&rxq->lock); - for (i = 0; i < RX_QUEUE_SIZE; i++) { + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { if (!rxq->pool[i].page) continue; dma_unmap_page(trans->dev, rxq->pool[i].page_dma, @@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) * When moving to rx_free an page is allocated for the slot. * * Also restock the Rx queue via iwl_pcie_rxq_restock. - * This is called only during initialization + * This is called as a scheduled work item (except for during initialization) */ -static void iwl_pcie_rx_replenish(struct iwl_trans *trans) +static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) { - iwl_pcie_rxq_alloc_rbs(trans); + iwl_pcie_rxq_alloc_rbs(trans, gfp); iwl_pcie_rxq_restock(trans); } -/* - * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues - * - * Allocates for each received request 8 pages - * Called as a scheduled work item. 
- */ -static void iwl_pcie_rx_allocator(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - while (atomic_read(&rba->req_pending)) { - int i; - struct list_head local_empty; - struct list_head local_allocated; - - INIT_LIST_HEAD(&local_allocated); - spin_lock(&rba->lock); - /* swap out the entire rba->rbd_empty to a local list */ - list_replace_init(&rba->rbd_empty, &local_empty); - spin_unlock(&rba->lock); - - for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { - struct iwl_rx_mem_buffer *rxb; - struct page *page; - - /* List should never be empty - each reused RBD is - * returned to the list, and initial pool covers any - * possible gap between the time the page is allocated - * to the time the RBD is added. - */ - BUG_ON(list_empty(&local_empty)); - /* Get the first rxb from the rbd list */ - rxb = list_first_entry(&local_empty, - struct iwl_rx_mem_buffer, list); - BUG_ON(rxb->page); - - /* Alloc a new receive buffer */ - page = iwl_pcie_rx_alloc_page(trans); - if (!page) - continue; - rxb->page = page; - - /* Get physical address of the RB */ - rxb->page_dma = dma_map_page(trans->dev, page, 0, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - if (dma_mapping_error(trans->dev, rxb->page_dma)) { - rxb->page = NULL; - __free_pages(page, trans_pcie->rx_page_order); - continue; - } - /* dma address must be no more than 36 bits */ - BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); - /* and also 256 byte aligned! */ - BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); - - /* move the allocated entry to the out list */ - list_move(&rxb->list, &local_allocated); - i++; - } - - spin_lock(&rba->lock); - /* add the allocated rbds to the allocator allocated list */ - list_splice_tail(&local_allocated, &rba->rbd_allocated); - /* add the unused rbds back to the allocator empty list */ - list_splice_tail(&local_empty, &rba->rbd_empty); - spin_unlock(&rba->lock); - - atomic_dec(&rba->req_pending); - atomic_inc(&rba->req_ready); - } -} - -/* - * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages -.* -.* Called by queue when the queue posted allocation request and - * has freed 8 RBDs in order to restock itself. 
- */ -static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, - struct iwl_rx_mem_buffer - *out[RX_CLAIM_REQ_ALLOC]) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - if (atomic_dec_return(&rba->req_ready) < 0) { - atomic_inc(&rba->req_ready); - IWL_DEBUG_RX(trans, - "Allocation request not ready, pending requests = %d\n", - atomic_read(&rba->req_pending)); - return -ENOMEM; - } - - spin_lock(&rba->lock); - for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { - /* Get next free Rx buffer, remove it from free list */ - out[i] = list_first_entry(&rba->rbd_allocated, - struct iwl_rx_mem_buffer, list); - list_del(&out[i]->list); - } - spin_unlock(&rba->lock); - - return 0; -} - -static void iwl_pcie_rx_allocator_work(struct work_struct *data) +static void iwl_pcie_rx_replenish_work(struct work_struct *data) { - struct iwl_rb_allocator *rba_p = - container_of(data, struct iwl_rb_allocator, rx_alloc); struct iwl_trans_pcie *trans_pcie = - container_of(rba_p, struct iwl_trans_pcie, rba); + container_of(data, struct iwl_trans_pcie, rx_replenish); - iwl_pcie_rx_allocator(trans_pcie->trans); + iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); } static int iwl_pcie_rx_alloc(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; struct device *dev = trans->dev; memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); spin_lock_init(&rxq->lock); - spin_lock_init(&rba->lock); if (WARN_ON(rxq->bd || rxq->rb_stts)) return -EINVAL; @@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) INIT_LIST_HEAD(&rxq->rx_free); INIT_LIST_HEAD(&rxq->rx_used); rxq->free_count = 0; - rxq->used_count = 0; - for (i = 0; i < RX_QUEUE_SIZE; i++) + for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) list_add(&rxq->pool[i].list, &rxq->rx_used); } -static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) -{ - int i; - - lockdep_assert_held(&rba->lock); - - INIT_LIST_HEAD(&rba->rbd_allocated); - INIT_LIST_HEAD(&rba->rbd_empty); - - for (i = 0; i < RX_POOL_SIZE; i++) - list_add(&rba->pool[i].list, &rba->rbd_empty); -} - -static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - int i; - - lockdep_assert_held(&rba->lock); - - for (i = 0; i < RX_POOL_SIZE; i++) { - if (!rba->pool[i].page) - continue; - dma_unmap_page(trans->dev, rba->pool[i].page_dma, - PAGE_SIZE << trans_pcie->rx_page_order, - DMA_FROM_DEVICE); - __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); - rba->pool[i].page = NULL; - } -} - int iwl_pcie_rx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; int i, err; if (!rxq->bd) { @@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) if (err) return err; } - if (!rba->alloc_wq) - rba->alloc_wq = alloc_workqueue("rb_allocator", - WQ_HIGHPRI | WQ_UNBOUND, 1); - INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); - - spin_lock(&rba->lock); - atomic_set(&rba->req_pending, 0); - atomic_set(&rba->req_ready, 0); - /* free all first - we might be reconfigured for a different size */ - iwl_pcie_rx_free_rba(trans); - iwl_pcie_rx_init_rba(rba); - spin_unlock(&rba->lock); spin_lock(&rxq->lock); 
+ INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); + /* free all first - we might be reconfigured for a different size */ iwl_pcie_rxq_free_rbs(trans); iwl_pcie_rx_init_rxb_lists(rxq); @@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); spin_unlock(&rxq->lock); - iwl_pcie_rx_replenish(trans); + iwl_pcie_rx_replenish(trans, GFP_KERNEL); iwl_pcie_rx_hw_init(trans, rxq); @@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - struct iwl_rb_allocator *rba = &trans_pcie->rba; /*if rxq->bd is NULL, it means that nothing has been allocated, * exit now */ @@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) return; } - cancel_work_sync(&rba->rx_alloc); - if (rba->alloc_wq) { - destroy_workqueue(rba->alloc_wq); - rba->alloc_wq = NULL; - } - - spin_lock(&rba->lock); - iwl_pcie_rx_free_rba(trans); - spin_unlock(&rba->lock); + cancel_work_sync(&trans_pcie->rx_replenish); spin_lock(&rxq->lock); iwl_pcie_rxq_free_rbs(trans); @@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) rxq->rb_stts = NULL; } -/* - * iwl_pcie_rx_reuse_rbd - Recycle used RBDs - * - * Called when a RBD can be reused. The RBD is transferred to the allocator. - * When there are 2 empty RBDs - a request for allocation is posted - */ -static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, - struct iwl_rx_mem_buffer *rxb, - struct iwl_rxq *rxq) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - - /* Count the used RBDs */ - rxq->used_count++; - - /* Move the RBD to the used list, will be moved to allocator in batches - * before claiming or posting a request*/ - list_add_tail(&rxb->list, &rxq->rx_used); - - /* If we have RX_POST_REQ_ALLOC new released rx buffers - - * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is - * used for the case we failed to claim RX_CLAIM_REQ_ALLOC, - * after but we still need to post another request. - */ - if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { - /* Move the 2 RBDs to the allocator ownership. 
- Allocator has another 6 from pool for the request completion*/ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); - - atomic_inc(&rba->req_pending); - queue_work(rba->alloc_wq, &rba->rx_alloc); - } -} - static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb) { @@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, */ __free_pages(rxb->page, trans_pcie->rx_page_order); rxb->page = NULL; - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); + list_add_tail(&rxb->list, &rxq->rx_used); } else { list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; } } else - iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); + list_add_tail(&rxb->list, &rxq->rx_used); } /* @@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rxq *rxq = &trans_pcie->rxq; - u32 r, i, j; + u32 r, i; + u8 fill_rx = 0; + u32 count = 8; + int total_empty; restart: spin_lock(&rxq->lock); @@ -957,6 +720,14 @@ restart: if (i == r) IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); + /* calculate total frames need to be restock after handling RX */ + total_empty = r - rxq->write_actual; + if (total_empty < 0) + total_empty += RX_QUEUE_SIZE; + + if (total_empty > (RX_QUEUE_SIZE / 2)) + fill_rx = 1; + while (i != r) { struct iwl_rx_mem_buffer *rxb; @@ -968,48 +739,29 @@ restart: iwl_pcie_rx_handle_rb(trans, rxb); i = (i + 1) & RX_QUEUE_MASK; - - /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - - * try to claim the pre-allocated buffers from the allocator */ - if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { - struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; - - /* Add the remaining 6 empty RBDs for allocator use */ - spin_lock(&rba->lock); - list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); - spin_unlock(&rba->lock); - - /* If not ready - continue, will try to reclaim later. - * No need to reschedule work - allocator exits only on - * success */ - if (!iwl_pcie_rx_allocator_get(trans, out)) { - /* If success - then RX_CLAIM_REQ_ALLOC - * buffers were retrieved and should be added - * to free list */ - rxq->used_count -= RX_CLAIM_REQ_ALLOC; - for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { - list_add_tail(&out[j]->list, - &rxq->rx_free); - rxq->free_count++; - } + /* If there are a lot of unused frames, + * restock the Rx queue so ucode wont assert. 
*/ + if (fill_rx) { + count++; + if (count >= 8) { + rxq->read = i; + spin_unlock(&rxq->lock); + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + count = 0; + goto restart; } } - /* handle restock for two cases: - * - we just pulled buffers from the allocator - * - we have 8+ unstolen pages accumulated */ - if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { - rxq->read = i; - spin_unlock(&rxq->lock); - iwl_pcie_rxq_restock(trans); - goto restart; - } } /* Backtrack one entry */ rxq->read = i; spin_unlock(&rxq->lock); + if (fill_rx) + iwl_pcie_rx_replenish(trans, GFP_ATOMIC); + else + iwl_pcie_rxq_restock(trans); + if (trans_pcie->napi.poll) napi_gro_flush(&trans_pcie->napi, false); } diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 43ae658af6ec..6203c4ad9bba 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) { - if (!trans->cfg->apmg_not_supported) + if (trans->cfg->apmg_not_supported) return; if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) @@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; u16 pci_cmd; - int err; + int ret; trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg, &trans_ops_pcie, 0); @@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, spin_lock_init(&trans_pcie->ref_lock); init_waitqueue_head(&trans_pcie->ucode_write_waitq); - err = pci_enable_device(pdev); - if (err) + ret = pci_enable_device(pdev); + if (ret) goto out_no_pci; if (!cfg->base_params->pcie_l1_allowed) { @@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); - if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (!ret) + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); /* both attempts failed: */ - if (err) { + if (ret) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto out_pci_disable_device; } } - err = pci_request_regions(pdev, DRV_NAME); - if (err) { + ret = pci_request_regions(pdev, DRV_NAME); + if (ret) { dev_err(&pdev->dev, "pci_request_regions failed\n"); goto out_pci_disable_device; } @@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); if (!trans_pcie->hw_base) { dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); - err = -ENODEV; + ret = -ENODEV; goto out_pci_release_regions; } @@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans_pcie->pci_dev = pdev; iwl_disable_interrupts(trans); - err = pci_enable_msi(pdev); - if (err) { - dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); + ret = pci_enable_msi(pdev); + if (ret) { + dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); /* enable rfkill interrupt: hw bug w/a */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { @@ -2547,11 +2547,16 @@ 
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, */ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { unsigned long flags; - int ret; trans->hw_rev = (trans->hw_rev & 0xfff0) | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); + ret = iwl_pcie_prepare_card_hw(trans); + if (ret) { + IWL_WARN(trans, "Exit HW not ready\n"); + goto out_pci_disable_msi; + } + /* * in-order to recognize C step driver should read chip version * id located at the AUX bus MISC address space. @@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, /* Initialize the wait queue for commands */ init_waitqueue_head(&trans_pcie->wait_command_queue); - if (iwl_pcie_alloc_ict(trans)) + ret = iwl_pcie_alloc_ict(trans); + if (ret) goto out_pci_disable_msi; - err = request_threaded_irq(pdev->irq, iwl_pcie_isr, + ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, iwl_pcie_irq_handler, IRQF_SHARED, DRV_NAME, trans); - if (err) { + if (ret) { IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); goto out_free_ict; } @@ -2617,5 +2623,5 @@ out_pci_disable_device: pci_disable_device(pdev); out_no_pci: iwl_trans_free(trans); - return ERR_PTR(err); + return ERR_PTR(ret); } diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 880d0d63e872..7d50711476fe 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) smp_rmb(); while (dc != dp) { - BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); + BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); pending_idx = queue->dealloc_ring[pending_index(dc++)]; - pending_idx_release[gop-queue->tx_unmap_ops] = + pending_idx_release[gop - queue->tx_unmap_ops] = pending_idx; - queue->pages_to_unmap[gop-queue->tx_unmap_ops] = + queue->pages_to_unmap[gop - queue->tx_unmap_ops] = queue->mmap_pages[pending_idx]; gnttab_set_unmap_op(gop, idx_to_kaddr(queue, pending_idx), diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index a5233422f9dc..7384455792bf 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, nvdimm_bus_unlock(dev); } if (is_nd_btt(dev) && probe) { + struct nd_btt *nd_btt = to_nd_btt(dev); + nd_region = to_nd_region(dev->parent); nvdimm_bus_lock(dev); if (nd_region->btt_seed == dev) nd_region_create_btt_seed(nd_region); + if (nd_region->ns_seed == &nd_btt->ndns->dev && + is_nd_blk(dev->parent)) + nd_region_create_blk_seed(nd_region); nvdimm_bus_unlock(dev); } } diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 8067f54ce050..5ce5ef211bdb 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name, par_dev->dev.release = free_pardevice; par_dev->devmodel = true; ret = device_register(&par_dev->dev); - if (ret) - goto err_put_dev; + if (ret) { + put_device(&par_dev->dev); + goto err_put_port; + } /* Chain this onto the list */ par_dev->prev = NULL; @@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name, spin_unlock(&port->physport->pardevice_lock); pr_debug("%s: cannot grant exclusive access for device %s\n", port->name, name); - goto err_put_dev; + device_unregister(&par_dev->dev); + goto err_put_port; } port->flags |= PARPORT_FLAG_EXCL; } @@ -938,8 +941,6 @@ parport_register_dev_model(struct parport 
*port, const char *name, return par_dev; -err_put_dev: - put_device(&par_dev->dev); err_free_devname: kfree(devname); err_free_par_dev: diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index c0e6ede3e27d..6b8dd162f644 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO config PHY_PXA_28NM_HSIC tristate "Marvell USB HSIC 28nm PHY Driver" + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support Marvell USB HSIC PHY driver for Marvell @@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC config PHY_PXA_28NM_USB2 tristate "Marvell USB 2.0 28nm PHY Driver" + depends on HAS_IOMEM select GENERIC_PHY help Enable this to support Marvell USB 2.0 PHY driver for Marvell diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c index c6fc95b53083..335e06d66ed9 100644 --- a/drivers/phy/phy-berlin-usb.c +++ b/drivers/phy/phy-berlin-usb.c @@ -105,9 +105,9 @@ static const u32 phy_berlin_pll_dividers[] = { /* Berlin 2 */ - CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), - /* Berlin 2CD */ CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), + /* Berlin 2CD/Q */ + CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), }; struct phy_berlin_usb_priv { diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 53f295c1bab1..3510b81db3fa 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -28,7 +28,6 @@ #include <linux/delay.h> #include <linux/phy/omap_control_phy.h> #include <linux/of_platform.h> -#include <linux/spinlock.h> #define PLL_STATUS 0x00000004 #define PLL_GO 0x00000008 @@ -83,10 +82,6 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; - bool enabled; - spinlock_t lock; /* serialize clock enable/disable */ - /* the below flag is needed specifically for SATA */ - bool refclk_enabled; }; static struct pipe3_dpll_map dpll_map_usb[] = { @@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy) return NULL; } +static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy); +static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy); + static int ti_pipe3_power_off(struct phy *x) { struct ti_pipe3 *phy = phy_get_drvdata(x); @@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x) u32 val; int ret = 0; + ti_pipe3_enable_clocks(phy); /* * Set pcie_pcs register to 0x96 for proper functioning of phy * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table @@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x) u32 val; unsigned long timeout; - /* SATA DPLL can't be powered down due to Errata i783 and PCIe - * does not have internal DPLL - */ - if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || - of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) + /* SATA DPLL can't be powered down due to Errata i783 */ + if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) return 0; - /* Put DPLL in IDLE mode */ - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); - val |= PLL_IDLE; - ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); - - /* wait for LDO and Oscillator to power down */ - timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); - do { - cpu_relax(); - val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); - if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) - break; - } while (!time_after(jiffies, timeout)); + /* PCIe doesn't have internal DPLL */ + if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { + /* Put DPLL in IDLE mode */ + val = 
ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); + val |= PLL_IDLE; + ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); - if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { - dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", - val); - return -EBUSY; + /* wait for LDO and Oscillator to power down */ + timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); + do { + cpu_relax(); + val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); + if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) + break; + } while (!time_after(jiffies, timeout)); + + if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { + dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", + val); + return -EBUSY; + } } + ti_pipe3_disable_clocks(phy); + return 0; } static struct phy_ops ops = { @@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev) return -ENOMEM; phy->dev = &pdev->dev; - spin_lock_init(&phy->lock); if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { match = of_match_device(ti_pipe3_id_table, &pdev->dev); @@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, phy); pm_runtime_enable(phy->dev); + /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ + if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) + if (!IS_ERR(phy->refclk)) + clk_prepare_enable(phy->refclk); generic_phy = devm_phy_create(phy->dev, NULL, &ops); if (IS_ERR(generic_phy)) @@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev) if (IS_ERR(phy_provider)) return PTR_ERR(phy_provider); - pm_runtime_get(&pdev->dev); - return 0; } static int ti_pipe3_remove(struct platform_device *pdev) { - if (!pm_runtime_suspended(&pdev->dev)) - pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); return 0; } -#ifdef CONFIG_PM -static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy) +static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) { - if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { - int ret; + int ret = 0; + if (!IS_ERR(phy->refclk)) { ret = clk_prepare_enable(phy->refclk); if (ret) { dev_err(phy->dev, "Failed to enable refclk %d\n", ret); return ret; } - phy->refclk_enabled = true; } - return 0; -} - -static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy) -{ - if (!IS_ERR(phy->refclk)) - clk_disable_unprepare(phy->refclk); - - phy->refclk_enabled = false; -} - -static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) -{ - int ret = 0; - unsigned long flags; - - spin_lock_irqsave(&phy->lock, flags); - if (phy->enabled) - goto err1; - - ret = ti_pipe3_enable_refclk(phy); - if (ret) - goto err1; - if (!IS_ERR(phy->wkupclk)) { ret = clk_prepare_enable(phy->wkupclk); if (ret) { dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); - goto err2; + goto disable_refclk; } } @@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) ret = clk_prepare_enable(phy->div_clk); if (ret) { dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); - goto err3; + goto disable_wkupclk; } } - phy->enabled = true; - spin_unlock_irqrestore(&phy->lock, flags); return 0; -err3: +disable_wkupclk: if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); -err2: +disable_refclk: if (!IS_ERR(phy->refclk)) clk_disable_unprepare(phy->refclk); - ti_pipe3_disable_refclk(phy); -err1: - spin_unlock_irqrestore(&phy->lock, flags); return ret; } static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) { - unsigned long flags; - - spin_lock_irqsave(&phy->lock, flags); - if (!phy->enabled) { - 
spin_unlock_irqrestore(&phy->lock, flags); - return; - } - if (!IS_ERR(phy->wkupclk)) clk_disable_unprepare(phy->wkupclk); - /* Don't disable refclk for SATA PHY due to Errata i783 */ - if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) - ti_pipe3_disable_refclk(phy); + if (!IS_ERR(phy->refclk)) + clk_disable_unprepare(phy->refclk); if (!IS_ERR(phy->div_clk)) clk_disable_unprepare(phy->div_clk); - phy->enabled = false; - spin_unlock_irqrestore(&phy->lock, flags); -} - -static int ti_pipe3_runtime_suspend(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - - ti_pipe3_disable_clocks(phy); - return 0; } -static int ti_pipe3_runtime_resume(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - int ret = 0; - - ret = ti_pipe3_enable_clocks(phy); - return ret; -} - -static int ti_pipe3_suspend(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - - ti_pipe3_disable_clocks(phy); - return 0; -} - -static int ti_pipe3_resume(struct device *dev) -{ - struct ti_pipe3 *phy = dev_get_drvdata(dev); - int ret; - - ret = ti_pipe3_enable_clocks(phy); - if (ret) - return ret; - - pm_runtime_disable(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - return 0; -} -#endif - -static const struct dev_pm_ops ti_pipe3_pm_ops = { - SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend, - ti_pipe3_runtime_resume, NULL) - SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume) -}; - static const struct of_device_id ti_pipe3_id_table[] = { { .compatible = "ti,phy-usb3", @@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = { .remove = ti_pipe3_remove, .driver = { .name = "ti-pipe3", - .pm = &ti_pipe3_pm_ops, .of_match_table = ti_pipe3_id_table, }, }; diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index efcf2a2b3975..6177315ab74e 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data) spin_lock_irqsave(&pc->irq_lock[bank], flags); bcm2835_gpio_irq_config(pc, gpio, false); + /* Clear events that were latched prior to clearing event sources */ + bcm2835_gpio_set_bit(pc, GPEDS0, gpio); clear_bit(offset, &pc->enabled_irq_map[bank]); spin_unlock_irqrestore(&pc->irq_lock[bank], flags); } diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 5fd4437cee15..88a7fac11bd4 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c @@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev, unsigned num_configs) { struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); - const struct imx1_pinctrl_soc_info *info = ipctl->info; int i; for (i = 0; i != num_configs; ++i) { imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", - info->pins[pin_id].name); + pin_desc_get(pctldev, pin_id)->name); } return 0; diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 557d0f2a3031..97681fac082e 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c @@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = { .set_mux = abx500_pmx_set, .gpio_request_enable = abx500_gpio_request_enable, .gpio_disable_free = abx500_gpio_disable_free, - .strict = true, }; static int abx500_get_groups_cnt(struct 
pinctrl_dev *pctldev) diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index ef0b697639a7..347c763a6a78 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (param) + if (param_val) *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); else *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); @@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - if (param) + if (param_val) *reg &= ~LPC18XX_SCU_PIN_ZIF; else *reg |= LPC18XX_SCU_PIN_ZIF; diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index b2de09d3b1a0..0b8d480171a3 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs, int res; res = request_irq(pcs_soc->irq, pcs_irq_handler, - IRQF_SHARED | IRQF_NO_SUSPEND, + IRQF_SHARED | IRQF_NO_SUSPEND | + IRQF_NO_THREAD, name, pcs_soc); if (res) { pcs_soc->irq = -1; diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 3dd5a3b2ac62..c760bf43d116 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -33,11 +33,6 @@ #include "../core.h" #include "pinctrl-samsung.h" -#define GROUP_SUFFIX "-grp" -#define GSUFFIX_LEN sizeof(GROUP_SUFFIX) -#define FUNCTION_SUFFIX "-mux" -#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX) - /* list of all possible config options supported */ static struct pin_config { const char *property; diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index c7508d5f6886..0874cfee6889 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -224,7 +224,7 @@ struct sh_pfc_soc_info { /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ #define _GP_GPIO(bank, _pin, _name, sfx) \ - [(bank * 32) + _pin] = { \ + { \ .pin = (bank * 32) + _pin, \ .name = __stringify(_name), \ .enum_id = _name##_DATA, \ diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c index 832932bdc977..7fd4f511d78f 100644 --- a/drivers/regulator/88pm800.c +++ b/drivers/regulator/88pm800.c @@ -130,7 +130,7 @@ struct pm800_regulators { .owner = THIS_MODULE, \ .n_voltages = ARRAY_SIZE(ldo_volt_table), \ .vsel_reg = PM800_##vreg##_VOUT, \ - .vsel_mask = 0x1f, \ + .vsel_mask = 0xf, \ .enable_reg = PM800_##ereg, \ .enable_mask = 1 << (ebit), \ .volt_table = ldo_volt_table, \ diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index c9f72019bd68..78387a6cbae5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, static struct regulator *create_regulator(struct regulator_dev *rdev, struct device *dev, const char *supply_name); +static void _regulator_put(struct regulator *regulator); static const char *rdev_get_name(struct regulator_dev *rdev) { @@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev, rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); + if (!try_module_get(supply_rdev->owner)) + return -ENODEV; + rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); if (rdev->supply == NULL) { err = -ENOMEM; @@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) } if (!r) { 
- dev_err(dev, "Failed to resolve %s-supply for %s\n", - rdev->supply_name, rdev->desc->name); - return -EPROBE_DEFER; + if (have_full_constraints()) { + r = dummy_regulator_rdev; + } else { + dev_err(dev, "Failed to resolve %s-supply for %s\n", + rdev->supply_name, rdev->desc->name); + return -EPROBE_DEFER; + } } /* Recursively resolve the supply of the supply */ @@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) /* Cascade always-on state to supply */ if (_regulator_is_enabled(rdev)) { ret = regulator_enable(rdev->supply); - if (ret < 0) + if (ret < 0) { + if (rdev->supply) + _regulator_put(rdev->supply); return ret; + } } return 0; diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index 6f2bdad8b4d8..e94ddcf97722 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c @@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt( pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; if (of_property_read_bool(np, "maxim,enable-bias-control")) - pdata->control_flags |= MAX8973_BIAS_ENABLE; + pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE; return pdata; } diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 326ffb553371..72fc3c32db49 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -34,6 +34,8 @@ #include <linux/mfd/samsung/s2mps14.h> #include <linux/mfd/samsung/s2mpu02.h> +/* The highest number of possible regulators for supported devices. */ +#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX struct s2mps11_info { unsigned int rdev_num; int ramp_delay2; @@ -49,7 +51,7 @@ struct s2mps11_info { * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether * the suspend mode was enabled. 
*/ - unsigned long long s2mps14_suspend_state:50; + DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX); /* Array of size rdev_num with GPIO-s for external sleep control */ int *ext_control_gpio; @@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) switch (s2mps11->dev_type) { case S2MPS13X: case S2MPS14X: - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) val = S2MPS14_ENABLE_SUSPEND; else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) val = S2MPS14_ENABLE_EXT_CONTROL; @@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) val = rdev->desc->enable_mask; break; case S2MPU02: - if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) + if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) val = S2MPU02_ENABLE_SUSPEND; else val = rdev->desc->enable_mask; @@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev) if (ret < 0) return ret; - s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); + set_bit(rdev_get_id(rdev), s2mps11->suspend_state); /* * Don't enable suspend mode if regulator is already disabled because * this would effectively for a short time turn on the regulator after @@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) case S2MPS11X: s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); regulators = s2mps11_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPS13X: s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); regulators = s2mps13_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPS14X: s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); regulators = s2mps14_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; case S2MPU02: s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); regulators = s2mpu02_regulators; + BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); break; default: dev_err(&pdev->dev, "Invalid device type: %u\n", diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 95bccfd3f169..e5225ad9c5b1 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile @@ -2,7 +2,7 @@ # Makefile for the S/390 specific device drivers # -obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ +obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/ drivers-y += drivers/s390/built-in.o diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile index 241891a57caf..241891a57caf 100644 --- a/drivers/s390/kvm/Makefile +++ b/drivers/s390/virtio/Makefile diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 53fb975c404b..53fb975c404b 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index f8d8fdb26b72..f8d8fdb26b72 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 285f77544c36..7dbbb29d24c6 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev) { struct Scsi_Host *shost; struct virtio_scsi *vscsi; - int err, host_prot; + int err; u32 sg_elems, num_targets; u32 cmd_per_lun; u32 num_queues; @@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev) #ifdef CONFIG_BLK_DEV_INTEGRITY if 
(virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + int host_prot; + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 0cae1694014d..b0f30fb68914 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA config SPI_ZYNQMP_GQSPI tristate "Xilinx ZynqMP GQSPI controller" - depends on SPI_MASTER + depends on SPI_MASTER && HAS_DMA help Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 788e2b176a4f..acce90ac7371 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -40,6 +40,7 @@ #define SPFI_CONTROL_SOFT_RESET BIT(11) #define SPFI_CONTROL_SEND_DMA BIT(10) #define SPFI_CONTROL_GET_DMA BIT(9) +#define SPFI_CONTROL_SE BIT(8) #define SPFI_CONTROL_TMODE_SHIFT 5 #define SPFI_CONTROL_TMODE_MASK 0x7 #define SPFI_CONTROL_TMODE_SINGLE 0 @@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, else if (xfer->tx_nbits == SPI_NBITS_QUAD && xfer->rx_nbits == SPI_NBITS_QUAD) val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; + val |= SPFI_CONTROL_SE; spfi_writel(spfi, val, SPFI_CONTROL); } diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index eb7d3a6fb14c..f9deb84e4e55 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, { struct spi_imx_data *spi_imx = spi_master_get_devdata(master); - if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) - && (transfer->len > spi_imx->tx_wml)) + if (spi_imx->dma_is_inited + && transfer->len > spi_imx->rx_wml * sizeof(u32) + && transfer->len > spi_imx->tx_wml * sizeof(u32)) return true; return false; } diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 87b20a511a6b..f23f36ebaf3d 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c @@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, case GQSPI_SELECT_FLASH_CS_BOTH: instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | GQSPI_GENFIFO_CS_UPPER; + break; case GQSPI_SELECT_FLASH_CS_UPPER: instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; break; diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index dd616ff0ffc5..c7de64171c45 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -693,6 +693,7 @@ static struct class *spidev_class; #ifdef CONFIG_OF static const struct of_device_id spidev_dt_ids[] = { { .compatible = "rohm,dh2228fv" }, + { .compatible = "lineartechnology,ltc2488" }, {}, }; MODULE_DEVICE_TABLE(of, spidev_dt_ids); diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index c9c27f69e101..ee8bfacf2071 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty) * Locking: ctrl_lock */ -static void isig(int sig, struct tty_struct *tty) +static void __isig(int sig, struct tty_struct *tty) { - struct n_tty_data *ldata = tty->disc_data; struct pid *tty_pgrp = tty_get_pgrp(tty); if (tty_pgrp) { kill_pgrp(tty_pgrp, sig, 1); put_pid(tty_pgrp); } +} - if (!L_NOFLSH(tty)) { +static void isig(int sig, struct tty_struct *tty) +{ + struct n_tty_data *ldata = tty->disc_data; + + if (L_NOFLSH(tty)) { + /* signal only 
*/ + __isig(sig, tty); + + } else { /* signal and flush */ up_read(&tty->termios_rwsem); down_write(&tty->termios_rwsem); + __isig(sig, tty); + /* clear echo buffer */ mutex_lock(&ldata->output_lock); ldata->echo_head = ldata->echo_tail = 0; diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 76e65b714471..15b4079a335e 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE config SERIAL_SC16IS7XX tristate "SC16IS7xx serial support" select SERIAL_CORE - depends on I2C || SPI_MASTER + depends on (SPI_MASTER && !I2C) || I2C help This selects support for SC16IS7xx serial ports. Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 50cf5b10ceed..fd27e986b1dd 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, void __iomem *base; base = devm_ioremap_resource(dev, mmiobase); - if (!base) - return -ENOMEM; + if (IS_ERR(base)) + return PTR_ERR(base); index = pl011_probe_dt_alias(index, dev); diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c index a57301a6fe42..679709f51fd4 100644 --- a/drivers/tty/serial/etraxfs-uart.c +++ b/drivers/tty/serial/etraxfs-uart.c @@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev) port = platform_get_drvdata(pdev); uart_remove_one_port(&etraxfs_uart_driver, port); - etraxfs_uart_ports[pdev->id] = NULL; + etraxfs_uart_ports[port->line] = NULL; return 0; } diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 2c90dc31bfaa..54fdc7866ea1 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port) writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); - /* Can we enable the DMA support? */ - if (is_imx6q_uart(sport) && !uart_console(port) && - !sport->dma_is_inited) - imx_uart_dma_init(sport); - spin_lock_irqsave(&sport->port.lock, flags); /* Reset fifo's and state machines */ i = 100; @@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port) writel(USR1_RTSD, sport->port.membase + USR1); writel(USR2_ORE, sport->port.membase + USR2); - if (sport->dma_is_inited && !sport->dma_is_enabled) - imx_enable_dma(sport); - temp = readl(sport->port.membase + UCR1); temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; @@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, } else { ucr2 |= UCR2_CTSC; } + + /* Can we enable the DMA support? 
*/ + if (is_imx6q_uart(sport) && !uart_console(port) + && !sport->dma_is_inited) + imx_uart_dma_init(sport); } else { termios->c_cflag &= ~CRTSCTS; } @@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) imx_enable_ms(&sport->port); + if (sport->dma_is_inited && !sport->dma_is_enabled) + imx_enable_dma(sport); spin_unlock_irqrestore(&sport->port.lock, flags); } diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 9e6576004a42..5ccc698cbbfa 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val) (reg << SC16IS7XX_REG_SHIFT) | port->line, val); } +static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line; + + regcache_cache_bypass(s->regmap, true); + regmap_raw_read(s->regmap, addr, s->buf, rxlen); + regcache_cache_bypass(s->regmap, false); +} + +static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send) +{ + struct sc16is7xx_port *s = dev_get_drvdata(port->dev); + u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line; + + regcache_cache_bypass(s->regmap, true); + regmap_raw_write(s->regmap, addr, s->buf, to_send); + regcache_cache_bypass(s->regmap, false); +} + static void sc16is7xx_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val) { @@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); bytes_read = 1; } else { - regcache_cache_bypass(s->regmap, true); - regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG, - s->buf, rxlen); - regcache_cache_bypass(s->regmap, false); + sc16is7xx_fifo_read(port, rxlen); bytes_read = rxlen; } @@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port) s->buf[i] = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); } - regcache_cache_bypass(s->regmap, true); - regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); - regcache_cache_bypass(s->regmap, false); + + sc16is7xx_fifo_write(port, to_send); } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 7ae1592f7ec9..f36852067f20 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) mutex_lock(&port->mutex); uart_shutdown(tty, state); tty_port_tty_set(port, NULL); - tty->closing = 0; + spin_lock_irqsave(&port->lock, flags); if (port->blocked_open) { @@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) mutex_unlock(&port->mutex); tty_ldisc_flush(tty); + tty->closing = 0; } static void uart_wait_until_sent(struct tty_struct *tty, int timeout) diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index ea27804d87af..381a2b13682c 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c @@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty) schedule(); continue; } + __set_current_state(TASK_RUNNING); count = sel_buffer_lth - pasted; count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, count); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8fe52989b380..4462d167900c 100644 --- 
a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init) __module_get(vc->vc_sw->owner); vc->vc_num = num; vc->vc_display_fg = &master_display_fg; + if (vc->vc_uni_pagedir_loc) + con_free_unimap(vc); vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; vc->vc_uni_pagedir = NULL; vc->vc_hi_font_mask = 0; diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 519a77ba214c..b30e7423549b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1944,6 +1944,7 @@ static void __exit acm_exit(void) usb_deregister(&acm_driver); tty_unregister_driver(acm_tty_driver); put_tty_driver(acm_tty_driver); + idr_destroy(&acm_minors); } module_init(acm_init); diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 0e6f968e93fe..01c0c0477a9e 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -242,7 +242,7 @@ static int __init ulpi_init(void) { return bus_register(&ulpi_bus); } -module_init(ulpi_init); +subsys_initcall(ulpi_init); static void __exit ulpi_exit(void) { diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 3204a4dcfeb4..4d64e5c499e1 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd) dev_name(&usb_dev->dev), retval); return (retval < 0) ? retval : -EMSGSIZE; } - if (usb_dev->speed == USB_SPEED_SUPER) { + + if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) { retval = usb_get_bos_descriptor(usb_dev); - if (retval < 0) { + if (!retval) { + usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); + } else if (usb_dev->speed == USB_SPEED_SUPER) { mutex_unlock(&usb_bus_list_lock); dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", dev_name(&usb_dev->dev), retval); diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d9ce8f9d9acc..917c27c4675a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) return usb_get_intfdata(hdev->actconfig->interface[0]); } -static int usb_device_supports_lpm(struct usb_device *udev) +int usb_device_supports_lpm(struct usb_device *udev) { /* USB 2.1 (and greater) devices indicate LPM support through * their USB 2.0 Extended Capabilities BOS descriptor. 
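The hcd.c and hub.c hunks above change the root-hub BOS-descriptor read from a SuperSpeed-only check to a bcdUSB >= 0x0201 check, and make usb_device_supports_lpm() non-static so hcd.c can mark USB 2.01+ root hubs as LPM capable. A minimal sketch of that gating follows, assuming the usual struct usb_device fields and omitting the SuperSpeed error path that register_root_hub() keeps:

	/* Illustrative sketch only: read the BOS descriptor and detect LPM
	 * for any device reporting USB 2.01 or later, mirroring the
	 * register_root_hub() change in the hunk above. */
	if (le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) {
		if (!usb_get_bos_descriptor(udev))
			udev->lpm_capable = usb_device_supports_lpm(udev);
	}
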
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 7eb1e26798e5..457255a3306a 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -65,6 +65,7 @@ extern int usb_hub_init(void); extern void usb_hub_cleanup(void); extern int usb_major_init(void); extern void usb_major_cleanup(void); +extern int usb_device_supports_lpm(struct usb_device *udev); #ifdef CONFIG_PM diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 2ef3c8d6a9db..69e769c35cf5 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); break; + case USB_REQ_SET_INTERFACE: + dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE"); + dwc->start_config_issued = false; + /* Fall through */ default: dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); ret = dwc3_ep0_delegate_req(dwc, ctrl); diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index d32160d6463f..5da37c957b53 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c @@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev) return -ENODEV; } - udc->phy_regs = ioremap(r->start, resource_size(r)); + udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); if (udc->phy_regs == NULL) { dev_err(&pdev->dev, "failed to map phy I/O memory\n"); return -EBUSY; diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index d69c35558f68..362ee8af5fce 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock); int usb_gadget_map_request(struct usb_gadget *gadget, struct usb_request *req, int is_in) { + struct device *dev = gadget->dev.parent; + if (req->length == 0) return 0; if (req->num_sgs) { int mapped; - mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, + mapped = dma_map_sg(dev, req->sg, req->num_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); if (mapped == 0) { dev_err(&gadget->dev, "failed to map SGs\n"); @@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget, req->num_mapped_sgs = mapped; } else { - req->dma = dma_map_single(&gadget->dev, req->buf, req->length, + req->dma = dma_map_single(dev, req->buf, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); - if (dma_mapping_error(&gadget->dev, req->dma)) { - dev_err(&gadget->dev, "failed to map buffer\n"); + if (dma_mapping_error(dev, req->dma)) { + dev_err(dev, "failed to map buffer\n"); return -EFAULT; } } @@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget, return; if (req->num_mapped_sgs) { - dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, + dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); req->num_mapped_sgs = 0; } else { - dma_unmap_single(&gadget->dev, req->dma, req->length, + dma_unmap_single(gadget->dev.parent, req->dma, req->length, is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); } } diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index f7d561ed3c23..d029bbe9eb36 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c @@ -981,10 +981,6 @@ rescan_all: int completed, modified; __hc32 *prev; - /* Is this ED already invisible to the hardware? 
*/ - if (ed->state == ED_IDLE) - goto ed_idle; - /* only take off EDs that the HC isn't using, accounting for * frame counter wraps and EDs with partially retired TDs */ @@ -1012,12 +1008,10 @@ skip_ed: } /* ED's now officially unlinked, hc doesn't see */ - ed->state = ED_IDLE; ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); ed->hwNextED = 0; wmb(); ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); -ed_idle: /* reentrancy: if we drop the schedule lock, someone might * have modified this list. normally it's just prepending @@ -1088,6 +1082,7 @@ rescan_this: if (list_empty(&ed->td_list)) { *last = ed->ed_next; ed->ed_next = NULL; + ed->state = ED_IDLE; list_del(&ed->in_use_list); } else if (ohci->rh_state == OHCI_RH_RUNNING) { *last = ed->ed_next; diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index e9a6eec39142..cfcfadfc94fc 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c @@ -58,7 +58,7 @@ #define CCR_PM_CKRNEN 0x0002 #define CCR_PM_USBPW1 0x0004 #define CCR_PM_USBPW2 0x0008 -#define CCR_PM_USBPW3 0x0008 +#define CCR_PM_USBPW3 0x0010 #define CCR_PM_PMEE 0x0100 #define CCR_PM_PMES 0x8000 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e75c565feb53..78241b5550df 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, u32 pls = status_reg & PORT_PLS_MASK; /* resume state is a xHCI internal state. - * Do not report it to usb core. + * Do not report it to usb core, instead, pretend to be U3, + * thus usb core knows it's not ready for transfer */ - if (pls == XDEV_RESUME) + if (pls == XDEV_RESUME) { + *status |= USB_SS_PORT_LS_U3; return; + } /* When the CAS bit is set then warm reset * should be performed on port @@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, status |= USB_PORT_STAT_C_RESET << 16; /* USB3.0 only */ if (hcd->speed == HCD_USB3) { - if ((raw_port_status & PORT_PLC)) + /* Port link change with port in resume state should not be + * reported to usbcore, as this is an internal state to be + * handled by xhci driver. Reporting PLC to usbcore may + * cause usbcore clearing PLC first and port change event + * irq won't be generated. 
+ */ + if ((raw_port_status & PORT_PLC) && + (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) status |= USB_PORT_STAT_C_LINK_STATE << 16; if ((raw_port_status & PORT_WRC)) status |= USB_PORT_STAT_C_BH_RESET << 16; @@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd) spin_lock_irqsave(&xhci->lock, flags); if (hcd->self.root_hub->do_remote_wakeup) { - if (bus_state->resuming_ports) { + if (bus_state->resuming_ports || /* USB2 */ + bus_state->port_remote_wakeup) { /* USB3 */ spin_unlock_irqrestore(&xhci->lock, flags); - xhci_dbg(xhci, "suspend failed because " - "a port is resuming\n"); + xhci_dbg(xhci, "suspend failed because a port is resuming\n"); return -EBUSY; } } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f8336408ef07..3e442f77a2b9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, /* Attempt to use the ring cache */ if (virt_dev->num_rings_cached == 0) return -ENOMEM; + virt_dev->num_rings_cached--; virt_dev->eps[ep_index].new_ring = virt_dev->ring_cache[virt_dev->num_rings_cached]; virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; - virt_dev->num_rings_cached--; xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1, type); } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4a4cb1d91ac8..5590eac2b22d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -23,10 +23,15 @@ #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/acpi.h> #include "xhci.h" #include "xhci-trace.h" +#define PORT2_SSIC_CONFIG_REG2 0x883c +#define PROG_DONE (1 << 30) +#define SSIC_PORT_UNUSED (1 << 31) + /* Device for a quirk */ #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 @@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } /* + * In some Intel xHCI controllers, in order to get D3 working, + * through a vendor specific SSIC CONFIG register at offset 0x883c, + * SSIC PORT need to be marked as "unused" before putting xHCI + * into D3. After D3 exit, the SSIC port need to be marked as "used". + * Without this change, xHCI might not enter D3 state. 
* Make sure PME works on some Intel xHCI controllers by writing 1 to clear * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 */ -static void xhci_pme_quirk(struct xhci_hcd *xhci) +static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); u32 val; void __iomem *reg; + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { + + reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; + + /* Notify SSIC that SSIC profile programming is not done */ + val = readl(reg) & ~PROG_DONE; + writel(val, reg); + + /* Mark SSIC port as unused(suspend) or used(resume) */ + val = readl(reg); + if (suspend) + val |= SSIC_PORT_UNUSED; + else + val &= ~SSIC_PORT_UNUSED; + writel(val, reg); + + /* Notify SSIC that SSIC profile programming is done */ + val = readl(reg) | PROG_DONE; + writel(val, reg); + readl(reg); + } + reg = (void __iomem *) xhci->cap_regs + 0x80a4; val = readl(reg); writel(val | BIT(28), reg); readl(reg); } +#ifdef CONFIG_ACPI +static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) +{ + static const u8 intel_dsm_uuid[] = { + 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, + 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, + }; + acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); +} +#else + static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } +#endif /* CONFIG_ACPI */ + /* called during probe() after chip reset completes */ static int xhci_pci_setup(struct usb_hcd *hcd) { @@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) HCC_MAX_PSA(xhci->hcc_params) >= 4) xhci->shared_hcd->can_do_streams = 1; + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) + xhci_pme_acpi_rtd3_enable(dev); + /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ pm_runtime_put_noidle(&dev->dev); @@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) pdev->no_d3cold = true; if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(xhci); + xhci_pme_quirk(hcd, true); return xhci_suspend(xhci, do_wakeup); } @@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) usb_enable_intel_xhci_ports(pdev); if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(xhci); + xhci_pme_quirk(hcd, false); retval = xhci_resume(xhci, hibernated); return retval; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 94416ff70810..6a8fc52aed58 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci, usb_hcd_resume_root_hub(hcd); } + if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) + bus_state->port_remote_wakeup &= ~(1 << faked_port_index); + if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { xhci_dbg(xhci, "port resume event for port %d\n", port_id); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index e66857f0a3bb..c3ac0a2aac37 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) return -EINVAL; } + if (virt_dev->tt_info) + old_active_eps = virt_dev->tt_info->active_eps; + if (virt_dev->udev != udev) { /* If the virt_dev and the udev does not match, this virt_dev * may belong to another udev. 
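The Cherryview SSIC quirk in the xhci-pci.c hunk above is a read-modify-write handshake: clear PROG_DONE to announce that profile programming is in progress, flip SSIC_PORT_UNUSED according to the suspend/resume direction, set PROG_DONE again, and read the register back to post the write. The sketch below replays that bit choreography against an in-memory stand-in register; only the bit layout (PROG_DONE, SSIC_PORT_UNUSED) comes from the patch, the register model itself is hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PROG_DONE        (1u << 30)
#define SSIC_PORT_UNUSED (1u << 31)

/* Stand-in for the memory-mapped SSIC config register at 0x883c. */
static uint32_t ssic_cfg;

static uint32_t reg_read(void)     { return ssic_cfg; }
static void reg_write(uint32_t v)  { ssic_cfg = v; }

/* Mirror the quirk: "programming not done", toggle port-unused, "done". */
static void ssic_quirk(bool suspend)
{
	uint32_t val;

	val = reg_read() & ~PROG_DONE;    /* profile programming in progress */
	reg_write(val);

	val = reg_read();
	if (suspend)
		val |= SSIC_PORT_UNUSED;  /* park the port before entering D3 */
	else
		val &= ~SSIC_PORT_UNUSED; /* mark it used again on resume */
	reg_write(val);

	val = reg_read() | PROG_DONE;     /* profile programming done */
	reg_write(val);
	(void)reg_read();                 /* read back to post the write */
}

int main(void)
{
	ssic_quirk(true);
	printf("after suspend: 0x%08x\n", (unsigned)ssic_cfg);
	ssic_quirk(false);
	printf("after resume:  0x%08x\n", (unsigned)ssic_cfg);
	return 0;
}
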
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 31e46cc55807..ed2ebf647c38 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -285,6 +285,7 @@ struct xhci_op_regs { #define XDEV_U0 (0x0 << 5) #define XDEV_U2 (0x2 << 5) #define XDEV_U3 (0x3 << 5) +#define XDEV_INACTIVE (0x6 << 5) #define XDEV_RESUME (0xf << 5) /* true: port has power (see HCC_PPC) */ #define PORT_POWER (1 << 9) diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index caf188800c67..6b2479123de7 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_READ_DISC_INFO ), +/* Reported by Oliver Neukum <oneukum@suse.com> + * This device morphes spontaneously into another device if the access + * pattern of Windows isn't followed. Thus writable media would be dirty + * if the initial instance is used. So the device is limited to its + * virtual CD. + * And yes, the concept that BCD goes up to 9 is not heeded */ +UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff, + "ZTE,Incorporated", + "ZTE WCDMA Technologies MSM", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_SINGLE_LUN ), + /* Reported by Sven Geggus <sven-usbst@geggus.net> * This encrypted pen drive returns bogus data for the initial READ(10). */ @@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_INITIAL_READ10 ), +/* Reported by Hans de Goede <hdegoede@redhat.com> + * These are mini projectors using USB for both power and video data transport + * The usb-storage interface is a virtual windows driver CD, which the gm12u320 + * driver automatically converts into framebuffer & kms dri device nodes. + */ +UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff, + "Grain-media Technology Corp.", + "USB3.0 Device GM12U320", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_DEVICE ), + /* Patch by Richard Schütz <r.schtz@t-online.de> * This external hard drive enclosure uses a JMicron chip which * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9e8e004bb1c3..a9fe859f43c8 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -22,14 +22,20 @@ #include <linux/file.h> #include <linux/highmem.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <linux/kthread.h> #include <linux/cgroup.h> #include <linux/module.h> +#include <linux/sort.h> #include "vhost.h" +static ushort max_mem_regions = 64; +module_param(max_mem_regions, ushort, 0444); +MODULE_PARM_DESC(max_mem_regions, + "Maximum number of memory regions in memory map. 
(default: 64)"); + enum { - VHOST_MEMORY_MAX_NREGIONS = 64, VHOST_MEMORY_F_LOG = 0x1, }; @@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) fput(dev->log_file); dev->log_file = NULL; /* No one will access memory at this point */ - kfree(dev->memory); + kvfree(dev->memory); dev->memory = NULL; WARN_ON(!list_empty(&dev->work_list)); if (dev->worker) { @@ -663,6 +669,28 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq) } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); +static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2) +{ + const struct vhost_memory_region *r1 = p1, *r2 = p2; + if (r1->guest_phys_addr < r2->guest_phys_addr) + return 1; + if (r1->guest_phys_addr > r2->guest_phys_addr) + return -1; + return 0; +} + +static void *vhost_kvzalloc(unsigned long size) +{ + void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + + if (!n) { + n = vzalloc(size); + if (!n) + return ERR_PTR(-ENOMEM); + } + return n; +} + static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem, *oldmem; @@ -673,21 +701,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -EFAULT; if (mem.padding) return -EOPNOTSUPP; - if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) + if (mem.nregions > max_mem_regions) return -E2BIG; - newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); + newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions)); if (!newmem) return -ENOMEM; memcpy(newmem, &mem, size); if (copy_from_user(newmem->regions, m->regions, mem.nregions * sizeof *m->regions)) { - kfree(newmem); + kvfree(newmem); return -EFAULT; } + sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions), + vhost_memory_reg_sort_cmp, NULL); if (!memory_access_ok(d, newmem, 0)) { - kfree(newmem); + kvfree(newmem); return -EFAULT; } oldmem = d->memory; @@ -699,7 +729,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) d->vqs[i]->memory = newmem; mutex_unlock(&d->vqs[i]->mutex); } - kfree(oldmem); + kvfree(oldmem); return 0; } @@ -992,17 +1022,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl); static const struct vhost_memory_region *find_region(struct vhost_memory *mem, __u64 addr, __u32 len) { - struct vhost_memory_region *reg; - int i; + const struct vhost_memory_region *reg; + int start = 0, end = mem->nregions; - /* linear search is not brilliant, but we really have on the order of 6 - * regions in practice */ - for (i = 0; i < mem->nregions; ++i) { - reg = mem->regions + i; - if (reg->guest_phys_addr <= addr && - reg->guest_phys_addr + reg->memory_size - 1 >= addr) - return reg; + while (start < end) { + int slot = start + (end - start) / 2; + reg = mem->regions + slot; + if (addr >= reg->guest_phys_addr) + end = slot; + else + start = slot + 1; } + + reg = mem->regions + start; + if (addr >= reg->guest_phys_addr && + reg->guest_phys_addr + reg->memory_size > addr) + return reg; return NULL; } diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f0520bcf2094..518c6294bf6c 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page, else wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); } +EXPORT_SYMBOL_GPL(wbc_account_io); /** * inode_congested - test whether an inode is congested diff --git a/fs/namespace.c b/fs/namespace.c index c7cb8a526c05..2b8aa15fd6df 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1361,6 +1361,36 @@ enum 
umount_tree_flags { UMOUNT_PROPAGATE = 2, UMOUNT_CONNECTED = 4, }; + +static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) +{ + /* Leaving mounts connected is only valid for lazy umounts */ + if (how & UMOUNT_SYNC) + return true; + + /* A mount without a parent has nothing to be connected to */ + if (!mnt_has_parent(mnt)) + return true; + + /* Because the reference counting rules change when mounts are + * unmounted and connected, umounted mounts may not be + * connected to mounted mounts. + */ + if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) + return true; + + /* Has it been requested that the mount remain connected? */ + if (how & UMOUNT_CONNECTED) + return false; + + /* Is the mount locked such that it needs to remain connected? */ + if (IS_MNT_LOCKED(mnt)) + return false; + + /* By default disconnect the mount */ + return true; +} + /* * mount_lock must be held * namespace_sem must be held for write @@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; - disconnect = !(((how & UMOUNT_CONNECTED) && - mnt_has_parent(p) && - (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || - IS_MNT_LOCKED_AND_LAZY(p)); + disconnect = disconnect_mount(p, how); pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, disconnect ? &unmounted : NULL); @@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry) while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { - struct mount *p, *tmp; - list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { - hlist_add_head(&p->mnt_umount.s_list, &unmounted); - umount_mnt(p); - } + hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); + umount_mnt(mnt); } else umount_tree(mnt, UMOUNT_CONNECTED); } diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 3e594ce41010..92e48c70f0f0 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -152,15 +152,31 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, BUG(); list_del_init(&mark->g_list); + spin_unlock(&mark->lock); if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) iput(inode); + /* release lock temporarily */ + mutex_unlock(&group->mark_mutex); spin_lock(&destroy_lock); list_add(&mark->g_list, &destroy_list); spin_unlock(&destroy_lock); wake_up(&destroy_waitq); + /* + * We don't necessarily have a ref on mark from caller so the above destroy + * may have actually freed it, unless this group provides a 'freeing_mark' + * function which must be holding a reference. + */ + + /* + * Some groups like to know that marks are being freed. This is a + * callback to the group function to let it know that this mark + * is being freed. + */ + if (group->ops->freeing_mark) + group->ops->freeing_mark(mark, group); /* * __fsnotify_update_child_dentry_flags(inode); @@ -175,6 +191,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, */ atomic_dec(&group->num_marks); + + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); } void fsnotify_destroy_mark(struct fsnotify_mark *mark, @@ -187,10 +205,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, /* * Destroy all marks in the given list. The marks must be already detached from - * the original inode / vfsmount. Note that we can race with - * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each - * mark so they won't get freed from under us and nobody else touches our - * free_list list_head. 
+ * the original inode / vfsmount. */ void fsnotify_destroy_marks(struct list_head *to_free) { @@ -391,7 +406,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head, } /* - * Clear any marks in a group in which mark->flags & flags is true. + * clear any marks in a group in which mark->flags & flags is true */ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags) @@ -445,7 +460,6 @@ static int fsnotify_mark_destroy(void *ignored) { struct fsnotify_mark *mark, *next; struct list_head private_destroy_list; - struct fsnotify_group *group; for (;;) { spin_lock(&destroy_lock); @@ -457,14 +471,6 @@ static int fsnotify_mark_destroy(void *ignored) list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { list_del_init(&mark->g_list); - group = mark->group; - /* - * Some groups like to know that marks are being freed. - * This is a callback to the group function to let it - * know that this mark is being freed. - */ - if (group && group->ops->freeing_mark) - group->ops->freeing_mark(mark, group); fsnotify_put_mark(mark); } diff --git a/fs/pnode.h b/fs/pnode.h index 7114ce6e6b9e..0fcdbe7ca648 100644 --- a/fs/pnode.h +++ b/fs/pnode.h @@ -20,8 +20,6 @@ #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) -#define IS_MNT_LOCKED_AND_LAZY(m) \ - (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED) #define CL_EXPIRE 0x01 #define CL_SLAVE 0x02 diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 6afac3d561ac..8d0b3ade0ff0 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) iinfo->i_ext.i_data, inode->i_sb->s_blocksize - sizeof(struct unallocSpaceEntry)); use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); - use->descTag.tagLocation = - cpu_to_le32(iinfo->i_location.logicalBlockNum); - crclen = sizeof(struct unallocSpaceEntry) + - iinfo->i_lenAlloc - sizeof(struct tag); - use->descTag.descCRCLength = cpu_to_le16(crclen); - use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use + - sizeof(struct tag), - crclen)); - use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); + crclen = sizeof(struct unallocSpaceEntry); - goto out; + goto finish; } if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) @@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync) efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); crclen = sizeof(struct extendedFileEntry); } + +finish: if (iinfo->i_strat4096) { fe->icbTag.strategyType = cpu_to_le16(4096); fe->icbTag.strategyParameter = cpu_to_le16(1); @@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) fe->icbTag.numEntries = cpu_to_le16(1); } - if (S_ISDIR(inode->i_mode)) + if (iinfo->i_use) + fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE; + else if (S_ISDIR(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; else if (S_ISREG(inode->i_mode)) fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; @@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync) crclen)); fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); -out: set_buffer_uptodate(bh); unlock_buffer(bh); diff --git a/include/linux/ata.h b/include/linux/ata.h index fed36418dd1c..6c78956aa470 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -45,6 +45,7 @@ enum { ATA_SECT_SIZE = 512, ATA_MAX_SECTORS_128 = 128, ATA_MAX_SECTORS = 256, + 
ATA_MAX_SECTORS_1024 = 1024, ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ ATA_MAX_SECTORS_TAPE = 65535, diff --git a/include/linux/cper.h b/include/linux/cper.h index 76abba4b238e..dcacb1a72e26 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -340,7 +340,27 @@ struct cper_ia_proc_ctx { __u64 mm_reg_addr; }; -/* Memory Error Section */ +/* Old Memory Error Section UEFI 2.1, 2.2 */ +struct cper_sec_mem_err_old { + __u64 validation_bits; + __u64 error_status; + __u64 physical_addr; + __u64 physical_addr_mask; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u8 error_type; +}; + +/* Memory Error Section UEFI >= 2.3 */ struct cper_sec_mem_err { __u64 validation_bits; __u64 error_status; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 1da602982cf9..6cd8c0ee4b6f 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); * SAVE_REGS. If another ops with this flag set is already registered * for any of the functions that this ops will be registered for, then * this ops will fail to register or set_filter_ip. + * PID - Is affected by set_ftrace_pid (allows filtering on those pids) */ enum { FTRACE_OPS_FL_ENABLED = 1 << 0, @@ -132,6 +133,7 @@ enum { FTRACE_OPS_FL_MODIFYING = 1 << 11, FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, FTRACE_OPS_FL_IPMODIFY = 1 << 13, + FTRACE_OPS_FL_PID = 1 << 14, }; #ifdef CONFIG_DYNAMIC_FTRACE @@ -159,6 +161,7 @@ struct ftrace_ops { struct ftrace_ops *next; unsigned long flags; void *private; + ftrace_func_t saved_func; int __percpu *disabled; #ifdef CONFIG_DYNAMIC_FTRACE int nr_trampolines; diff --git a/include/linux/libata.h b/include/linux/libata.h index 36ce37bcc963..c9cfbcdb8d14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -431,6 +431,8 @@ enum { ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ + ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ + ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index f25e2bdd188c..272f42952f34 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -178,17 +178,17 @@ typedef enum { /* Chip may not exist, so silence any errors in scan */ #define NAND_SCAN_SILENT_NODEV 0x00040000 /* - * This option could be defined by controller drivers to protect against - * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers - */ -#define NAND_USE_BOUNCE_BUFFER 0x00080000 -/* * Autodetect nand buswidth with readid/onfi. * This suppose the driver will configure the hardware in 8 bits mode * when calling nand_scan_ident, and update its configuration * before calling nand_scan_tail. 
*/ #define NAND_BUSWIDTH_AUTO 0x00080000 +/* + * This option could be defined by controller drivers to protect against + * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers + */ +#define NAND_USE_BOUNCE_BUFFER 0x00100000 /* Options set by nand scan */ /* Nand scan has allocated controller struct */ diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 75f70f6ac137..e1571efa3f2b 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h @@ -43,7 +43,6 @@ struct esdhc_platform_data { enum wp_types wp_type; enum cd_types cd_type; int max_bus_width; - unsigned int f_max; bool support_vsel; unsigned int delay_line; }; diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index a741678f24a2..883fe1e7c5a1 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype); +/** + * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation + * @wiphy: the wiphy + * @chandef: the channel definition + * @iftype: interface type + * + * Return: %true if there is no secondary channel or the secondary channel(s) + * can be used for beaconing (i.e. is not a radar channel etc.). This version + * also checks if IR-relaxation conditions apply, to allow beaconing under + * more permissive conditions. + * + * Requires the RTNL to be held. + */ +bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_iftype iftype); + /* * cfg80211_ch_switch_notify - update wdev channel and notify userspace * @dev: the device which switched channels diff --git a/include/net/ip.h b/include/net/ip.h index 0750a186ea63..d5fe9f2ab699 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) } /* datagram.c */ +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); void ip4_datagram_release_cb(struct sock *sk); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index b6fce900a833..d708a53b8fb1 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -614,6 +614,8 @@ struct drm_amdgpu_info_device { uint32_t vram_type; /** video memory bit width*/ uint32_t vram_bit_width; + /* vce harvesting instance */ + uint32_t vce_harvest_config; }; struct drm_amdgpu_info_hw_ip { diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 6e1a2ed116cb..db809b722985 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h @@ -1070,6 +1070,14 @@ struct drm_i915_reg_read { __u64 offset; __u64 val; /* Return value */ }; +/* Known registers: + * + * Render engine timestamp - 0x2358 + 64bit - gen7+ + * - Note this register returns an invalid value if using the default + * single instruction 8byte read, in order to workaround that use + * offset (0x2538 | 1) instead. 
+ * + */ struct drm_i915_reset_stats { __u32 ctx_id; diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 7bbee79ca293..ec32293a00db 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -34,6 +34,7 @@ /* The feature bitmap for virtio net */ #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ +#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */ #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ @@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq { #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 +/* + * Control network offloads + * + * Reconfigures the network offloads that Guest can handle. + * + * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit. + * + * Command data format matches the feature bit mask exactly. + * + * See VIRTIO_NET_F_GUEST_* for the list of offloads + * that can be enabled/disabled. + */ +#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5 +#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0 + #endif /* _LINUX_VIRTIO_NET_H */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 75301468359f..90007a1abcab 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -157,6 +157,12 @@ struct virtio_pci_common_cfg { __le32 queue_used_hi; /* read-write */ }; +/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ +struct virtio_pci_cfg_cap { + struct virtio_pci_cap cap; + __u8 pci_cfg_data[4]; /* Data for BAR access. */ +}; + /* Macro versions of offsets for the Old Timers! */ #define VIRTIO_PCI_CAP_VNDR 0 #define VIRTIO_PCI_CAP_NEXT 1 diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 915980ac68df..c07295969b7e 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h @@ -31,6 +31,9 @@ * SUCH DAMAGE. * * Copyright Rusty Russell IBM Corporation 2007. 
*/ +#ifndef __KERNEL__ +#include <stdint.h> +#endif #include <linux/types.h> #include <linux/virtio_types.h> @@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p, vr->num = num; vr->desc = p; vr->avail = p + num*sizeof(struct vring_desc); - vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) + vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16) + align-1) & ~(align - 1)); } diff --git a/kernel/resource.c b/kernel/resource.c index 90552aab5f2d..fed052a1bc9f 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size) { struct resource *p; resource_size_t end = start + size - 1; - int flags = IORESOURCE_MEM | IORESOURCE_BUSY; + unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; const char *name = "System RAM"; int ret = -1; read_lock(&resource_lock); for (p = iomem_resource.child; p ; p = p->sibling) { - if (end < p->start) + if (p->end < start) continue; if (p->start <= start && end <= p->end) { @@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size) ret = 1; break; } - if (p->end < start) + if (end < p->start) break; /* not found */ } read_unlock(&resource_lock); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 02bece4a99ea..eb11011b5292 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -98,6 +98,13 @@ struct ftrace_pid { struct pid *pid; }; +static bool ftrace_pids_enabled(void) +{ + return !list_empty(&ftrace_pids); +} + +static void ftrace_update_trampoline(struct ftrace_ops *ops); + /* * ftrace_disabled is set when an anomaly is discovered. * ftrace_disabled is much stronger than ftrace_enabled. @@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock); static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; -ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; static struct ftrace_ops global_ops; static struct ftrace_ops control_ops; @@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, if (!test_tsk_trace_trace(current)) return; - ftrace_pid_function(ip, parent_ip, op, regs); -} - -static void set_ftrace_pid_function(ftrace_func_t func) -{ - /* do not set ftrace_pid_function to itself! 
*/ - if (func != ftrace_pid_func) - ftrace_pid_function = func; + op->saved_func(ip, parent_ip, op, regs); } /** @@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func) void clear_ftrace_function(void) { ftrace_trace_function = ftrace_stub; - ftrace_pid_function = ftrace_stub; } static void control_ops_disable_all(struct ftrace_ops *ops) @@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) } else add_ftrace_ops(&ftrace_ops_list, ops); + /* Always save the function, and reset at unregistering */ + ops->saved_func = ops->func; + + if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled()) + ops->func = ftrace_pid_func; + ftrace_update_trampoline(ops); if (ftrace_enabled) @@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) if (ftrace_enabled) update_ftrace_function(); + ops->func = ops->saved_func; + return 0; } static void ftrace_update_pid_func(void) { + bool enabled = ftrace_pids_enabled(); + struct ftrace_ops *op; + /* Only do something if we are tracing something */ if (ftrace_trace_function == ftrace_stub) return; + do_for_each_ftrace_op(op, ftrace_ops_list) { + if (op->flags & FTRACE_OPS_FL_PID) { + op->func = enabled ? ftrace_pid_func : + op->saved_func; + ftrace_update_trampoline(op); + } + } while_for_each_ftrace_op(op); + update_ftrace_function(); } @@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = { .local_hash.filter_hash = EMPTY_HASH, INIT_OPS_HASH(global_ops) .flags = FTRACE_OPS_FL_RECURSION_SAFE | - FTRACE_OPS_FL_INITIALIZED, + FTRACE_OPS_FL_INITIALIZED | + FTRACE_OPS_FL_PID, }; /* @@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops) static struct ftrace_ops global_ops = { .func = ftrace_stub, - .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | + FTRACE_OPS_FL_INITIALIZED | + FTRACE_OPS_FL_PID, }; static int __init ftrace_nodyn_init(void) @@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) if (WARN_ON(tr->ops->func != ftrace_stub)) printk("ftrace ops had %pS for function\n", tr->ops->func); - /* Only the top level instance does pid tracing */ - if (!list_empty(&ftrace_pids)) { - set_ftrace_pid_function(func); - func = ftrace_pid_func; - } } tr->ops->func = func; tr->ops->private = tr; @@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos) { mutex_lock(&ftrace_lock); - if (list_empty(&ftrace_pids) && (!*pos)) + if (!ftrace_pids_enabled() && (!*pos)) return (void *) 1; return seq_list_start(&ftrace_pids, *pos); @@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = { .func = ftrace_stub, .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED | + FTRACE_OPS_FL_PID | FTRACE_OPS_FL_STUB, #ifdef FTRACE_GRAPH_TRAMP_ADDR .trampoline = FTRACE_GRAPH_TRAMP_ADDR, diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 9dd49ca67dbc..6e70ddb158b4 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev) mutex_unlock(&virtio_9p_lock); + vdev->config->reset(vdev); vdev->config->del_vqs(vdev); sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index 1997538a5d23..3b78e8473a01 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c @@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason) { ax25_clear_queues(ax25); + ax25_stop_heartbeat(ax25); 
ax25_stop_t1timer(ax25); ax25_stop_t2timer(ax25); ax25_stop_t3timer(ax25); diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index c11cf2611db0..1198a3dbad95 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -351,7 +351,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, if (state == MDB_TEMPORARY) mod_timer(&p->timer, now + br->multicast_membership_interval); - br_mdb_notify(br->dev, port, group, RTM_NEWMDB); return 0; } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 742a6c27d7a2..79db489cdade 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br, struct bridge_mcast_own_query *query); static void br_multicast_add_router(struct net_bridge *br, struct net_bridge_port *port); +static void br_ip4_multicast_leave_group(struct net_bridge *br, + struct net_bridge_port *port, + __be32 group, + __u16 vid); +#if IS_ENABLED(CONFIG_IPV6) +static void br_ip6_multicast_leave_group(struct net_bridge *br, + struct net_bridge_port *port, + const struct in6_addr *group, + __u16 vid); +#endif unsigned int br_mdb_rehash_seq; static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) @@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, continue; } - err = br_ip4_multicast_add_group(br, port, group, vid); - if (err) - break; + if ((type == IGMPV3_CHANGE_TO_INCLUDE || + type == IGMPV3_MODE_IS_INCLUDE) && + ntohs(grec->grec_nsrcs) == 0) { + br_ip4_multicast_leave_group(br, port, group, vid); + } else { + err = br_ip4_multicast_add_group(br, port, group, vid); + if (err) + break; + } } return err; @@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, continue; } - err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, - vid); - if (err) - break; + if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || + grec->grec_type == MLD2_MODE_IS_INCLUDE) && + ntohs(*nsrcs) == 0) { + br_ip6_multicast_leave_group(br, port, &grec->grec_mca, + vid); + } else { + err = br_ip6_multicast_add_group(br, port, + &grec->grec_mca, vid); + if (!err) + break; + } } return err; diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 3cc71b9f5517..cc858919108e 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode) * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are * not dropped, but CAIF is sending flow off instead. */ -static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) +static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) { int err; unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); + bool queued = false; if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { @@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) err = sk_filter(sk, skb); if (err) - return err; + goto out; + if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { set_rx_flow_off(cf_sk); net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); @@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } skb->dev = NULL; skb_set_owner_r(skb, sk); - /* Cache the SKB length before we tack it onto the receive - * queue. 
Once it is added it no longer belongs to us and - * may be freed by other threads of control pulling packets - * from the queue. - */ spin_lock_irqsave(&list->lock, flags); - if (!sock_flag(sk, SOCK_DEAD)) + queued = !sock_flag(sk, SOCK_DEAD); + if (queued) __skb_queue_tail(list, skb); spin_unlock_irqrestore(&list->lock, flags); - - if (!sock_flag(sk, SOCK_DEAD)) +out: + if (queued) sk->sk_data_ready(sk); else kfree_skb(skb); - return 0; } /* Packet Receive Callback function called from CAIF Stack */ diff --git a/net/core/datagram.c b/net/core/datagram.c index b80fb91bb3f7..4967262b2707 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -131,6 +131,35 @@ out_noerr: goto out; } +static int skb_set_peeked(struct sk_buff *skb) +{ + struct sk_buff *nskb; + + if (skb->peeked) + return 0; + + /* We have to unshare an skb before modifying it. */ + if (!skb_shared(skb)) + goto done; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + return -ENOMEM; + + skb->prev->next = nskb; + skb->next->prev = nskb; + nskb->prev = skb->prev; + nskb->next = skb->next; + + consume_skb(skb); + skb = nskb; + +done: + skb->peeked = 1; + + return 0; +} + /** * __skb_recv_datagram - Receive a datagram skbuff * @sk: socket @@ -165,7 +194,9 @@ out_noerr: struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, int *peeked, int *off, int *err) { + struct sk_buff_head *queue = &sk->sk_receive_queue; struct sk_buff *skb, *last; + unsigned long cpu_flags; long timeo; /* * Caller is allowed not to check sk->sk_err before skb_recv_datagram() @@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, * Look at current nfs client by the way... * However, this function was correct in any case. 8) */ - unsigned long cpu_flags; - struct sk_buff_head *queue = &sk->sk_receive_queue; int _off = *off; last = (struct sk_buff *)queue; @@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, _off -= skb->len; continue; } - skb->peeked = 1; + + error = skb_set_peeked(skb); + if (error) + goto unlock_err; + atomic_inc(&skb->users); } else __skb_unlink(skb, queue); @@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, return NULL; +unlock_err: + spin_unlock_irqrestore(&queue->lock, cpu_flags); no_packet: *err = error; return NULL; @@ -622,7 +657,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) !skb->csum_complete_sw) netdev_rx_csum_fault(skb->dev); } - skb->csum_valid = !sum; + if (!skb_shared(skb)) + skb->csum_valid = !sum; return sum; } EXPORT_SYMBOL(__skb_checksum_complete_head); @@ -642,11 +678,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) netdev_rx_csum_fault(skb->dev); } - /* Save full packet checksum */ - skb->csum = csum; - skb->ip_summed = CHECKSUM_COMPLETE; - skb->csum_complete_sw = 1; - skb->csum_valid = !sum; + if (!skb_shared(skb)) { + /* Save full packet checksum */ + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum_complete_sw = 1; + skb->csum_valid = !sum; + } return sum; } diff --git a/net/core/dst.c b/net/core/dst.c index e956ce6d1378..002144bea935 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst) int newrefcnt; newrefcnt = atomic_dec_return(&dst->__refcnt); - WARN_ON(newrefcnt < 0); + if (unlikely(newrefcnt < 0)) + net_warn_ratelimited("%s: dst:%p refcnt:%d\n", + __func__, dst, newrefcnt); if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) call_rcu(&dst->rcu_head, 
dst_destroy_rcu); } diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9e433d58d265..dc004b1e1f85 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1804,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb, goto errout; nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { - if (nla_type(attr) != IFLA_VF_PORT) - continue; - err = nla_parse_nested(port, IFLA_PORT_MAX, - attr, ifla_port_policy); + if (nla_type(attr) != IFLA_VF_PORT || + nla_len(attr) < NLA_HDRLEN) { + err = -EINVAL; + goto errout; + } + err = nla_parse_nested(port, IFLA_PORT_MAX, attr, + ifla_port_policy); if (err < 0) goto errout; if (!port[IFLA_PORT_VF]) { diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 90c0e8386116..574fad9cca05 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -20,7 +20,7 @@ #include <net/route.h> #include <net/tcp_states.h> -int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; @@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sk_dst_reset(sk); - lock_sock(sk); - oif = sk->sk_bound_dev_if; saddr = inet->inet_saddr; if (ipv4_is_multicast(usin->sin_addr.s_addr)) { @@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sk_dst_set(sk, &rt->dst); err = 0; out: - release_sock(sk); return err; } +EXPORT_SYMBOL(__ip4_datagram_connect); + +int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip4_datagram_connect(sk, uaddr, addr_len); + release_sock(sk); + return res; +} EXPORT_SYMBOL(ip4_datagram_connect); /* Because UDP xmit path can manipulate sk_dst_cache without holding diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 5f9b063bbe8a..0cb9165421d4 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init); int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) { + unsigned int locksz = sizeof(spinlock_t); unsigned int i, nblocks = 1; - if (sizeof(spinlock_t) != 0) { + if (locksz != 0) { /* allocate 2 cache lines or at least one spinlock per cpu */ - nblocks = max_t(unsigned int, - 2 * L1_CACHE_BYTES / sizeof(spinlock_t), - 1); + nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); /* no more locks than number of hash buckets */ nblocks = min(nblocks, hashinfo->ehash_mask + 1); - hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), + hashinfo->ehash_locks = kmalloc_array(nblocks, locksz, GFP_KERNEL | __GFP_NOWARN); if (!hashinfo->ehash_locks) - hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); + hashinfo->ehash_locks = vmalloc(nblocks * locksz); if (!hashinfo->ehash_locks) return -ENOMEM; diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a50dc6d408d1..31f71b15cfba 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) ihl = ip_hdrlen(skb); /* Determine the position of this fragment. */ - end = offset + skb->len - ihl; + end = offset + skb->len - skb_network_offset(skb) - ihl; err = -EINVAL; /* Is this the final fragment? 
*/ @@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) goto err; err = -ENOMEM; - if (!pskb_pull(skb, ihl)) + if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) goto err; err = pskb_trim_rcsum(skb, end - offset); @@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, iph->frag_off = 0; } + ip_send_check(iph); + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); qp->q.fragments = NULL; qp->q.fragments_tail = NULL; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 684f095d196e..728f5b3d3c64 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk) const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - bool new_recovery = false; + bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; bool is_reneg; /* is receiver reneging on SACKs? */ /* Reduce ssthresh if it has not yet been made inside this window. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder || !after(tp->high_seq, tp->snd_una) || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { - new_recovery = true; tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 62d908e64eeb..b10a88986a98 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); } -int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; struct inet_sock *inet = inet_sk(sk); @@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) if (usin->sin6_family == AF_INET) { if (__ipv6_only_sock(sk)) return -EAFNOSUPPORT; - err = ip4_datagram_connect(sk, uaddr, addr_len); + err = __ip4_datagram_connect(sk, uaddr, addr_len); goto ipv4_connected; } @@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) sin.sin_addr.s_addr = daddr->s6_addr32[3]; sin.sin_port = usin->sin6_port; - err = ip4_datagram_connect(sk, - (struct sockaddr *) &sin, - sizeof(sin)); + err = __ip4_datagram_connect(sk, + (struct sockaddr *) &sin, + sizeof(sin)); ipv4_connected: if (err) @@ -204,6 +204,16 @@ out: fl6_sock_release(flowlabel); return err; } + +int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) +{ + int res; + + lock_sock(sk); + res = __ip6_datagram_connect(sk, uaddr, addr_len); + release_sock(sk); + return res; +} EXPORT_SYMBOL_GPL(ip6_datagram_connect); int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index e893cd18612f..08b62047c67f 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c @@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { static const struct net_offload sit_offload = { .callbacks = { .gso_segment = ipv6_gso_segment, - .gro_receive = ipv6_gro_receive, - .gro_complete = ipv6_gro_complete, }, }; diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 29236e832e44..c09c0131bfa2 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c @@ -723,6 +723,7 @@ void 
ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) debugfs_remove_recursive(sdata->vif.debugfs_dir); sdata->vif.debugfs_dir = NULL; + sdata->debugfs.subdir_stations = NULL; } void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index ed1edac14372..553ac6dd4867 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) ieee80211_teardown_sdata(sdata); } -/* - * Remove all interfaces, may only be called at hardware unregistration - * time because it doesn't do RCU-safe list removals. - */ void ieee80211_remove_interfaces(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *tmp; @@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) ASSERT_RTNL(); - /* - * Close all AP_VLAN interfaces first, as otherwise they - * might be closed while the AP interface they belong to - * is closed, causing unregister_netdevice_many() to crash. + /* Before destroying the interfaces, make sure they're all stopped so + * that the hardware is stopped. Otherwise, the driver might still be + * iterating the interfaces during the shutdown, e.g. from a worker + * or from RX processing or similar, and if it does so (using atomic + * iteration) while we're manipulating the list, the iteration will + * crash. + * + * After this, the hardware should be stopped and the driver should + * have stopped all of its activities, so that we can do RCU-unaware + * manipulations of the interface list below. */ - list_for_each_entry(sdata, &local->interfaces, list) - if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) - dev_close(sdata->dev); + cfg80211_shutdown_all_interfaces(local->hw.wiphy); + + WARN(local->open_count, "%s: open count remains %d\n", + wiphy_name(local->hw.wiphy), local->open_count); mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 5438d13e2f00..3b59099413fb 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c @@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, if (action == WLAN_SP_MESH_PEERING_CONFIRM) { /* AID */ pos = skb_put(skb, 2); - put_unaligned_le16(plid, pos + 2); + put_unaligned_le16(plid, pos); } if (ieee80211_add_srates_ie(sdata, skb, true, band) || ieee80211_add_ext_srates_ie(sdata, skb, true, band) || @@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, WLAN_SP_MESH_PEERING_CONFIRM) { baseaddr += 4; baselen += 4; + + if (baselen > len) + return; } ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); mesh_process_plink_frame(sdata, mgmt, &elems); diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 06b60980c62c..b676b9fa707b 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) if (sdata->vif.type != NL80211_IFTYPE_STATION) continue; ieee80211_mgd_quiesce(sdata); + /* If suspended during TX in progress, and wowlan + * is enabled (connection will be active) there + * can be a race where the driver is put out + * of power-save due to TX and during suspend + * dynamic_ps_timer is cancelled and TX packet + * is flushed, leaving the driver in ACTIVE even + * after resuming until dynamic_ps_timer puts + * driver back in DOZE. 
+ */ + if (sdata->u.mgd.associated && + sdata->u.mgd.powersave && + !(local->hw.conf.flags & IEEE80211_CONF_PS)) { + local->hw.conf.flags |= IEEE80211_CONF_PS; + ieee80211_hw_config(local, + IEEE80211_CONF_CHANGE_PS); + } } err = drv_suspend(local, wowlan); diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index ad31b2dab4f5..8db6e2994bbc 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, struct ieee80211_channel *ch; struct cfg80211_chan_def chandef; int i, subband_start; + struct wiphy *wiphy = sdata->local->hw.wiphy; for (i = start; i <= end; i += spacing) { if (!ch_cnt) @@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, /* we will be active on the channel */ cfg80211_chandef_create(&chandef, ch, NL80211_CHAN_NO_HT); - if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, - &chandef, - sdata->wdev.iftype)) { + if (cfg80211_reg_can_beacon_relax(wiphy, &chandef, + sdata->wdev.iftype)) { ch_cnt++; /* * check if the next channel is also part of diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8410bb3bf5e8..b8233505bf9f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, queued = true; info->control.vif = &tx->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; - info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS | + IEEE80211_TX_CTL_NO_PS_BUFFER | + IEEE80211_TX_STATUS_EOSP; __skb_queue_tail(&tid_tx->pending, skb); if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) purge_skb = __skb_dequeue(&tid_tx->pending); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 9a0ae7172f92..d8e2e3918ce2 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -357,25 +357,52 @@ err1: return NULL; } + +static void +__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, + unsigned int order) +{ + struct netlink_sock *nlk = nlk_sk(sk); + struct sk_buff_head *queue; + struct netlink_ring *ring; + + queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; + ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; + + spin_lock_bh(&queue->lock); + + ring->frame_max = req->nm_frame_nr - 1; + ring->head = 0; + ring->frame_size = req->nm_frame_size; + ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; + + swap(ring->pg_vec_len, req->nm_block_nr); + swap(ring->pg_vec_order, order); + swap(ring->pg_vec, pg_vec); + + __skb_queue_purge(queue); + spin_unlock_bh(&queue->lock); + + WARN_ON(atomic_read(&nlk->mapped)); + + if (pg_vec) + free_pg_vec(pg_vec, order, req->nm_block_nr); +} + static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, - bool closing, bool tx_ring) + bool tx_ring) { struct netlink_sock *nlk = nlk_sk(sk); struct netlink_ring *ring; - struct sk_buff_head *queue; void **pg_vec = NULL; unsigned int order = 0; - int err; ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; - queue = tx_ring ? 
&sk->sk_write_queue : &sk->sk_receive_queue; - if (!closing) { - if (atomic_read(&nlk->mapped)) - return -EBUSY; - if (atomic_read(&ring->pending)) - return -EBUSY; - } + if (atomic_read(&nlk->mapped)) + return -EBUSY; + if (atomic_read(&ring->pending)) + return -EBUSY; if (req->nm_block_nr) { if (ring->pg_vec != NULL) @@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, return -EINVAL; } - err = -EBUSY; mutex_lock(&nlk->pg_vec_lock); - if (closing || atomic_read(&nlk->mapped) == 0) { - err = 0; - spin_lock_bh(&queue->lock); - - ring->frame_max = req->nm_frame_nr - 1; - ring->head = 0; - ring->frame_size = req->nm_frame_size; - ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; - - swap(ring->pg_vec_len, req->nm_block_nr); - swap(ring->pg_vec_order, order); - swap(ring->pg_vec, pg_vec); - - __skb_queue_purge(queue); - spin_unlock_bh(&queue->lock); - - WARN_ON(atomic_read(&nlk->mapped)); + if (atomic_read(&nlk->mapped) == 0) { + __netlink_set_ring(sk, req, tx_ring, pg_vec, order); + mutex_unlock(&nlk->pg_vec_lock); + return 0; } + mutex_unlock(&nlk->pg_vec_lock); if (pg_vec) free_pg_vec(pg_vec, order, req->nm_block_nr); - return err; + + return -EBUSY; } static void netlink_mm_open(struct vm_area_struct *vma) @@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk) memset(&req, 0, sizeof(req)); if (nlk->rx_ring.pg_vec) - netlink_set_ring(sk, &req, true, false); + __netlink_set_ring(sk, &req, false, NULL, 0); memset(&req, 0, sizeof(req)); if (nlk->tx_ring.pg_vec) - netlink_set_ring(sk, &req, true, true); + __netlink_set_ring(sk, &req, true, NULL, 0); } #endif /* CONFIG_NETLINK_MMAP */ @@ -2223,7 +2238,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, return -EINVAL; if (copy_from_user(&req, optval, sizeof(req))) return -EFAULT; - err = netlink_set_ring(sk, &req, false, + err = netlink_set_ring(sk, &req, optname == NETLINK_TX_RING); break; } diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index 4613df8c8290..65523948fb95 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -752,7 +752,7 @@ int ovs_flow_init(void) BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) - + (num_possible_nodes() + + (nr_node_ids * sizeof(struct flow_stats *)), 0, 0, NULL); if (flow_cache == NULL) diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 1d56903fd4c7..1df78289e248 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -339,6 +339,9 @@ static void tcf_bpf_cleanup(struct tc_action *act, int bind) bpf_prog_put(prog->filter); else bpf_prog_destroy(prog->filter); + + kfree(prog->bpf_ops); + kfree(prog->bpf_name); } static struct tc_action_ops act_bpf_ops __read_mostly = { diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index c79ecfd36e0f..e5168f8b9640 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, goto errout; if (oldprog) { - list_replace_rcu(&prog->link, &oldprog->link); + list_replace_rcu(&oldprog->link, &prog->link); tcf_unbind_filter(tp, &oldprog->res); call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); } else { diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 76bc3a20ffdb..bb2a0f529c1f 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c @@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (!fnew) goto err2; + 
tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); + fold = (struct flow_filter *)*arg; if (fold) { err = -EINVAL; @@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, fnew->mask = ~0U; fnew->tp = tp; get_random_bytes(&fnew->hashrnd, 4); - tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); } fnew->perturb_timer.function = flow_perturbation; @@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, if (*arg == 0) list_add_tail_rcu(&fnew->list, &head->filters); else - list_replace_rcu(&fnew->list, &fold->list); + list_replace_rcu(&fold->list, &fnew->list); *arg = (unsigned long)fnew; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9d37ccd95062..2f3d03f99487 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, *arg = (unsigned long) fnew; if (fold) { - list_replace_rcu(&fnew->list, &fold->list); + list_replace_rcu(&fold->list, &fnew->list); tcf_unbind_filter(tp, &fold->res); call_rcu(&fold->rcu, fl_destroy_filter); } else { diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index d75993f89fac..21ca33c9f036 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch) skb = dequeue_head(flow); len = qdisc_pkt_len(skb); q->backlogs[idx] -= len; - kfree_skb(skb); sch->q.qlen--; qdisc_qstats_drop(sch); qdisc_qstats_backlog_dec(sch, skb); + kfree_skb(skb); flow->dropped++; return idx; } +static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) +{ + unsigned int prev_backlog; + + prev_backlog = sch->qstats.backlog; + fq_codel_drop(sch); + return prev_backlog - sch->qstats.backlog; +} + static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct fq_codel_sched_data *q = qdisc_priv(sch); @@ -604,7 +613,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = { .enqueue = fq_codel_enqueue, .dequeue = fq_codel_dequeue, .peek = qdisc_peek_dequeued, - .drop = fq_codel_drop, + .drop = fq_codel_qdisc_drop, .init = fq_codel_init, .reset = fq_codel_reset, .destroy = fq_codel_destroy, diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 7d1492663360..52f75a5473e1 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c @@ -306,10 +306,10 @@ drop: len = qdisc_pkt_len(skb); slot->backlog -= len; sfq_dec(q, x); - kfree_skb(skb); sch->q.qlen--; qdisc_qstats_drop(sch); qdisc_qstats_backlog_dec(sch, skb); + kfree_skb(skb); return len; } diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 915b328b9ac5..59cabc9bce69 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, return false; } -bool cfg80211_reg_can_beacon(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, - enum nl80211_iftype iftype) +static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_iftype iftype, + bool check_no_ir) { bool res; u32 prohibited_flags = IEEE80211_CHAN_DISABLED | IEEE80211_CHAN_RADAR; - trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype); + trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); - /* - * Under certain conditions suggested by some regulatory bodies a - * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag - * only if such relaxations are not enabled and the conditions are not - * met. 
- */ - if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan)) + if (check_no_ir) prohibited_flags |= IEEE80211_CHAN_NO_IR; if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && @@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, trace_cfg80211_return_bool(res); return res; } + +bool cfg80211_reg_can_beacon(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_iftype iftype) +{ + return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true); +} EXPORT_SYMBOL(cfg80211_reg_can_beacon); +bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_iftype iftype) +{ + bool check_no_ir; + + ASSERT_RTNL(); + + /* + * Under certain conditions suggested by some regulatory bodies a + * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag + * only if such relaxations are not enabled and the conditions are not + * met. + */ + check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype, + chandef->chan); + + return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); +} +EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax); + int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c264effd00a6..76b41578a838 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) { + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, + iftype)) { result = -EINVAL; break; } @@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) } else if (!nl80211_get_ap_channel(rdev, &params)) return -EINVAL; - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, - wdev->iftype)) + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, + wdev->iftype)) return -EINVAL; if (info->attrs[NL80211_ATTR_ACL_POLICY]) { @@ -6492,8 +6493,8 @@ skip_beacons: if (err) return err; - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, - wdev->iftype)) + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef, + wdev->iftype)) return -EINVAL; err = cfg80211_chandef_dfs_required(wdev->wiphy, @@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb, return -EINVAL; /* we will be active on the TDLS link */ - if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) + if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, + wdev->iftype)) return -EINVAL; /* don't allow switching to DFS channels */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index d359e0610198..aa2d75482017 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -544,15 +544,15 @@ static int call_crda(const char *alpha2) reg_regdb_query(alpha2); if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { - pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n"); + pr_debug("Exceeded CRDA call max attempts. 
Not calling CRDA\n"); return -EINVAL; } if (!is_world_regdom((char *) alpha2)) - pr_info("Calling CRDA for country: %c%c\n", + pr_debug("Calling CRDA for country: %c%c\n", alpha2[0], alpha2[1]); else - pr_info("Calling CRDA to update world regulatory domain\n"); + pr_debug("Calling CRDA to update world regulatory domain\n"); return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); } @@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: case NL80211_IFTYPE_ADHOC: - return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); + return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: return cfg80211_chandef_usable(wiphy, &chandef, diff --git a/net/wireless/trace.h b/net/wireless/trace.h index af3617c9879e..a808279a432a 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify, TRACE_EVENT(cfg80211_reg_can_beacon, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, - enum nl80211_iftype iftype), - TP_ARGS(wiphy, chandef, iftype), + enum nl80211_iftype iftype, bool check_no_ir), + TP_ARGS(wiphy, chandef, iftype, check_no_ir), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY __field(enum nl80211_iftype, iftype) + __field(bool, check_no_ir) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->iftype = iftype; + __entry->check_no_ir = check_no_ir; ), - TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d", - WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype) + TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s", + WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype, + BOOL_TO_STR(__entry->check_no_ir)) ); TRACE_EVENT(cfg80211_chandef_dfs_required, diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h index 8965d1bb8811..125d6402f64f 100644 --- a/samples/trace_events/trace-events-sample.h +++ b/samples/trace_events/trace-events-sample.h @@ -168,7 +168,10 @@ * * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) * Use __get_dynamic_array_len(foo) to get the length of the array - * saved. + * saved. Note, __get_dynamic_array_len() returns the total allocated + * length of the dynamic array; __print_array() expects the second + * parameter to be the number of elements. To get that, the array length + * needs to be divided by the element size. * * For __string(foo, bar) use __get_str(foo) * @@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar, * This prints out the array that is defined by __array in a nice format. 
*/ __print_array(__get_dynamic_array(list), - __get_dynamic_array_len(list), + __get_dynamic_array_len(list) / sizeof(int), sizeof(int)), __get_str(str), __get_bitmask(cpus)) ); diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index d126c03361ae..75888dd38a7f 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem); void snd_pcm_stream_lock(struct snd_pcm_substream *substream) { if (substream->pcm->nonatomic) { - down_read(&snd_pcm_link_rwsem); + down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING); mutex_lock(&substream->self_group.mutex); } else { read_lock(&snd_pcm_link_rwlock); diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 442500e06b7c..5676b849379d 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable) enable ? "enable" : "disable"); if (enable) { - if (!bus->i915_power_refcount++) + if (!bus->i915_power_refcount++) { acomp->ops->get_power(acomp->dev); + snd_hdac_set_codec_wakeup(bus, true); + snd_hdac_set_codec_wakeup(bus, false); + } } else { WARN_ON(!bus->i915_power_refcount); if (!--bus->i915_power_refcount) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 745535d1840a..735bdcb04ce8 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev) if (!azx_has_pm_runtime(chip)) return 0; - if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL - && hda->need_i915_power) { - bus = azx_bus(chip); - snd_hdac_display_power(bus, true); - haswell_set_bclk(hda); - /* toggle codec wakeup bit for STATESTS read */ - snd_hdac_set_codec_wakeup(bus, true); - snd_hdac_set_codec_wakeup(bus, false); + if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { + bus = azx_bus(chip); + if (hda->need_i915_power) { + snd_hdac_display_power(bus, true); + haswell_set_bclk(hda); + } else { + /* toggle codec wakeup bit for STATESTS read */ + snd_hdac_set_codec_wakeup(bus, true); + snd_hdac_set_codec_wakeup(bus, false); + } } /* Read STATESTS before controller reset */ @@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = { /* ATI HDMI */ { PCI_DEVICE(0x1002, 0x1308), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0x157a), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, { PCI_DEVICE(0x1002, 0x7919), @@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0xaab0), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaac0), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0xaac8), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaad8), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, + { PCI_DEVICE(0x1002, 0xaae8), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, /* VIA VT8251/VT8237A */ { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 95158914cc6c..a97db5fc8a15 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3512,6 +3512,7 @@ 
static const struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, +{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, @@ -3576,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067"); MODULE_ALIAS("snd-hda-codec-id:10de0070"); MODULE_ALIAS("snd-hda-codec-id:10de0071"); MODULE_ALIAS("snd-hda-codec-id:10de0072"); +MODULE_ALIAS("snd-hda-codec-id:10de007d"); MODULE_ALIAS("snd-hda-codec-id:10de8001"); MODULE_ALIAS("snd-hda-codec-id:11069f80"); MODULE_ALIAS("snd-hda-codec-id:11069f81"); diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d35cf506a7db..742fc626f9e1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -5061,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = { { 0x14, 0x90170110 }, { 0x17, 0x40000008 }, { 0x18, 0x411111f0 }, - { 0x19, 0x411111f0 }, + { 0x19, 0x01a1913c }, { 0x1a, 0x411111f0 }, { 0x1b, 0x411111f0 }, { 0x1d, 0x40f89b2d }, @@ -5430,8 +5430,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {0x15, 0x0221401f}, \ {0x1a, 0x411111f0}, \ {0x1b, 0x411111f0}, \ - {0x1d, 0x40700001}, \ - {0x1e, 0x411111f0} + {0x1d, 0x40700001} #define ALC298_STANDARD_PINS \ {0x18, 0x411111f0}, \ @@ -5463,6 +5462,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x1d, 0x40700001}, {0x21, 0x02211030}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x12, 0x40000000}, + {0x14, 0x90170130}, + {0x17, 0x411111f0}, + {0x18, 0x411111f0}, + {0x19, 0x411111f0}, + {0x1a, 0x411111f0}, + {0x1b, 0x01014020}, + {0x1d, 0x4054c029}, + {0x1e, 0x411111f0}, + {0x21, 0x0221103f}), + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60160}, {0x14, 0x90170120}, {0x17, 0x90170140}, @@ -5690,35 +5700,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x13, 0x411111f0}, {0x16, 0x01014020}, {0x18, 0x411111f0}, - {0x19, 0x01a19030}), + {0x19, 0x01a19030}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x90a60140}, {0x13, 0x411111f0}, {0x16, 0x01014020}, {0x18, 0x02a19031}, - {0x19, 0x01a1903e}), + {0x19, 0x01a1903e}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x90a60140}, {0x13, 0x411111f0}, {0x16, 0x411111f0}, {0x18, 0x411111f0}, - {0x19, 0x411111f0}), + {0x19, 0x411111f0}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x40000000}, {0x13, 0x90a60140}, {0x16, 0x21014020}, {0x18, 0x411111f0}, - {0x19, 0x21a19030}), + {0x19, 0x21a19030}, + {0x1e, 0x411111f0}), SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_STANDARD_PINS, {0x12, 0x40000000}, {0x13, 0x90a60140}, {0x16, 0x411111f0}, {0x18, 0x411111f0}, - {0x19, 0x411111f0}), + {0x19, 0x411111f0}, + {0x1e, 0x411111f0}), + SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC292_STANDARD_PINS, + 
{0x12, 0x40000000}, + {0x13, 0x90a60140}, + {0x16, 0x21014020}, + {0x18, 0x411111f0}, + {0x19, 0x21a19030}, + {0x1e, 0x411111ff}), SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, ALC298_STANDARD_PINS, {0x12, 0x90a60130}, diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c index 1b1a89e80d13..784ceb85b2d9 100644 --- a/sound/sparc/amd7930.c +++ b/sound/sparc/amd7930.c @@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card, if (!amd->regs) { snd_printk(KERN_ERR "amd7930-%d: Unable to map chip registers.\n", dev); + kfree(amd); return -EIO; }
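
A note on the net/mac80211/tx.c hunk above: the three flag groups are complemented as one set, info->flags &= ~(IEEE80211_TX_TEMPORARY_FLAGS | IEEE80211_TX_CTL_NO_PS_BUFFER | IEEE80211_TX_STATUS_EOSP). Without the parentheses only the first mask would be inverted, and the NO_PS_BUFFER and EOSP bits would survive the AND. The stand-alone sketch below uses made-up flag values to show the difference; it is an illustration, not kernel code.

#include <stdio.h>

/* Made-up flag values for illustration only, not the mac80211 definitions. */
#define FLAG_TEMPORARY  0x01u
#define FLAG_NO_PS_BUF  0x02u
#define FLAG_EOSP       0x04u
#define FLAG_UNRELATED  0x08u

int main(void)
{
	unsigned int a = FLAG_TEMPORARY | FLAG_NO_PS_BUF | FLAG_EOSP | FLAG_UNRELATED;
	unsigned int b = a;

	/* No parentheses: only FLAG_TEMPORARY is complemented; the other two
	 * flags end up ORed into the mask and are kept, not cleared. */
	a &= ~FLAG_TEMPORARY | FLAG_NO_PS_BUF | FLAG_EOSP;

	/* Parenthesized: all three flags are cleared, unrelated bits are kept. */
	b &= ~(FLAG_TEMPORARY | FLAG_NO_PS_BUF | FLAG_EOSP);

	printf("without parens: 0x%x\n", a);	/* 0xe: NO_PS_BUF and EOSP still set */
	printf("with parens:    0x%x\n", b);	/* 0x8: only FLAG_UNRELATED left */
	return 0;
}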
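
The net/netlink/af_netlink.c change factors the ring swap out into __netlink_set_ring() so that the socket destructor can release an mmaped ring without going through netlink_set_ring() and its pg_vec mutex: the new page vector (NULL on teardown) is installed while only the queue lock is held, and the old vector is freed after the lock is dropped. A rough user-space sketch of that shape, with invented names and a pthread mutex standing in for the kernel locking:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy ring: swap the buffer under a short-lived lock, free it outside. */
struct toy_ring {
	pthread_mutex_t lock;
	char *pg_vec;
	size_t len;
};

static void toy_set_ring(struct toy_ring *ring, char *new_vec, size_t new_len)
{
	char *old_vec;

	pthread_mutex_lock(&ring->lock);
	old_vec = ring->pg_vec;		/* swap while holding the lock... */
	ring->pg_vec = new_vec;
	ring->len = new_len;
	pthread_mutex_unlock(&ring->lock);

	free(old_vec);			/* ...but release memory outside it */
}

int main(void)
{
	struct toy_ring ring = { .lock = PTHREAD_MUTEX_INITIALIZER };

	toy_set_ring(&ring, strdup("first"), 6);
	toy_set_ring(&ring, NULL, 0);	/* teardown path: install "no ring" */
	printf("pg_vec after teardown: %p\n", (void *)ring.pg_vec);
	return 0;
}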
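
The net/sched/cls_bpf.c, cls_flow.c and cls_flower.c hunks all swap the two arguments of list_replace_rcu(): the helper takes the entry being removed first and its replacement second, so passing them reversed splices the new filter out of a list it was never on and leaves the old one linked. The sketch below is a single-threaded user-space illustration with a hand-rolled list_replace() that follows the same (old, new) convention; it is not the kernel's RCU code.

#include <stdio.h>

/* Minimal circular doubly-linked list node. */
struct node {
	struct node *prev, *next;
	const char *name;
};

/* Same argument order as list_replace_rcu(): old entry first, new second. */
static void list_replace(struct node *old, struct node *new)
{
	new->next = old->next;
	new->prev = old->prev;
	new->prev->next = new;
	new->next->prev = new;
}

int main(void)
{
	struct node head = { .name = "head" };
	struct node oldprog = { .name = "oldprog" };
	struct node prog = { .name = "prog" };

	/* head <-> oldprog, circular. */
	head.next = head.prev = &oldprog;
	oldprog.next = oldprog.prev = &head;

	/* Correct order, as in the fixed hunks: replace oldprog with prog. */
	list_replace(&oldprog, &prog);

	printf("head->next is %s\n", head.next->name);	/* prints "prog" */
	return 0;
}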
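
The net/sched/sch_fq_codel.c hunk adds fq_codel_qdisc_drop() because fq_codel_drop() returns the index of the flow it dropped from, while the qdisc .drop callback is expected to report how many bytes were freed; the wrapper derives that number from the backlog counter before and after the drop. A toy version of the same bookkeeping (the 700-byte packet and the flow index are invented for the example):

#include <stdio.h>

/* Stand-in for the qdisc backlog counter; not kernel code. */
static unsigned int backlog_bytes = 3000;

/* Pretend inner helper: drops one 700-byte packet and returns the index of
 * the flow it picked, the way fq_codel_drop() does. */
static unsigned int toy_drop(void)
{
	backlog_bytes -= 700;
	return 3;			/* a flow index, not a byte count */
}

/* Wrapper with the contract the .drop callback wants: bytes freed. */
static unsigned int toy_qdisc_drop(void)
{
	unsigned int prev_backlog = backlog_bytes;

	toy_drop();
	return prev_backlog - backlog_bytes;
}

int main(void)
{
	printf("bytes freed: %u\n", toy_qdisc_drop());	/* prints 700 */
	return 0;
}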