580 files changed, 6235 insertions, 3594 deletions
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt index 65b3eac8856c..ff49cf901148 100644 --- a/Documentation/cgroup-v2.txt +++ b/Documentation/cgroup-v2.txt @@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and conventions of cgroup v2. It describes all userland-visible aspects of cgroup including core and specific controller behaviors. All future changes must be reflected in this document. Documentation for -v1 is available under Documentation/cgroup-legacy/. +v1 is available under Documentation/cgroup-v1/. CONTENTS @@ -843,6 +843,10 @@ PAGE_SIZE multiple when read back. Amount of memory used to cache filesystem data, including tmpfs and shared memory. + sock + + Amount of memory used in network transmission buffers + file_mapped Amount of cached filesystem data mapped with mmap() diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt index 451fef26b4df..10587bdadbbe 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt +++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt @@ -68,7 +68,7 @@ ethernet@f0b60000 { phy1: ethernet-phy@1 { max-speed = <1000>; reg = <0x1>; - compatible = "brcm,28nm-gphy", "ethernet-phy-ieee802.3-c22"; + compatible = "ethernet-phy-ieee802.3-c22"; }; }; }; @@ -115,7 +115,7 @@ ethernet@f0ba0000 { phy0: ethernet-phy@0 { max-speed = <1000>; reg = <0x0>; - compatible = "brcm,bcm53125", "ethernet-phy-ieee802.3-c22"; + compatible = "ethernet-phy-ieee802.3-c22"; }; }; }; diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt index 80411b2f0490..ecacfa44b1eb 100644 --- a/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt +++ b/Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt @@ -4,8 +4,6 @@ Required properties: - compatible: should be "hisilicon,hns-dsaf-v1" or "hisilicon,hns-dsaf-v2". "hisilicon,hns-dsaf-v1" is for hip05. "hisilicon,hns-dsaf-v2" is for Hi1610 and Hi1612. -- dsa-name: dsa fabric name who provide this interface. - should be "dsafX", X is the dsaf id. - mode: dsa fabric mode string. only support one of dsaf modes like these: "2port-64vf", "6port-16rss", @@ -26,9 +24,8 @@ Required properties: Example: -dsa: dsa@c7000000 { +dsaf0: dsa@c7000000 { compatible = "hisilicon,hns-dsaf-v1"; - dsa_name = "dsaf0"; mode = "6port-16rss"; interrupt-parent = <&mbigen_dsa>; reg = <0x0 0xC0000000 0x0 0x420000 diff --git a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt index 41d19be7011e..e6a9d1c30878 100644 --- a/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt +++ b/Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt @@ -4,8 +4,9 @@ Required properties: - compatible: "hisilicon,hns-nic-v1" or "hisilicon,hns-nic-v2". "hisilicon,hns-nic-v1" is for hip05. "hisilicon,hns-nic-v2" is for Hi1610 and Hi1612. -- ae-name: accelerator name who provides this interface, - is simply a name referring to the name of name in the accelerator node. +- ae-handle: accelerator engine handle for hns, + specifies a reference to the associating hardware driver node. + see Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt - port-id: is the index of port provided by DSAF (the accelerator). DSAF can connect to 8 PHYs. Port 0 to 1 are both used for adminstration purpose. They are called debug ports. 
@@ -41,7 +42,7 @@ Example: ethernet@0{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <0>; local-mac-address = [a2 14 e4 4b 56 76]; }; diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt index aeea50c84e92..d0cb8693963b 100644 --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt @@ -6,12 +6,17 @@ Required properties: - interrupts: interrupt for the device - phy: See ethernet.txt file in the same directory. - phy-mode: See ethernet.txt file in the same directory -- clocks: a pointer to the reference clock for this device. +- clocks: List of clocks for this device. At least one clock is + mandatory for the core clock. If several clocks are given, then the + clock-names property must be used to identify them. Optional properties: - tx-csum-limit: maximum mtu supported by port that allow TX checksum. Value is presented in bytes. If not used, by default 1600B is set for "marvell,armada-370-neta" and 9800B for others. +- clock-names: List of names corresponding to clocks property; shall be + "core" for core clock and "bus" for the optional bus clock. + Example: diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt index 79384113c2b0..694987d3c17a 100644 --- a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt +++ b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt @@ -38,7 +38,6 @@ Example : phy11: ethernet-phy@1 { reg = <1>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -48,7 +47,6 @@ Example : }; phy12: ethernet-phy@2 { reg = <2>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -58,7 +56,6 @@ Example : }; phy13: ethernet-phy@3 { reg = <3>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -68,7 +65,6 @@ Example : }; phy14: ethernet-phy@4 { reg = <4>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -85,7 +81,6 @@ Example : phy21: ethernet-phy@1 { reg = <1>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -95,7 +90,6 @@ Example : }; phy22: ethernet-phy@2 { reg = <2>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -105,7 +99,6 @@ Example : }; phy23: ethernet-phy@3 { reg = <3>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -115,7 +108,6 @@ Example : }; phy24: ethernet-phy@4 { reg = <4>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, diff --git a/Documentation/devicetree/bindings/net/mdio-mux.txt b/Documentation/devicetree/bindings/net/mdio-mux.txt index f65606f8d632..491f5bd55203 100644 --- a/Documentation/devicetree/bindings/net/mdio-mux.txt +++ b/Documentation/devicetree/bindings/net/mdio-mux.txt @@ -47,7 +47,6 @@ Example : phy11: ethernet-phy@1 { reg = <1>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -57,7 +56,6 @@ Example : }; phy12: ethernet-phy@2 { reg = <2>; - compatible = "marvell,88e1149r"; 
marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -67,7 +65,6 @@ Example : }; phy13: ethernet-phy@3 { reg = <3>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -77,7 +74,6 @@ Example : }; phy14: ethernet-phy@4 { reg = <4>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -94,7 +90,6 @@ Example : phy21: ethernet-phy@1 { reg = <1>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -104,7 +99,6 @@ Example : }; phy22: ethernet-phy@2 { reg = <2>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -114,7 +108,6 @@ Example : }; phy23: ethernet-phy@3 { reg = <3>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, @@ -124,7 +117,6 @@ Example : }; phy24: ethernet-phy@4 { reg = <4>; - compatible = "marvell,88e1149r"; marvell,reg-init = <3 0x10 0 0x5777>, <3 0x11 0 0x00aa>, <3 0x12 0 0x4105>, diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt index 525e1658f2da..bc1c3c8bf8fa 100644 --- a/Documentation/devicetree/bindings/net/phy.txt +++ b/Documentation/devicetree/bindings/net/phy.txt @@ -17,8 +17,7 @@ Optional Properties: "ethernet-phy-ieee802.3-c22" or "ethernet-phy-ieee802.3-c45" for PHYs that implement IEEE802.3 clause 22 or IEEE802.3 clause 45 specifications. If neither of these are specified, the default is to - assume clause 22. The compatible list may also contain other - elements. + assume clause 22. If the phy's identifier is known then the list may contain an entry of the form: "ethernet-phy-idAAAA.BBBB" where @@ -28,6 +27,9 @@ Optional Properties: 4 hex digits. This is the chip vendor OUI bits 19:24, followed by 10 bits of a vendor specific ID. + The compatible list should not contain other values than those + listed here. + - max-speed: Maximum PHY supported speed (10, 100, 1000...) - broken-turn-around: If set, indicates the PHY device does not correctly diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt index 4e8b90e43dd8..07a75094c5a8 100644 --- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt +++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt @@ -8,6 +8,7 @@ OHCI and EHCI controllers. Required properties: - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC; "renesas,pci-r8a7791" for the R8A7791 SoC; + "renesas,pci-r8a7793" for the R8A7793 SoC; "renesas,pci-r8a7794" for the R8A7794 SoC; "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt index 558fe528ae19..6cf99690eef9 100644 --- a/Documentation/devicetree/bindings/pci/rcar-pci.txt +++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt @@ -4,6 +4,7 @@ Required properties: compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC; "renesas,pcie-r8a7790" for the R8A7790 SoC; "renesas,pcie-r8a7791" for the R8A7791 SoC; + "renesas,pcie-r8a7793" for the R8A7793 SoC; "renesas,pcie-r8a7795" for the R8A7795 SoC; "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device. 
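As a concrete illustration of the phy.txt binding text above (this example is not taken from the patch), a PHY node may pair the optional "ethernet-phy-idAAAA.BBBB" form with the generic clause 22 string; the ID digits below are made-up placeholders, not a real device identifier:

	ethernet-phy@1 {
		/* ID digits are illustrative placeholders, not a real PHY identifier */
		compatible = "ethernet-phy-id1234.5678",
			     "ethernet-phy-ieee802.3-c22";
		reg = <1>;
		max-speed = <1000>;
	};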
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt index ce55c0a6f757..4da41bf1888e 100644 --- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt +++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt @@ -30,6 +30,8 @@ The compatible list for this generic sound card currently: "fsl,imx-audio-sgtl5000" (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt) + "fsl,imx-audio-wm8960" + Required properties: - compatible : Contains one of entries in the compatible list. diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt index 332e625f6ed0..e5ee3f159893 100644 --- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt @@ -1,8 +1,9 @@ * Renesas R-Car Thermal Required properties: -- compatible : "renesas,thermal-<soctype>", "renesas,rcar-thermal" - as fallback. +- compatible : "renesas,thermal-<soctype>", + "renesas,rcar-gen2-thermal" (with thermal-zone) or + "renesas,rcar-thermal" (without thermal-zone) as fallback. Examples with soctypes are: - "renesas,thermal-r8a73a4" (R-Mobile APE6) - "renesas,thermal-r8a7779" (R-Car H1) @@ -36,3 +37,35 @@ thermal@e61f0000 { 0xe61f0300 0x38>; interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>; }; + +Example (with thermal-zone): + +thermal-zones { + cpu_thermal: cpu-thermal { + polling-delay-passive = <1000>; + polling-delay = <5000>; + + thermal-sensors = <&thermal>; + + trips { + cpu-crit { + temperature = <115000>; + hysteresis = <0>; + type = "critical"; + }; + }; + cooling-maps { + }; + }; +}; + +thermal: thermal@e61f0000 { + compatible = "renesas,thermal-r8a7790", + "renesas,rcar-gen2-thermal", + "renesas,rcar-thermal"; + reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>; + interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&mstp5_clks R8A7790_CLK_THERMAL>; + power-domains = <&cpg_clocks>; + #thermal-sensor-cells = <0>; +}; diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt index fde9fd06fa98..843b045b4069 100644 --- a/Documentation/filesystems/proc.txt +++ b/Documentation/filesystems/proc.txt @@ -240,8 +240,8 @@ Table 1-2: Contents of the status files (as of 4.1) RssFile size of resident file mappings RssShmem size of resident shmem memory (includes SysV shm, mapping of tmpfs and shared anonymous mappings) - VmData size of data, stack, and text segments - VmStk size of data, stack, and text segments + VmData size of private data segments + VmStk size of stack segments VmExe size of text segment VmLib size of shared library code VmPTE size of page table entries @@ -356,7 +356,7 @@ address perms offset dev inode pathname a7cb1000-a7cb2000 ---p 00000000 00:00 0 a7cb2000-a7eb2000 rw-p 00000000 00:00 0 a7eb2000-a7eb3000 ---p 00000000 00:00 0 -a7eb3000-a7ed5000 rw-p 00000000 00:00 0 [stack:1001] +a7eb3000-a7ed5000 rw-p 00000000 00:00 0 a7ed5000-a8008000 r-xp 00000000 03:00 4222 /lib/libc.so.6 a8008000-a800a000 r--p 00133000 03:00 4222 /lib/libc.so.6 a800a000-a800b000 rw-p 00135000 03:00 4222 /lib/libc.so.6 @@ -388,7 +388,6 @@ is not associated with a file: [heap] = the heap of the program [stack] = the stack of the main process - [stack:1001] = the stack of the thread with tid 1001 [vdso] = the "virtual dynamic shared object", the kernel system call handler @@ -396,10 +395,8 @@ is not associated with a file: The /proc/PID/task/TID/maps is a view of the virtual 
memory from the viewpoint of the individual tasks of a process. In this file you will see a mapping marked -as [stack] if that task sees it as a stack. This is a key difference from the -content of /proc/PID/maps, where you will see all mappings that are being used -as stack by all of those tasks. Hence, for the example above, the task-level -map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this: +as [stack] if that task sees it as a stack. Hence, for the example above, the +task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this: 08048000-08049000 r-xp 00000000 03:00 8312 /opt/test 08049000-0804a000 rw-p 00001000 03:00 8312 /opt/test diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 87d40a72f6a1..9a53c929f017 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1496,6 +1496,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. could change it dynamically, usually by /sys/module/printk/parameters/ignore_loglevel. + ignore_rlimit_data + Ignore RLIMIT_DATA setting for data mappings, + print warning at first misuse. Can be changed via + /sys/module/kernel/parameters/ignore_rlimit_data. + ihash_entries= [KNL] Set number of hash buckets for inode cache. @@ -4230,6 +4235,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted. The default value of this parameter is determined by the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT. + workqueue.debug_force_rr_cpu + Workqueue used to implicitly guarantee that work + items queued without explicit CPU specified are put + on the local CPU. This guarantee is no longer true + and while local CPU is still preferred work items + may be put on foreign CPUs. This debug option + forces round-robin CPU selection to flush out + usages which depend on the now broken guarantee. + When enabled, memory and cache locality will be + impacted. + x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of default x2apic cluster mode on platforms supporting x2apic. diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index ceb44a095a27..73b36d7c7b0d 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt @@ -594,7 +594,7 @@ tcp_fastopen - INTEGER tcp_syn_retries - INTEGER Number of times initial SYNs for an active TCP connection attempt - will be retransmitted. Should not be higher than 255. Default value + will be retransmitted. Should not be higher than 127. Default value is 6, which corresponds to 63seconds till the last retransmission with the current initial RTO of 1second. With this the final timeout for an active TCP connection attempt will happen after 127seconds. 
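As an aside to the tcp_syn_retries description above (this sketch is not taken from the patch), the SYN retry count can also be tuned per socket through the standard Linux TCP_SYNCNT socket option; the value 3 below is an arbitrary example:

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int syn_retries = 3;	/* arbitrary example value */

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		/* Per-socket counterpart of the tcp_syn_retries sysctl documented above */
		if (setsockopt(fd, IPPROTO_TCP, TCP_SYNCNT,
			       &syn_retries, sizeof(syn_retries)) < 0)
			perror("setsockopt(TCP_SYNCNT)");

		/*
		 * By the same arithmetic as the sysctl text (initial RTO of 1 second,
		 * doubling on each retransmission), a connect() on this socket would
		 * give up after roughly 15 seconds instead of 127.
		 */
		close(fd);
		return 0;
	}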
diff --git a/MAINTAINERS b/MAINTAINERS index 5c889cd754c3..4f55edf0a589 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -223,9 +223,7 @@ F: drivers/scsi/aacraid/ ABI/API L: linux-api@vger.kernel.org -F: Documentation/ABI/ F: include/linux/syscalls.h -F: include/uapi/ F: kernel/sys_ni.c ABIT UGURU 1,2 HARDWARE MONITOR DRIVER @@ -960,6 +958,8 @@ M: Rob Herring <robh@kernel.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-highbank/ +F: arch/arm/boot/dts/highbank.dts +F: arch/arm/boot/dts/ecx-*.dts* ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT M: Krzysztof Halasa <khalasa@piap.pl> @@ -1035,6 +1035,7 @@ M: Barry Song <baohua@kernel.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git S: Maintained +F: arch/arm/boot/dts/prima2* F: arch/arm/mach-prima2/ F: drivers/clk/sirf/ F: drivers/clocksource/timer-prima2.c @@ -1136,6 +1137,10 @@ W: http://www.hisilicon.com S: Supported T: git git://github.com/hisilicon/linux-hisi.git F: arch/arm/mach-hisi/ +F: arch/arm/boot/dts/hi3* +F: arch/arm/boot/dts/hip* +F: arch/arm/boot/dts/hisi* +F: arch/arm64/boot/dts/hisilicon/ ARM/HP JORNADA 7XX MACHINE SUPPORT M: Kristoffer Ericson <kristoffer.ericson@gmail.com> @@ -1212,6 +1217,7 @@ M: Santosh Shilimkar <ssantosh@kernel.org> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-keystone/ +F: arch/arm/boot/dts/k2* T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK @@ -1280,6 +1286,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-berlin/ F: arch/arm/boot/dts/berlin* +F: arch/arm64/boot/dts/marvell/berlin* ARM/Marvell Dove/MV78xx0/Orion SOC support @@ -1418,6 +1425,7 @@ S: Maintained F: arch/arm/boot/dts/qcom-*.dts F: arch/arm/boot/dts/qcom-*.dtsi F: arch/arm/mach-qcom/ +F: arch/arm64/boot/dts/qcom/* F: drivers/soc/qcom/ F: drivers/tty/serial/msm_serial.h F: drivers/tty/serial/msm_serial.c @@ -1477,6 +1485,8 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/s3c* +F: arch/arm/boot/dts/s5p* +F: arch/arm/boot/dts/samsung* F: arch/arm/boot/dts/exynos* F: arch/arm64/boot/dts/exynos/ F: arch/arm/plat-samsung/ @@ -1556,6 +1566,7 @@ S: Maintained F: arch/arm/mach-socfpga/ F: arch/arm/boot/dts/socfpga* F: arch/arm/configs/socfpga_defconfig +F: arch/arm64/boot/dts/altera/ W: http://www.rocketboards.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git @@ -1709,7 +1720,7 @@ M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/boot/dts/vexpress* -F: arch/arm64/boot/dts/arm/vexpress* +F: arch/arm64/boot/dts/arm/ F: arch/arm/mach-vexpress/ F: */*/vexpress* F: */*/*/vexpress* @@ -2336,6 +2347,7 @@ F: arch/arm/mach-bcm/ F: arch/arm/boot/dts/bcm113* F: arch/arm/boot/dts/bcm216* F: arch/arm/boot/dts/bcm281* +F: arch/arm64/boot/dts/broadcom/ F: arch/arm/configs/bcm_defconfig F: drivers/mmc/host/sdhci-bcm-kona.c F: drivers/clocksource/bcm_kona_timer.c @@ -3438,7 +3450,7 @@ S: Maintained F: drivers/usb/dwc2/ DESIGNWARE USB3 DRD IP DRIVER -M: Felipe Balbi <balbi@ti.com> +M: Felipe Balbi <balbi@kernel.org> L: linux-usb@vger.kernel.org L: linux-omap@vger.kernel.org T: 
git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git @@ -7350,7 +7362,7 @@ F: drivers/tty/isicom.c F: include/linux/isicom.h MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER -M: Felipe Balbi <balbi@ti.com> +M: Felipe Balbi <balbi@kernel.org> L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained @@ -7919,7 +7931,7 @@ F: drivers/media/platform/omap3isp/ F: drivers/staging/media/omap4iss/ OMAP USB SUPPORT -M: Felipe Balbi <balbi@ti.com> +M: Felipe Balbi <balbi@kernel.org> L: linux-usb@vger.kernel.org L: linux-omap@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git @@ -8798,6 +8810,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/hzhuang1/linux.git T: git git://github.com/rjarzmik/linux.git S: Maintained +F: arch/arm/boot/dts/pxa* F: arch/arm/mach-pxa/ F: drivers/dma/pxa* F: drivers/pcmcia/pxa2xx* @@ -8827,6 +8840,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) T: git git://github.com/hzhuang1/linux.git T: git git://git.linaro.org/people/ycmiao/pxa-linux.git S: Maintained +F: arch/arm/boot/dts/mmp* F: arch/arm/mach-mmp/ PXA MMCI DRIVER @@ -9773,10 +9787,11 @@ S: Supported F: drivers/scsi/be2iscsi/ Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER -M: Sathya Perla <sathya.perla@avagotech.com> -M: Ajit Khaparde <ajit.khaparde@avagotech.com> -M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com> -M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com> +M: Sathya Perla <sathya.perla@broadcom.com> +M: Ajit Khaparde <ajit.khaparde@broadcom.com> +M: Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com> +M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com> +M: Somnath Kotur <somnath.kotur@broadcom.com> L: netdev@vger.kernel.org W: http://www.emulex.com S: Supported @@ -10138,6 +10153,7 @@ S: Supported F: drivers/media/pci/solo6x10/ SOFTWARE RAID (Multiple Disks) SUPPORT +M: Shaohua Li <shli@kernel.org> L: linux-raid@vger.kernel.org T: git git://neil.brown.name/md S: Supported @@ -10153,7 +10169,7 @@ F: drivers/net/ethernet/natsemi/sonic.* SONICS SILICON BACKPLANE DRIVER (SSB) M: Michael Buesch <m@bues.ch> -L: netdev@vger.kernel.org +L: linux-wireless@vger.kernel.org S: Maintained F: drivers/ssb/ F: include/linux/ssb/ @@ -10271,6 +10287,7 @@ L: spear-devel@list.st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.st.com/spear S: Maintained +F: arch/arm/boot/dts/spear* F: arch/arm/mach-spear/ SPEAR CLOCK FRAMEWORK SUPPORT @@ -11298,7 +11315,7 @@ F: Documentation/usb/ehci.txt F: drivers/usb/host/ehci* USB GADGET/PERIPHERAL SUBSYSTEM -M: Felipe Balbi <balbi@ti.com> +M: Felipe Balbi <balbi@kernel.org> L: linux-usb@vger.kernel.org W: http://www.linux-usb.org/gadget T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git @@ -11374,7 +11391,7 @@ S: Maintained F: drivers/net/usb/pegasus.* USB PHY LAYER -M: Felipe Balbi <balbi@ti.com> +M: Felipe Balbi <balbi@kernel.org> L: linux-usb@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git S: Maintained @@ -12113,7 +12130,7 @@ F: drivers/net/hamradio/*scc.c F: drivers/net/hamradio/z8530.h ZBUD COMPRESSED PAGE ALLOCATOR -M: Seth Jennings <sjennings@variantweb.net> +M: Seth Jennings <sjenning@redhat.com> L: linux-mm@kvack.org S: Maintained F: mm/zbud.c @@ -12168,7 +12185,7 @@ F: include/linux/zsmalloc.h F: Documentation/vm/zsmalloc.txt ZSWAP COMPRESSED SWAP CACHING -M: Seth Jennings 
<sjennings@variantweb.net> +M: Seth Jennings <sjenning@redhat.com> L: linux-mm@kvack.org S: Maintained F: mm/zswap.c @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 5 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Blurry Fish Butt # *DOCUMENTATION* diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 76dde9db7934..0655495470ad 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -338,6 +338,19 @@ config ARC_PAGE_SIZE_4K endchoice +choice + prompt "MMU Super Page Size" + depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE + default ARC_HUGEPAGE_2M + +config ARC_HUGEPAGE_2M + bool "2MB" + +config ARC_HUGEPAGE_16M + bool "16MB" + +endchoice + if ISA_ARCOMPACT config ARC_COMPACT_IRQ_LEVELS @@ -410,7 +423,7 @@ config ARC_HAS_RTC default n depends on !SMP -config ARC_HAS_GRTC +config ARC_HAS_GFRC bool "SMP synchronized 64-bit cycle counter" default y depends on SMP @@ -566,6 +579,12 @@ endmenu endmenu # "ARC Architecture Configuration" source "mm/Kconfig" + +config FORCE_MAX_ZONEORDER + int "Maximum zone order" + default "12" if ARC_HUGEPAGE_16M + default "11" + source "net/Kconfig" source "drivers/Kconfig" source "fs/Kconfig" diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index f36c047b33ca..735985974a31 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig @@ -16,7 +16,7 @@ CONFIG_ARC_PLAT_AXS10X=y CONFIG_AXS103=y CONFIG_ISA_ARCV2=y CONFIG_SMP=y -# CONFIG_ARC_HAS_GRTC is not set +# CONFIG_ARC_HAS_GFRC is not set CONFIG_ARC_UBOOT_SUPPORT=y CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" CONFIG_PREEMPT=y diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 7fac7d85ed6a..fdc5be5b1029 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -349,14 +349,13 @@ struct cpuinfo_arc { struct cpuinfo_arc_bpu bpu; struct bcr_identity core; struct bcr_isa isa; - struct bcr_timer timers; unsigned int vec_base; struct cpuinfo_arc_ccm iccm, dccm; struct { unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3, fpu_sp:1, fpu_dp:1, pad2:6, debug:1, ap:1, smart:1, rtt:1, pad3:4, - pad4:8; + timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; } extn; struct bcr_mpy extn_mpy; struct bcr_extn_xymem extn_xymem; diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h index 258b0e5ad332..1fc18ee06cf2 100644 --- a/arch/arc/include/asm/irqflags-arcv2.h +++ b/arch/arc/include/asm/irqflags-arcv2.h @@ -30,8 +30,11 @@ /* Was Intr taken in User Mode */ #define AUX_IRQ_ACT_BIT_U 31 -/* 0 is highest level, but taken by FIRQs, if present in design */ -#define ARCV2_IRQ_DEF_PRIO 0 +/* + * User space should be interruptable even by lowest prio interrupt + * Safe even if actual interrupt priorities is fewer or even one + */ +#define ARCV2_IRQ_DEF_PRIO 15 /* seed value for status register */ #define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \ diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h index 46f4e5351b2a..847e3bbe387f 100644 --- a/arch/arc/include/asm/mcip.h +++ b/arch/arc/include/asm/mcip.h @@ -39,8 +39,8 @@ struct mcip_cmd { #define CMD_DEBUG_SET_MASK 0x34 #define CMD_DEBUG_SET_SELECT 0x36 -#define CMD_GRTC_READ_LO 0x42 -#define CMD_GRTC_READ_HI 0x43 +#define CMD_GFRC_READ_LO 0x42 +#define CMD_GFRC_READ_HI 0x43 #define CMD_IDU_ENABLE 0x71 #define CMD_IDU_DISABLE 0x72 diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 57af2f05ae84..d426d4215513 100644 --- 
a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -179,37 +179,44 @@ #define __S111 PAGE_U_X_W_R /**************************************************************** - * Page Table Lookup split + * 2 tier (PGD:PTE) software page walker * - * We implement 2 tier paging and since this is all software, we are free - * to customize the span of a PGD / PTE entry to suit us - * - * 32 bit virtual address + * [31] 32 bit virtual address [0] * ------------------------------------------------------- - * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE | + * | | <------------ PGDIR_SHIFT ----------> | + * | | | + * | BITS_FOR_PGD | BITS_FOR_PTE | <-- PAGE_SHIFT --> | * ------------------------------------------------------- * | | | * | | --> off in page frame - * | | * | ---> index into Page Table - * | * ----> index into Page Directory + * + * In a single page size configuration, only PAGE_SHIFT is fixed + * So both PGD and PTE sizing can be tweaked + * e.g. 8K page (PAGE_SHIFT 13) can have + * - PGDIR_SHIFT 21 -> 11:8:13 address split + * - PGDIR_SHIFT 24 -> 8:11:13 address split + * + * If Super Page is configured, PGDIR_SHIFT becomes fixed too, + * so the sizing flexibility is gone. */ -#define BITS_IN_PAGE PAGE_SHIFT - -/* Optimal Sizing of Pg Tbl - based on MMU page size */ -#if defined(CONFIG_ARC_PAGE_SIZE_8K) -#define BITS_FOR_PTE 8 /* 11:8:13 */ -#elif defined(CONFIG_ARC_PAGE_SIZE_16K) -#define BITS_FOR_PTE 8 /* 10:8:14 */ -#elif defined(CONFIG_ARC_PAGE_SIZE_4K) -#define BITS_FOR_PTE 9 /* 11:9:12 */ +#if defined(CONFIG_ARC_HUGEPAGE_16M) +#define PGDIR_SHIFT 24 +#elif defined(CONFIG_ARC_HUGEPAGE_2M) +#define PGDIR_SHIFT 21 +#else +/* + * Only Normal page support so "hackable" (see comment above) + * Default value provides 11:8:13 (8K), 11:9:12 (4K) + */ +#define PGDIR_SHIFT 21 #endif -#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE) +#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT) +#define BITS_FOR_PGD (32 - PGDIR_SHIFT) -#define PGDIR_SHIFT (32 - BITS_FOR_PGD) #define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */ #define PGDIR_MASK (~(PGDIR_SIZE-1)) diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cbfec79137bf..b17830294706 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S @@ -211,7 +211,11 @@ debug_marker_syscall: ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig ; entry was via Exception in DS which got preempted in kernel). 
; -; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling +; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround +; +; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline +; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly + .Lintr_ret_to_delay_slot: debug_marker_ds: @@ -222,18 +226,23 @@ debug_marker_ds: ld r2, [sp, PT_ret] ld r3, [sp, PT_status32] + ; STAT32 for Int return created from scratch + ; (No delay dlot, disable Further intr in trampoline) + bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK st r0, [sp, PT_status32] mov r1, .Lintr_ret_to_delay_slot_2 st r1, [sp, PT_ret] + ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots st r2, [sp, 0] st r3, [sp, 4] b .Lisr_ret_fast_path .Lintr_ret_to_delay_slot_2: + ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP sub sp, sp, SZ_PT_REGS st r9, [sp, -4] @@ -243,11 +252,19 @@ debug_marker_ds: ld r9, [sp, 4] sr r9, [erstatus] + ; restore AUX_USER_SP if returning to U mode + bbit0 r9, STATUS_U_BIT, 1f + ld r9, [sp, PT_sp] + sr r9, [AUX_USER_SP] + +1: ld r9, [sp, 8] sr r9, [erbta] ld r9, [sp, -4] add sp, sp, SZ_PT_REGS + + ; return from pure kernel mode to delay slot rtie END(ret_from_exception) diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 0394f9f61b46..942526322ae7 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c @@ -14,6 +14,8 @@ #include <linux/irqchip.h> #include <asm/irq.h> +static int irq_prio; + /* * Early Hardware specific Interrupt setup * -Called very early (start_kernel -> setup_arch -> setup_processor) @@ -24,6 +26,14 @@ void arc_init_IRQ(void) { unsigned int tmp; + struct irq_build { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8; +#else + unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3; +#endif + } irq_bcr; + struct aux_irq_ctrl { #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int res3:18, save_idx_regs:1, res2:1, @@ -46,28 +56,25 @@ void arc_init_IRQ(void) WRITE_AUX(AUX_IRQ_CTRL, ictrl); - /* setup status32, don't enable intr yet as kernel doesn't want */ - tmp = read_aux_reg(0xa); - tmp |= ISA_INIT_STATUS_BITS; - tmp &= ~STATUS_IE_MASK; - asm volatile("flag %0 \n"::"r"(tmp)); - /* * ARCv2 core intc provides multiple interrupt priorities (upto 16). * Typical builds though have only two levels (0-high, 1-low) * Linux by default uses lower prio 1 for most irqs, reserving 0 for * NMI style interrupts in future (say perf) - * - * Read the intc BCR to confirm that Linux default priority is avail - * in h/w - * - * Note: - * IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level - * is 0 based. */ - tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF; - if (ARCV2_IRQ_DEF_PRIO > tmp) - panic("Linux default irq prio incorrect\n"); + + READ_BCR(ARC_REG_IRQ_BCR, irq_bcr); + + irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */ + pr_info("archs-intc\t: %d priority levels (default %d)%s\n", + irq_prio + 1, irq_prio, + irq_bcr.firq ? 
" FIRQ (not used)":""); + + /* setup status32, don't enable intr yet as kernel doesn't want */ + tmp = read_aux_reg(0xa); + tmp |= STATUS_AD_MASK | (irq_prio << 1); + tmp &= ~STATUS_IE_MASK; + asm volatile("flag %0 \n"::"r"(tmp)); } static void arcv2_irq_mask(struct irq_data *data) @@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data) { /* set default priority */ write_aux_reg(AUX_IRQ_SELECT, data->irq); - write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO); + write_aux_reg(AUX_IRQ_PRIORITY, irq_prio); /* * hw auto enables (linux unmask) all by default diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c index bd237acdf4f2..bc771f58fefb 100644 --- a/arch/arc/kernel/mcip.c +++ b/arch/arc/kernel/mcip.c @@ -96,13 +96,13 @@ static void mcip_probe_n_setup(void) #ifdef CONFIG_CPU_BIG_ENDIAN unsigned int pad3:8, idu:1, llm:1, num_cores:6, - iocoh:1, grtc:1, dbg:1, pad2:1, + iocoh:1, gfrc:1, dbg:1, pad2:1, msg:1, sem:1, ipi:1, pad:1, ver:8; #else unsigned int ver:8, pad:1, ipi:1, sem:1, msg:1, - pad2:1, dbg:1, grtc:1, iocoh:1, + pad2:1, dbg:1, gfrc:1, iocoh:1, num_cores:6, llm:1, idu:1, pad3:8; #endif @@ -116,7 +116,7 @@ static void mcip_probe_n_setup(void) IS_AVAIL1(mp.ipi, "IPI "), IS_AVAIL1(mp.idu, "IDU "), IS_AVAIL1(mp.dbg, "DEBUG "), - IS_AVAIL1(mp.grtc, "GRTC")); + IS_AVAIL1(mp.gfrc, "GFRC")); idu_detected = mp.idu; @@ -125,8 +125,8 @@ static void mcip_probe_n_setup(void) __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf); } - if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc) - panic("kernel trying to use non-existent GRTC\n"); + if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc) + panic("kernel trying to use non-existent GFRC\n"); } struct plat_smp_ops plat_smp_ops = { diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index e1b87444ea9a..a7edceba5f84 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -45,6 +45,7 @@ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; static void read_arc_build_cfg_regs(void) { struct bcr_perip uncached_space; + struct bcr_timer timer; struct bcr_generic bcr; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; unsigned long perip_space; @@ -53,7 +54,11 @@ static void read_arc_build_cfg_regs(void) READ_BCR(AUX_IDENTITY, cpu->core); READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa); - READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers); + READ_BCR(ARC_REG_TIMERS_BCR, timer); + cpu->extn.timer0 = timer.t0; + cpu->extn.timer1 = timer.t1; + cpu->extn.rtc = timer.rtc; + cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); @@ -208,9 +213,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) (unsigned int)(arc_get_core_freq() / 10000) % 100); n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ", - IS_AVAIL1(cpu->timers.t0, "Timer0 "), - IS_AVAIL1(cpu->timers.t1, "Timer1 "), - IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ", + IS_AVAIL1(cpu->extn.timer0, "Timer0 "), + IS_AVAIL1(cpu->extn.timer1, "Timer1 "), + IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ", CONFIG_ARC_HAS_RTC)); n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s", @@ -293,13 +298,13 @@ static void arc_chk_core_config(void) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; int fpu_enabled; - if (!cpu->timers.t0) + if (!cpu->extn.timer0) panic("Timer0 is not present!\n"); - if (!cpu->timers.t1) + if (!cpu->extn.timer1) panic("Timer1 is not present!\n"); - if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc) + if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc) panic("RTC is not present\n"); #ifdef 
CONFIG_ARC_HAS_DCCM @@ -334,6 +339,7 @@ static void arc_chk_core_config(void) panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic && + IS_ENABLED(CONFIG_ARC_HAS_LLSC) && !IS_ENABLED(CONFIG_ARC_STAR_9000923308)) panic("llock/scond livelock workaround missing\n"); } diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index dfad287f1db1..156d9833ff84 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -62,7 +62,7 @@ /********** Clock Source Device *********/ -#ifdef CONFIG_ARC_HAS_GRTC +#ifdef CONFIG_ARC_HAS_GFRC static int arc_counter_setup(void) { @@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs) local_irq_save(flags); - __mcip_cmd(CMD_GRTC_READ_LO, 0); + __mcip_cmd(CMD_GFRC_READ_LO, 0); stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK); - __mcip_cmd(CMD_GRTC_READ_HI, 0); + __mcip_cmd(CMD_GFRC_READ_HI, 0); stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK); local_irq_restore(flags); @@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs) } static struct clocksource arc_counter = { - .name = "ARConnect GRTC", + .name = "ARConnect GFRC", .rating = 400, .read = arc_counter_read, .mask = CLOCKSOURCE_MASK(64), diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 04885f9f959e..1fafaad516ba 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -439,6 +439,7 @@ ti,mbox-num-users = <4>; ti,mbox-num-fifos = <8>; mbox_wkupm3: wkup_m3 { + ti,mbox-send-noirq; ti,mbox-tx = <0 0 0>; ti,mbox-rx = <0 0 3>; }; diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index df955ba4dc62..92068fbf8b57 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -73,7 +73,7 @@ global_timer: timer@48240200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x48240200 0x100>; - interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>; interrupt-parent = <&gic>; clocks = <&mpu_periphclk>; }; @@ -81,7 +81,7 @@ local_timer: timer@48240600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0x48240600 0x100>; - interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>; + interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>; interrupt-parent = <&gic>; clocks = <&mpu_periphclk>; }; @@ -290,6 +290,7 @@ ti,mbox-num-users = <4>; ti,mbox-num-fifos = <8>; mbox_wkupm3: wkup_m3 { + ti,mbox-send-noirq; ti,mbox-tx = <0 0 0>; ti,mbox-rx = <0 0 3>; }; diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index 64d43325bcbc..ecd09ab6d581 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -590,8 +590,6 @@ pinctrl-names = "default"; pinctrl-0 = <&pixcir_ts_pins>; reg = <0x5c>; - interrupt-parent = <&gpio3>; - interrupts = <22 0>; attb-gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>; @@ -599,7 +597,7 @@ * 0x264 represents the offset of padconf register of * gpio3_22 from am43xx_pinmux base. 
*/ - interrupts-extended = <&gpio3 22 IRQ_TYPE_NONE>, + interrupts-extended = <&gpio3 22 IRQ_TYPE_EDGE_FALLING>, <&am43xx_pinmux 0x264>; interrupt-names = "tsc", "wakeup"; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index 746fd2b17958..d580e2b70f9a 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -491,7 +491,7 @@ pinctrl-0 = <&pixcir_ts_pins>; reg = <0x5c>; interrupt-parent = <&gpio1>; - interrupts = <17 0>; + interrupts = <17 IRQ_TYPE_EDGE_FALLING>; attb-gpio = <&gpio1 17 GPIO_ACTIVE_HIGH>; diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts index c53882643ae9..8d93882dc8d5 100644 --- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts +++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts @@ -167,7 +167,7 @@ DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d20.rgmii1_rd3 */ DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d21.rgmii1_rd2 */ DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d22.rgmii1_rd1 */ - DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLUP | MUX_MODE3) /* vin2a_d23.rgmii1_rd0 */ + DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d23.rgmii1_rd0 */ >; }; @@ -492,14 +492,14 @@ pinctrl-names = "default"; pinctrl-0 = <&qspi1_pins>; - spi-max-frequency = <20000000>; + spi-max-frequency = <48000000>; spi_flash: spi_flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "spansion,m25p80", "jedec,spi-nor"; reg = <0>; /* CS0 */ - spi-max-frequency = <20000000>; + spi-max-frequency = <48000000>; partition@0 { label = "uboot"; @@ -559,13 +559,13 @@ &cpsw_emac0 { phy_id = <&davinci_mdio>, <0>; - phy-mode = "rgmii"; + phy-mode = "rgmii-txid"; dual_emac_res_vlan = <0>; }; &cpsw_emac1 { phy_id = <&davinci_mdio>, <1>; - phy-mode = "rgmii"; + phy-mode = "rgmii-txid"; dual_emac_res_vlan = <1>; }; @@ -588,7 +588,7 @@ }; &usb2 { - dr_mode = "peripheral"; + dr_mode = "host"; }; &mcasp3 { diff --git a/arch/arm/boot/dts/am57xx-sbc-am57x.dts b/arch/arm/boot/dts/am57xx-sbc-am57x.dts index 77bb8e17401a..988e99632d49 100644 --- a/arch/arm/boot/dts/am57xx-sbc-am57x.dts +++ b/arch/arm/boot/dts/am57xx-sbc-am57x.dts @@ -25,8 +25,8 @@ &dra7_pmx_core { uart3_pins_default: uart3_pins_default { pinctrl-single,pins = < - DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */ - DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */ + DRA7XX_CORE_IOPAD(0x3648, PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd */ + DRA7XX_CORE_IOPAD(0x364c, PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd */ >; }; @@ -108,9 +108,9 @@ pinctrl-0 = <&i2c5_pins_default>; clock-frequency = <400000>; - eeprom_base: atmel@50 { + eeprom_base: atmel@54 { compatible = "atmel,24c08"; - reg = <0x50>; + reg = <0x54>; pagesize = <16>; }; diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index 13cf69a8d0fb..fb9e1bbf2338 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts @@ -152,6 +152,7 @@ nand-on-flash-bbt; partitions { + compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts index 77ddff036409..e683856c507c 100644 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts @@ -114,9 +114,15 @@ macb0: ethernet@f8008000 { 
pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_macb0_default>; + pinctrl-0 = <&pinctrl_macb0_default &pinctrl_macb0_phy_irq>; phy-mode = "rmii"; status = "okay"; + + ethernet-phy@1 { + reg = <0x1>; + interrupt-parent = <&pioA>; + interrupts = <73 IRQ_TYPE_LEVEL_LOW>; + }; }; pdmic@f8018000 { @@ -300,6 +306,10 @@ bias-disable; }; + pinctrl_macb0_phy_irq: macb0_phy_irq { + pinmux = <PIN_PC9__GPIO>; + }; + pinctrl_pdmic_default: pdmic_default { pinmux = <PIN_PB26__PDMIC_DAT>, <PIN_PB27__PDMIC_CLK>; diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts index 131614f28e75..569026e8f96c 100644 --- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts @@ -86,10 +86,12 @@ macb0: ethernet@f8020000 { phy-mode = "rmii"; status = "okay"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>; phy0: ethernet-phy@1 { interrupt-parent = <&pioE>; - interrupts = <1 IRQ_TYPE_EDGE_FALLING>; + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; reg = <1>; }; }; @@ -152,6 +154,10 @@ atmel,pins = <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; }; + pinctrl_macb0_phy_irq: macb0_phy_irq_0 { + atmel,pins = + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; + }; }; }; }; diff --git a/arch/arm/boot/dts/at91-sama5d4ek.dts b/arch/arm/boot/dts/at91-sama5d4ek.dts index 2d4a33100af6..4e98cda97403 100644 --- a/arch/arm/boot/dts/at91-sama5d4ek.dts +++ b/arch/arm/boot/dts/at91-sama5d4ek.dts @@ -160,8 +160,15 @@ }; macb0: ethernet@f8020000 { + pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>; phy-mode = "rmii"; status = "okay"; + + ethernet-phy@1 { + reg = <0x1>; + interrupt-parent = <&pioE>; + interrupts = <1 IRQ_TYPE_LEVEL_LOW>; + }; }; mmc1: mmc@fc000000 { @@ -193,6 +200,10 @@ pinctrl@fc06a000 { board { + pinctrl_macb0_phy_irq: macb0_phy_irq { + atmel,pins = + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; + }; pinctrl_mmc0_cd: mmc0_cd { atmel,pins = <AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>; diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index ca4ddf86817a..626c67d66626 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -215,7 +215,7 @@ }; panel: panel { - compatible = "qd,qd43003c0-40", "simple-panel"; + compatible = "qiaodian,qd43003c0-40", "simple-panel"; backlight = <&backlight>; power-supply = <&panel_reg>; #address-cells = <1>; diff --git a/arch/arm/boot/dts/kirkwood-lswvl.dts b/arch/arm/boot/dts/kirkwood-lswvl.dts index 09eed3cea0af..36eec7392ab4 100644 --- a/arch/arm/boot/dts/kirkwood-lswvl.dts +++ b/arch/arm/boot/dts/kirkwood-lswvl.dts @@ -1,7 +1,8 @@ /* * Device Tree file for Buffalo Linkstation LS-WVL/VL * - * Copyright (C) 2015, rogershimizu@gmail.com + * Copyright (C) 2015, 2016 + * Roger Shimizu <rogershimizu@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -156,21 +157,21 @@ button@1 { label = "Function Button"; linux,code = <KEY_OPTION>; - gpios = <&gpio0 45 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 13 GPIO_ACTIVE_LOW>; }; button@2 { label = "Power-on Switch"; linux,code = <KEY_RESERVED>; linux,input-type = <5>; - gpios = <&gpio0 46 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 14 GPIO_ACTIVE_LOW>; }; button@3 { label = "Power-auto Switch"; linux,code = <KEY_ESC>; linux,input-type = <5>; - gpios = <&gpio0 47 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 15 GPIO_ACTIVE_LOW>; }; }; @@ -185,38 
+186,38 @@ led@1 { label = "lswvl:red:alarm"; - gpios = <&gpio0 36 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>; }; led@2 { label = "lswvl:red:func"; - gpios = <&gpio0 37 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>; }; led@3 { label = "lswvl:amber:info"; - gpios = <&gpio0 38 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; }; led@4 { label = "lswvl:blue:func"; - gpios = <&gpio0 39 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; }; led@5 { label = "lswvl:blue:power"; - gpios = <&gpio0 40 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; default-state = "keep"; }; led@6 { label = "lswvl:red:hdderr0"; - gpios = <&gpio0 34 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; }; led@7 { label = "lswvl:red:hdderr1"; - gpios = <&gpio0 35 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>; }; }; @@ -233,7 +234,7 @@ 3250 1 5000 0>; - alarm-gpios = <&gpio0 43 GPIO_ACTIVE_HIGH>; + alarm-gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>; }; restart_poweroff { diff --git a/arch/arm/boot/dts/kirkwood-lswxl.dts b/arch/arm/boot/dts/kirkwood-lswxl.dts index f5db16a08597..b13ec20a7088 100644 --- a/arch/arm/boot/dts/kirkwood-lswxl.dts +++ b/arch/arm/boot/dts/kirkwood-lswxl.dts @@ -1,7 +1,8 @@ /* * Device Tree file for Buffalo Linkstation LS-WXL/WSXL * - * Copyright (C) 2015, rogershimizu@gmail.com + * Copyright (C) 2015, 2016 + * Roger Shimizu <rogershimizu@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -156,21 +157,21 @@ button@1 { label = "Function Button"; linux,code = <KEY_OPTION>; - gpios = <&gpio1 41 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 9 GPIO_ACTIVE_LOW>; }; button@2 { label = "Power-on Switch"; linux,code = <KEY_RESERVED>; linux,input-type = <5>; - gpios = <&gpio1 42 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 10 GPIO_ACTIVE_LOW>; }; button@3 { label = "Power-auto Switch"; linux,code = <KEY_ESC>; linux,input-type = <5>; - gpios = <&gpio1 43 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 11 GPIO_ACTIVE_LOW>; }; }; @@ -185,12 +186,12 @@ led@1 { label = "lswxl:blue:func"; - gpios = <&gpio1 36 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; }; led@2 { label = "lswxl:red:alarm"; - gpios = <&gpio1 49 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 17 GPIO_ACTIVE_LOW>; }; led@3 { @@ -200,23 +201,23 @@ led@4 { label = "lswxl:blue:power"; - gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>; + default-state = "keep"; }; led@5 { label = "lswxl:red:func"; - gpios = <&gpio1 5 GPIO_ACTIVE_LOW>; - default-state = "keep"; + gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>; }; led@6 { label = "lswxl:red:hdderr0"; - gpios = <&gpio1 2 GPIO_ACTIVE_LOW>; + gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>; }; led@7 { label = "lswxl:red:hdderr1"; - gpios = <&gpio1 3 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>; }; }; @@ -225,15 +226,15 @@ pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>; pinctrl-names = "default"; - gpios = <&gpio0 47 GPIO_ACTIVE_LOW - &gpio0 48 GPIO_ACTIVE_LOW>; + gpios = <&gpio1 16 GPIO_ACTIVE_LOW + &gpio1 15 GPIO_ACTIVE_LOW>; gpio-fan,speed-map = <0 3 1500 2 3250 1 5000 0>; - alarm-gpios = <&gpio1 49 GPIO_ACTIVE_HIGH>; + alarm-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>; }; restart_poweroff { @@ -256,7 +257,7 @@ enable-active-high; regulator-always-on; regulator-boot-on; - gpio = <&gpio0 37 GPIO_ACTIVE_HIGH>; + gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>; }; hdd_power0: regulator@2 { compatible = "regulator-fixed"; diff --git a/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts 
b/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts index 1db6f2c506cc..8082d64266a3 100644 --- a/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts +++ b/arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts @@ -131,6 +131,7 @@ chip-delay = <40>; status = "okay"; partitions { + compatible = "fixed-partitions"; #address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 7fed0bd4f3de..00805322367e 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi @@ -112,14 +112,6 @@ clock-frequency = <400000>; }; -&i2c2 { - clock-frequency = <400000>; -}; - -&i2c3 { - clock-frequency = <400000>; -}; - /* * Only found on the wireless SOM. For the SOM without wireless, the pins for * MMC3 can be routed with jumpers to the second MMC slot on the devkit and @@ -143,6 +135,7 @@ interrupt-parent = <&gpio5>; interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */ ref-clock-frequency = <26000000>; + tcxo-clock-frequency = <26000000>; }; }; diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 888412c63f97..902657d6713b 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi @@ -130,6 +130,16 @@ }; }; +&gpio8 { + /* TI trees use GPIO instead of msecure, see also muxing */ + p234 { + gpio-hog; + gpios = <10 GPIO_ACTIVE_HIGH>; + output-high; + line-name = "gpio8_234/msecure"; + }; +}; + &omap5_pmx_core { pinctrl-names = "default"; pinctrl-0 = < @@ -213,6 +223,13 @@ >; }; + /* TI trees use GPIO mode; msecure mode does not work reliably? */ + palmas_msecure_pins: palmas_msecure_pins { + pinctrl-single,pins = < + OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */ + >; + }; + usbhost_pins: pinmux_usbhost_pins { pinctrl-single,pins = < OMAP5_IOPAD(0x0c4, PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */ @@ -278,6 +295,12 @@ &usbhost_wkup_pins >; + palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { + pinctrl-single,pins = < + OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ + >; + }; + usbhost_wkup_pins: pinmux_usbhost_wkup_pins { pinctrl-single,pins = < OMAP5_IOPAD(0x05a, PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */ @@ -345,6 +368,8 @@ interrupt-controller; #interrupt-cells = <2>; ti,system-power-controller; + pinctrl-names = "default"; + pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>; extcon_usb3: palmas_usb { compatible = "ti,palmas-usb-vid"; @@ -358,6 +383,14 @@ #clock-cells = <0>; }; + rtc { + compatible = "ti,palmas-rtc"; + interrupt-parent = <&palmas>; + interrupts = <8 IRQ_TYPE_NONE>; + ti,backup-battery-chargeable; + ti,backup-battery-charge-high-current; + }; + palmas_pmic { compatible = "ti,palmas-pmic"; interrupt-parent = <&palmas>; diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts index 3daec912b4bf..420788229e6f 100644 --- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts +++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts @@ -1,7 +1,8 @@ /* * Device Tree file for Buffalo Linkstation LS-WTGL * - * Copyright (C) 2015, Roger Shimizu <rogershimizu@gmail.com> + * Copyright (C) 2015, 2016 + * Roger Shimizu <rogershimizu@gmail.com> * * This file is dual-licensed: you can use it either under the terms * of the GPL or the X11 license, at your option. 
Note that this dual @@ -69,8 +70,6 @@ internal-regs { pinctrl: pinctrl@10000 { - pinctrl-0 = <&pmx_usb_power &pmx_power_hdd - &pmx_fan_low &pmx_fan_high &pmx_fan_lock>; pinctrl-names = "default"; pmx_led_power: pmx-leds { @@ -162,6 +161,7 @@ led@1 { label = "lswtgl:blue:power"; gpios = <&gpio0 0 GPIO_ACTIVE_LOW>; + default-state = "keep"; }; led@2 { @@ -188,7 +188,7 @@ 3250 1 5000 0>; - alarm-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>; + alarm-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; }; restart_poweroff { diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index b8032bca4621..db1151c18466 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -1342,7 +1342,7 @@ dbgu: serial@fc069000 { compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart"; reg = <0xfc069000 0x200>; - interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>; + interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_dbgu>; clocks = <&dbgu_clk>; diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi index d0c743853318..27a333eb8987 100644 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi @@ -127,22 +127,14 @@ }; mmcsd_default_mode: mmcsd_default { mmcsd_default_cfg1 { - /* MCCLK */ - pins = "GPIO8_B10"; - ste,output = <0>; - }; - mmcsd_default_cfg2 { - /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */ - pins = "GPIO10_C11", "GPIO15_A12", - "GPIO16_C13", "GPIO23_D15"; - ste,output = <1>; - }; - mmcsd_default_cfg3 { - /* MCCMD, MCDAT3-0, MCMSFBCLK */ - pins = "GPIO9_A10", "GPIO11_B11", - "GPIO12_A11", "GPIO13_C12", - "GPIO14_B12", "GPIO24_C15"; - ste,input = <1>; + /* + * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 + * MCCMD, MCDAT3-0, MCMSFBCLK + */ + pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11", + "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12", + "GPIO16_C13", "GPIO23_D15", "GPIO24_C15"; + ste,output = <2>; }; }; }; @@ -802,10 +794,21 @@ clock-names = "mclk", "apb_pclk"; interrupt-parent = <&vica>; interrupts = <22>; - max-frequency = <48000000>; + max-frequency = <400000>; bus-width = <4>; cap-mmc-highspeed; cap-sd-highspeed; + full-pwr-cycle; + /* + * The STw4811 circuit used with the Nomadik strictly + * requires that all of these signal direction pins be + * routed and used for its 4-bit levelshifter. 
+ */ + st,sig-dir-dat0; + st,sig-dir-dat2; + st,sig-dir-dat31; + st,sig-dir-cmd; + st,sig-pin-fbclk; pinctrl-names = "default"; pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>; vmmc-supply = <&vmmc_regulator>; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 314f6be2dca2..8e8b2ace9b7c 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -426,6 +426,7 @@ CONFIG_SUNXI_WATCHDOG=y CONFIG_IMX2_WDT=y CONFIG_TEGRA_WATCHDOG=m CONFIG_MESON_WATCHDOG=y +CONFIG_DW_WATCHDOG=y CONFIG_DIGICOLOR_WATCHDOG=y CONFIG_MFD_AS3711=y CONFIG_MFD_AS3722=y diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index c5e1943e5427..d18d6b42fcf5 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -50,6 +50,7 @@ CONFIG_SOC_AM33XX=y CONFIG_SOC_AM43XX=y CONFIG_SOC_DRA7XX=y CONFIG_ARM_THUMBEE=y +CONFIG_ARM_KERNMEM_PERMS=y CONFIG_ARM_ERRATA_411920=y CONFIG_ARM_ERRATA_430973=y CONFIG_SMP=y @@ -177,6 +178,7 @@ CONFIG_TI_CPTS=y CONFIG_AT803X_PHY=y CONFIG_SMSC_PHY=y CONFIG_USB_USBNET=m +CONFIG_USB_NET_SMSC75XX=m CONFIG_USB_NET_SMSC95XX=m CONFIG_USB_ALI_M5632=y CONFIG_USB_AN2720=y @@ -290,24 +292,23 @@ CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_MODE_HELPERS=y CONFIG_FB_TILEBLITTING=y -CONFIG_OMAP2_DSS=m -CONFIG_OMAP5_DSS_HDMI=y -CONFIG_OMAP2_DSS_SDI=y -CONFIG_OMAP2_DSS_DSI=y +CONFIG_FB_OMAP5_DSS_HDMI=y +CONFIG_FB_OMAP2_DSS_SDI=y +CONFIG_FB_OMAP2_DSS_DSI=y CONFIG_FB_OMAP2=m -CONFIG_DISPLAY_ENCODER_TFP410=m -CONFIG_DISPLAY_ENCODER_TPD12S015=m -CONFIG_DISPLAY_CONNECTOR_DVI=m -CONFIG_DISPLAY_CONNECTOR_HDMI=m -CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m -CONFIG_DISPLAY_PANEL_DPI=m -CONFIG_DISPLAY_PANEL_DSI_CM=m -CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m -CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m -CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m -CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m -CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m -CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m +CONFIG_FB_OMAP2_ENCODER_TFP410=m +CONFIG_FB_OMAP2_ENCODER_TPD12S015=m +CONFIG_FB_OMAP2_CONNECTOR_DVI=m +CONFIG_FB_OMAP2_CONNECTOR_HDMI=m +CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m +CONFIG_FB_OMAP2_PANEL_DPI=m +CONFIG_FB_OMAP2_PANEL_DSI_CM=m +CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m +CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m +CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m +CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m +CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m +CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_LCD_CLASS_DEVICE=y CONFIG_LCD_PLATFORM=y @@ -354,6 +355,11 @@ CONFIG_USB_MUSB_DSPS=m CONFIG_USB_INVENTRA_DMA=y CONFIG_USB_TI_CPPI41_DMA=y CONFIG_USB_DWC3=m +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_PL2303=m CONFIG_USB_TEST=m CONFIG_AM335X_PHY_USB=y CONFIG_USB_GADGET=m @@ -387,6 +393,7 @@ CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=m CONFIG_LEDS_GPIO=m CONFIG_LEDS_PWM=m +CONFIG_LEDS_PCA963X=m CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=m CONFIG_LEDS_TRIGGER_ONESHOT=m @@ -449,6 +456,8 @@ CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_ISO8859_1=y CONFIG_PRINTK_TIME=y CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_SPLIT=y +CONFIG_DEBUG_INFO_DWARF4=y CONFIG_MAGIC_SYSRQ=y CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 9cda974a3009..d7f1d69daf6d 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -18,7 +18,6 @@ #include <linux/slab.h> #include <linux/of.h> 
#include <linux/pinctrl/machine.h> -#include <linux/platform_data/mailbox-omap.h> #include <asm/mach-types.h> #include <asm/mach/map.h> @@ -66,32 +65,6 @@ static int __init omap3_l3_init(void) } omap_postcore_initcall(omap3_l3_init); -#if defined(CONFIG_OMAP2PLUS_MBOX) || defined(CONFIG_OMAP2PLUS_MBOX_MODULE) -static inline void __init omap_init_mbox(void) -{ - struct omap_hwmod *oh; - struct platform_device *pdev; - struct omap_mbox_pdata *pdata; - - oh = omap_hwmod_lookup("mailbox"); - if (!oh) { - pr_err("%s: unable to find hwmod\n", __func__); - return; - } - if (!oh->dev_attr) { - pr_err("%s: hwmod doesn't have valid attrs\n", __func__); - return; - } - - pdata = (struct omap_mbox_pdata *)oh->dev_attr; - pdev = omap_device_build("omap-mailbox", -1, oh, pdata, sizeof(*pdata)); - WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n", - __func__, PTR_ERR(pdev)); -} -#else -static inline void omap_init_mbox(void) { } -#endif /* CONFIG_OMAP2PLUS_MBOX */ - static inline void omap_init_sti(void) {} #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE) @@ -229,7 +202,6 @@ static int __init omap2_init_devices(void) * please keep these calls, and their implementations above, * in alphabetical order so they're easier to sort through. */ - omap_init_mbox(); omap_init_mcspi(); omap_init_sham(); omap_init_aes(); diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index e781e4fae13a..a935d28443da 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c @@ -23,6 +23,8 @@ #include <linux/platform_data/pinctrl-single.h> #include <linux/platform_data/iommu-omap.h> #include <linux/platform_data/wkup_m3.h> +#include <linux/platform_data/pwm_omap_dmtimer.h> +#include <plat/dmtimer.h> #include "common.h" #include "common-board-devices.h" @@ -449,6 +451,24 @@ void omap_auxdata_legacy_init(struct device *dev) dev->platform_data = &twl_gpio_auxdata; } +/* Dual mode timer PWM callbacks platdata */ +#if IS_ENABLED(CONFIG_OMAP_DM_TIMER) +struct pwm_omap_dmtimer_pdata pwm_dmtimer_pdata = { + .request_by_node = omap_dm_timer_request_by_node, + .free = omap_dm_timer_free, + .enable = omap_dm_timer_enable, + .disable = omap_dm_timer_disable, + .get_fclk = omap_dm_timer_get_fclk, + .start = omap_dm_timer_start, + .stop = omap_dm_timer_stop, + .set_load = omap_dm_timer_set_load, + .set_match = omap_dm_timer_set_match, + .set_pwm = omap_dm_timer_set_pwm, + .set_prescaler = omap_dm_timer_set_prescaler, + .write_counter = omap_dm_timer_write_counter, +}; +#endif + /* * Few boards still need auxdata populated before we populate * the dev entries in of_platform_populate(). 
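
The pwm_dmtimer_pdata table above exports the dual-mode timer entry points as a plain function-pointer table; the next hunk attaches it to the "ti,omap-dmtimer-pwm" device through OF_DEV_AUXDATA. A minimal sketch of how a consumer could pick that table up from its platform data follows; the driver name, probe body and error codes are illustrative assumptions, not the actual drivers/pwm code:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/platform_data/pwm_omap_dmtimer.h>

/*
 * Hypothetical consumer of the callback table registered via
 * OF_DEV_AUXDATA: all dmtimer access goes through the function
 * pointers, so this code never includes <plat/dmtimer.h>.
 */
static int example_dmtimer_pwm_probe(struct platform_device *pdev)
{
	struct pwm_omap_dmtimer_pdata *timer_pdata;

	timer_pdata = dev_get_platdata(&pdev->dev);
	if (!timer_pdata || !timer_pdata->request_by_node ||
	    !timer_pdata->start || !timer_pdata->stop)
		return -ENODEV;	/* auxdata quirk did not run for this board */

	dev_info(&pdev->dev, "dmtimer callbacks available\n");
	return 0;
}

static struct platform_driver example_dmtimer_pwm_driver = {
	.probe	= example_dmtimer_pwm_probe,
	.driver	= {
		.name = "example-dmtimer-pwm",	/* illustrative name only */
	},
};
module_platform_driver(example_dmtimer_pwm_driver);
MODULE_LICENSE("GPL");
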
@@ -502,6 +522,9 @@ static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("ti,am4372-wkup-m3", 0x44d00000, "44d00000.wkup_m3", &wkup_m3_data), #endif +#if IS_ENABLED(CONFIG_OMAP_DM_TIMER) + OF_DEV_AUXDATA("ti,omap-dmtimer-pwm", 0, NULL, &pwm_dmtimer_pdata), +#endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu", &omap4_iommu_pdata), diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index eafd120b53f1..1b9f0520dea9 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S @@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore) stmfd sp!, {lr} @ save registers on stack /* Setup so that we will disable and enable l2 */ mov r1, #0x1 - adrl r2, l2dis_3630 @ may be too distant for plain adr - str r1, [r2] + adrl r3, l2dis_3630_offset @ may be too distant for plain adr + ldr r2, [r3] @ value for offset + str r1, [r2, r3] @ write to l2dis_3630 ldmfd sp!, {pc} @ restore regs and return ENDPROC(enable_omap3630_toggle_l2_on_restore) - .text -/* Function to call rom code to save secure ram context */ +/* + * Function to call rom code to save secure ram context. This gets + * relocated to SRAM, so it can be all in .data section. Otherwise + * we need to initialize api_params separately. + */ + .data .align 3 ENTRY(save_secure_ram_context) stmfd sp!, {r4 - r11, lr} @ save registers on stack @@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context) ENTRY(save_secure_ram_context_sz) .word . - save_secure_ram_context + .text + /* * ====================== * == Idle entry point == @@ -289,12 +296,6 @@ wait_sdrc_ready: bic r5, r5, #0x40 str r5, [r4] -/* - * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a - * base instead. - * Be careful not to clobber r7 when maintaing this code. - */ - is_dll_in_lock_mode: /* Is dll in lock mode? */ ldr r4, sdrc_dlla_ctrl @@ -302,11 +303,7 @@ is_dll_in_lock_mode: tst r5, #0x4 bne exit_nonoff_modes @ Return if locked /* wait till dll locks */ - adr r7, kick_counter wait_dll_lock_timed: - ldr r4, wait_dll_lock_counter - add r4, r4, #1 - str r4, [r7, #wait_dll_lock_counter - kick_counter] ldr r4, sdrc_dlla_status /* Wait 20uS for lock */ mov r6, #8 @@ -330,9 +327,6 @@ kick_dll: orr r6, r6, #(1<<3) @ enable dll str r6, [r4] dsb - ldr r4, kick_counter - add r4, r4, #1 - str r4, [r7] @ kick_counter b wait_dll_lock_timed exit_nonoff_modes: @@ -360,15 +354,6 @@ sdrc_dlla_status: .word SDRC_DLLA_STATUS_V sdrc_dlla_ctrl: .word SDRC_DLLA_CTRL_V - /* - * When exporting to userspace while the counters are in SRAM, - * these 2 words need to be at the end to facilitate retrival! - */ -kick_counter: - .word 0 -wait_dll_lock_counter: - .word 0 - ENTRY(omap3_do_wfi_sz) .word . - omap3_do_wfi @@ -437,7 +422,9 @@ ENTRY(omap3_restore) cmp r2, #0x0 @ Check if target power state was OFF or RET bne logic_l1_restore - ldr r0, l2dis_3630 + adr r1, l2dis_3630_offset @ address for offset + ldr r0, [r1] @ value for offset + ldr r0, [r1, r0] @ value at l2dis_3630 cmp r0, #0x1 @ should we disable L2 on 3630? 
bne skipl2dis mrc p15, 0, r0, c1, c0, 1 @@ -449,12 +436,14 @@ skipl2dis: and r1, #0x700 cmp r1, #0x300 beq l2_inv_gp + adr r0, l2_inv_api_params_offset + ldr r3, [r0] + add r3, r3, r0 @ r3 points to dummy parameters mov r0, #40 @ set service ID for PPA mov r12, r0 @ copy secure Service ID in r12 mov r1, #0 @ set task id for ROM code in r1 mov r2, #4 @ set some flags in r2, r6 mov r6, #0xff - adr r3, l2_inv_api_params @ r3 points to dummy parameters dsb @ data write barrier dmb @ data memory barrier smc #1 @ call SMI monitor (smi #1) @@ -488,8 +477,8 @@ skipl2dis: b logic_l1_restore .align -l2_inv_api_params: - .word 0x1, 0x00 +l2_inv_api_params_offset: + .long l2_inv_api_params - . l2_inv_gp: /* Execute smi to invalidate L2 cache */ mov r12, #0x1 @ set up to invalidate L2 @@ -506,7 +495,9 @@ l2_inv_gp: mov r12, #0x2 smc #0 @ Call SMI monitor (smieq) logic_l1_restore: - ldr r1, l2dis_3630 + adr r0, l2dis_3630_offset @ adress for offset + ldr r1, [r0] @ value for offset + ldr r1, [r0, r1] @ value at l2dis_3630 cmp r1, #0x1 @ Test if L2 re-enable needed on 3630 bne skipl2reen mrc p15, 0, r1, c1, c0, 1 @@ -535,9 +526,17 @@ control_stat: .word CONTROL_STAT control_mem_rta: .word CONTROL_MEM_RTA_CTRL +l2dis_3630_offset: + .long l2dis_3630 - . + + .data l2dis_3630: .word 0 + .data +l2_inv_api_params: + .word 0x1, 0x00 + /* * Internal functions */ diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S index 9b09d85d811a..c7a3b4aab4b5 100644 --- a/arch/arm/mach-omap2/sleep44xx.S +++ b/arch/arm/mach-omap2/sleep44xx.S @@ -29,12 +29,6 @@ dsb .endm -ppa_zero_params: - .word 0x0 - -ppa_por_params: - .word 1, 0 - #ifdef CONFIG_ARCH_OMAP4 /* @@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume) beq skip_ns_smp_enable ppa_actrl_retry: mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX - adr r3, ppa_zero_params @ Pointer to parameters + adr r1, ppa_zero_params_offset + ldr r3, [r1] + add r3, r3, r1 @ Pointer to ppa_zero_params mov r1, #0x0 @ Process ID mov r2, #0x4 @ Flag mov r6, #0xff @@ -303,7 +299,9 @@ skip_ns_smp_enable: ldr r0, =OMAP4_PPA_L2_POR_INDEX ldr r1, =OMAP44XX_SAR_RAM_BASE ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET] - adr r3, ppa_por_params + adr r1, ppa_por_params_offset + ldr r3, [r1] + add r3, r3, r1 @ Pointer to ppa_por_params str r4, [r3, #0x04] mov r1, #0x0 @ Process ID mov r2, #0x4 @ Flag @@ -328,6 +326,8 @@ skip_l2en: #endif b cpu_resume @ Jump to generic resume +ppa_por_params_offset: + .long ppa_por_params - . ENDPROC(omap4_cpu_resume) #endif /* CONFIG_ARCH_OMAP4 */ @@ -380,4 +380,13 @@ ENTRY(omap_do_wfi) nop ldmfd sp!, {pc} +ppa_zero_params_offset: + .long ppa_zero_params - . ENDPROC(omap_do_wfi) + + .data +ppa_zero_params: + .word 0 + +ppa_por_params: + .word 1, 0 diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig index def40a0dd60c..70ab4a25a5f8 100644 --- a/arch/arm/mach-realview/Kconfig +++ b/arch/arm/mach-realview/Kconfig @@ -1,5 +1,6 @@ menuconfig ARCH_REALVIEW - bool "ARM Ltd. RealView family" if ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7 + bool "ARM Ltd. 
RealView family" + depends on ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7 select ARM_AMBA select ARM_TIMER_SP804 select COMMON_CLK_VERSATILE diff --git a/arch/arm/mach-realview/platsmp-dt.c b/arch/arm/mach-realview/platsmp-dt.c index 65585392655b..6964e8876061 100644 --- a/arch/arm/mach-realview/platsmp-dt.c +++ b/arch/arm/mach-realview/platsmp-dt.c @@ -80,7 +80,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus) virt_to_phys(versatile_secondary_startup)); } -struct smp_operations realview_dt_smp_ops __initdata = { +static const struct smp_operations realview_dt_smp_ops __initconst = { .smp_prepare_cpus = realview_smp_prepare_cpus, .smp_secondary_init = versatile_secondary_init, .smp_boot_secondary = versatile_boot_secondary, diff --git a/arch/arm/mach-tango/Kconfig b/arch/arm/mach-tango/Kconfig index d6a3714b096e..ebe15b93bbe8 100644 --- a/arch/arm/mach-tango/Kconfig +++ b/arch/arm/mach-tango/Kconfig @@ -1,5 +1,6 @@ config ARCH_TANGO - bool "Sigma Designs Tango4 (SMP87xx)" if ARCH_MULTI_V7 + bool "Sigma Designs Tango4 (SMP87xx)" + depends on ARCH_MULTI_V7 # Cortex-A9 MPCore r3p0, PL310 r3p2 select ARCH_HAS_HOLES_MEMORYMODEL select ARM_ERRATA_754322 diff --git a/arch/arm/mach-tango/platsmp.c b/arch/arm/mach-tango/platsmp.c index a18d5a34e2f5..a21f55e000d2 100644 --- a/arch/arm/mach-tango/platsmp.c +++ b/arch/arm/mach-tango/platsmp.c @@ -9,7 +9,7 @@ static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle) return 0; } -static struct smp_operations tango_smp_ops __initdata = { +static const struct smp_operations tango_smp_ops __initconst = { .smp_boot_secondary = tango_boot_secondary, }; diff --git a/arch/arm64/boot/dts/arm/juno-base.dtsi b/arch/arm64/boot/dts/arm/juno-base.dtsi index dd5158eb5872..e5b59ca9debb 100644 --- a/arch/arm64/boot/dts/arm/juno-base.dtsi +++ b/arch/arm64/boot/dts/arm/juno-base.dtsi @@ -115,6 +115,7 @@ <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, diff --git a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi index da7b6e613257..933cba359918 100644 --- a/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi +++ b/arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi @@ -23,9 +23,8 @@ soc0: soc@000000000 { }; }; - dsa: dsa@c7000000 { + dsaf0: dsa@c7000000 { compatible = "hisilicon,hns-dsaf-v1"; - dsa_name = "dsaf0"; mode = "6port-16rss"; interrupt-parent = <&mbigen_dsa>; @@ -127,7 +126,7 @@ soc0: soc@000000000 { eth0: ethernet@0{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <0>; local-mac-address = [00 00 00 01 00 58]; status = "disabled"; @@ -135,14 +134,14 @@ soc0: soc@000000000 { }; eth1: ethernet@1{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <1>; status = "disabled"; dma-coherent; }; eth2: ethernet@2{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <2>; local-mac-address = [00 00 00 01 00 5a]; status = "disabled"; @@ -150,7 +149,7 @@ soc0: soc@000000000 { }; eth3: ethernet@3{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <3>; local-mac-address = [00 00 00 01 00 5b]; status = "disabled"; @@ -158,7 +157,7 @@ soc0: soc@000000000 { }; eth4: ethernet@4{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = 
<&dsaf0>; port-id = <4>; local-mac-address = [00 00 00 01 00 5c]; status = "disabled"; @@ -166,7 +165,7 @@ soc0: soc@000000000 { }; eth5: ethernet@5{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <5>; local-mac-address = [00 00 00 01 00 5d]; status = "disabled"; @@ -174,7 +173,7 @@ soc0: soc@000000000 { }; eth6: ethernet@6{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <6>; local-mac-address = [00 00 00 01 00 5e]; status = "disabled"; @@ -182,7 +181,7 @@ soc0: soc@000000000 { }; eth7: ethernet@7{ compatible = "hisilicon,hns-nic-v1"; - ae-name = "dsaf0"; + ae-handle = <&dsaf0>; port-id = <7>; local-mac-address = [00 00 00 01 00 5f]; status = "disabled"; diff --git a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts index 7dfe1c085966..62f33fc84e3e 100644 --- a/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts +++ b/arch/arm64/boot/dts/nvidia/tegra132-norrin.dts @@ -12,6 +12,8 @@ rtc1 = "/rtc@0,7000e000"; }; + chosen { }; + memory { device_type = "memory"; reg = <0x0 0x80000000 0x0 0x80000000>; diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 007a69fc4f40..5f3ab8c1db55 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -121,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; asm volatile("// futex_atomic_cmpxchg_inatomic\n" +ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN) " prfm pstl1strm, %2\n" "1: ldxr %w1, %2\n" " sub %w3, %w1, %w4\n" @@ -137,6 +138,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " .align 3\n" " .quad 1b, 4b, 2b, 4b\n" " .popsection\n" +ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN) : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp) : "r" (oldval), "r" (newval), "Ir" (-EFAULT) : "memory"); diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 738a95f93e49..bef6e9243c63 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -182,6 +182,7 @@ #define CPTR_EL2_TCPAC (1 << 31) #define CPTR_EL2_TTA (1 << 20) #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) +#define CPTR_EL2_DEFAULT 0x000033ff /* Hyp Debug Configuration Register bits */ #define MDCR_EL2_TDRA (1 << 11) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 3066328cd86b..779a5872a2c5 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu) static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) { - u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; + u32 mode; - if (vcpu_mode_is_32bit(vcpu)) + if (vcpu_mode_is_32bit(vcpu)) { + mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK; return mode > COMPAT_PSR_MODE_USR; + } + + mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; return mode != PSR_MODE_EL0t; } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 9b2f5a9d019d..ae615b9d9a55 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -39,6 +39,7 @@ #ifndef __ASSEMBLY__ +#include <linux/personality.h> /* for READ_IMPLIES_EXEC */ #include <asm/pgtable-types.h> extern void __cpu_clear_user_page(void *p, unsigned long user); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index ca8f5a5e2f96..f0e7bdfae134 100644 --- 
a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu) write_sysreg(val, hcr_el2); /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ write_sysreg(1 << 15, hstr_el2); - write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2); + + val = CPTR_EL2_DEFAULT; + val |= CPTR_EL2_TTA | CPTR_EL2_TFP; + write_sysreg(val, cptr_el2); + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2); } @@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(HCR_RW, hcr_el2); write_sysreg(0, hstr_el2); write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2); - write_sysreg(0, cptr_el2); + write_sysreg(CPTR_EL2_DEFAULT, cptr_el2); } static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index 648112e90ed5..4d1ac81870d2 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -27,7 +27,11 @@ #define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \ PSR_I_BIT | PSR_D_BIT) -#define EL1_EXCEPT_SYNC_OFFSET 0x200 + +#define CURRENT_EL_SP_EL0_VECTOR 0x0 +#define CURRENT_EL_SP_ELx_VECTOR 0x200 +#define LOWER_EL_AArch64_VECTOR 0x400 +#define LOWER_EL_AArch32_VECTOR 0x600 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset) { @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, *fsr = 0x14; } +enum exception_type { + except_type_sync = 0, + except_type_irq = 0x80, + except_type_fiq = 0x100, + except_type_serror = 0x180, +}; + +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type) +{ + u64 exc_offset; + + switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) { + case PSR_MODE_EL1t: + exc_offset = CURRENT_EL_SP_EL0_VECTOR; + break; + case PSR_MODE_EL1h: + exc_offset = CURRENT_EL_SP_ELx_VECTOR; + break; + case PSR_MODE_EL0t: + exc_offset = LOWER_EL_AArch64_VECTOR; + break; + default: + exc_offset = LOWER_EL_AArch32_VECTOR; + } + + return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type; +} + static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) { unsigned long cpsr = *vcpu_cpsr(vcpu); @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr *vcpu_spsr(vcpu) = cpsr; *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); + *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; - *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; vcpu_sys_reg(vcpu, FAR_EL1) = addr; @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu) *vcpu_spsr(vcpu) = cpsr; *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu); + *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync); *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64; - *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET; /* * Build an unknown exception, depending on the instruction diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index eec3598b4184..2e90371cfb37 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu, if (likely(r->access(vcpu, params, r))) { /* Skip instruction, since it was emulated */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + /* Handled */ + return 0; } - - /* Handled */ - return 0; } /* Not handled */ @@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu, } /** - * kvm_handle_cp_64 
-- handles a mrrc/mcrr trap on a guest CP15 access + * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ @@ -1095,7 +1094,7 @@ out: } /** - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access + * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index cf6240741134..0795c3a36d8f 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -14,6 +14,7 @@ #include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> +#include <linux/vmalloc.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> @@ -44,6 +45,7 @@ static int change_memory_common(unsigned long addr, int numpages, unsigned long end = start + size; int ret; struct page_change_data data; + struct vm_struct *area; if (!PAGE_ALIGNED(addr)) { start &= PAGE_MASK; @@ -51,10 +53,23 @@ static int change_memory_common(unsigned long addr, int numpages, WARN_ON_ONCE(1); } - if (start < MODULES_VADDR || start >= MODULES_END) - return -EINVAL; - - if (end < MODULES_VADDR || end >= MODULES_END) + /* + * Kernel VA mappings are always live, and splitting live section + * mappings into page mappings may cause TLB conflicts. This means + * we have to ensure that changing the permission bits of the range + * we are operating on does not result in such splitting. + * + * Let's restrict ourselves to mappings created by vmalloc (or vmap). + * Those are guaranteed to consist entirely of page mappings, and + * splitting is never needed. + * + * So check whether the [addr, addr + size) interval is entirely + * covered by precisely one VM area that has the VM_ALLOC flag set. + */ + area = find_vm_area((void *)addr); + if (!area || + end > (unsigned long)area->addr + area->size || + !(area->flags & VM_ALLOC)) return -EINVAL; if (!numpages) diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 836ac5a963c8..2841c0a3fd3b 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -276,6 +276,7 @@ source "kernel/Kconfig.preempt" config SMP bool "Symmetric multi-processing support" + depends on MMU ---help--- This enables support for systems with more than one CPU. If you have a system with only one CPU, say N. 
If you have a system with more diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi index 459b9b252c3b..d61b1616b604 100644 --- a/arch/mips/boot/dts/brcm/bcm6328.dtsi +++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi @@ -74,6 +74,7 @@ timer: timer@10000040 { compatible = "syscon"; reg = <0x10000040 0x2c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi index 4fc7ecee273c..1a7efa883c5e 100644 --- a/arch/mips/boot/dts/brcm/bcm7125.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi @@ -98,6 +98,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7125-sun-top-ctrl", "syscon"; reg = <0x404000 0x60c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi index a3039bb53477..d4bf52cfcf17 100644 --- a/arch/mips/boot/dts/brcm/bcm7346.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi @@ -118,6 +118,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7346-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi index 4274ff41ec21..8e2501694d03 100644 --- a/arch/mips/boot/dts/brcm/bcm7358.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi @@ -112,6 +112,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7358-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi index 0dcc9163c27b..7e5f76040fb8 100644 --- a/arch/mips/boot/dts/brcm/bcm7360.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi @@ -112,6 +112,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7360-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi index 2f3f9fc2c478..c739ea77acb0 100644 --- a/arch/mips/boot/dts/brcm/bcm7362.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi @@ -118,6 +118,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7362-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi index bee221b3b568..5f55d0a50a28 100644 --- a/arch/mips/boot/dts/brcm/bcm7420.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi @@ -99,6 +99,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7420-sun-top-ctrl", "syscon"; reg = <0x404000 0x60c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi index 571f30f52e3f..e24d41ab4e30 100644 --- a/arch/mips/boot/dts/brcm/bcm7425.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi @@ -100,6 +100,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi index 614ee211f71a..8b9432cc062b 100644 --- a/arch/mips/boot/dts/brcm/bcm7435.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi @@ -114,6 +114,7 @@ sun_top_ctrl: syscon@404000 { compatible = "brcm,bcm7425-sun-top-ctrl", "syscon"; reg = <0x404000 0x51c>; + little-endian; }; reboot { diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h index e13d41c392ae..f878bec23576 100644 --- a/arch/um/include/asm/page.h +++ b/arch/um/include/asm/page.h @@ -34,21 +34,18 @@ struct page; #if 
defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT) -typedef struct { unsigned long pte_low, pte_high; } pte_t; +typedef struct { unsigned long pte; } pte_t; typedef struct { unsigned long pmd; } pmd_t; typedef struct { unsigned long pgd; } pgd_t; -#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32)) - -#define pte_get_bits(pte, bits) ((pte).pte_low & (bits)) -#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits)) -#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits)) -#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \ - smp_wmb(); \ - (to).pte_low = (from).pte_low; }) -#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high) -#define pte_set_val(pte, phys, prot) \ - ({ (pte).pte_high = (phys) >> 32; \ - (pte).pte_low = (phys) | pgprot_val(prot); }) +#define pte_val(p) ((p).pte) + +#define pte_get_bits(p, bits) ((p).pte & (bits)) +#define pte_set_bits(p, bits) ((p).pte |= (bits)) +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits)) +#define pte_copy(to, from) ({ (to).pte = (from).pte; }) +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE)) +#define pte_set_val(p, phys, prot) \ + ({ (p).pte = (phys) | pgprot_val(prot); }) #define pmd_val(x) ((x).pmd) #define __pmd(x) ((pmd_t) { (x) } ) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9af2e6338400..ab2ed5328f0a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -475,6 +475,7 @@ config X86_UV depends on X86_64 depends on X86_EXTENDED_PLATFORM depends on NUMA + depends on EFI depends on X86_X2APIC depends on PCI ---help--- diff --git a/arch/x86/crypto/chacha20-ssse3-x86_64.S b/arch/x86/crypto/chacha20-ssse3-x86_64.S index 712b13047b41..3a33124e9112 100644 --- a/arch/x86/crypto/chacha20-ssse3-x86_64.S +++ b/arch/x86/crypto/chacha20-ssse3-x86_64.S @@ -157,7 +157,9 @@ ENTRY(chacha20_4block_xor_ssse3) # done with the slightly better performing SSSE3 byte shuffling, # 7/12-bit word rotation uses traditional shift+OR. - sub $0x40,%rsp + mov %rsp,%r11 + sub $0x80,%rsp + and $~63,%rsp # x0..15[0-3] = s0..3[0..3] movq 0x00(%rdi),%xmm1 @@ -620,6 +622,6 @@ ENTRY(chacha20_4block_xor_ssse3) pxor %xmm1,%xmm15 movdqu %xmm15,0xf0(%rsi) - add $0x40,%rsp + mov %r11,%rsp ret ENDPROC(chacha20_4block_xor_ssse3) diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 42982b26e32b..740d7ac03a55 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -173,10 +173,10 @@ static __init int setup_hugepagesz(char *opt) } __setup("hugepagesz=", setup_hugepagesz); -#ifdef CONFIG_CMA +#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) static __init int gigantic_pages_init(void) { - /* With CMA we can allocate gigantic pages at runtime */ + /* With compaction or CMA we can allocate gigantic pages at runtime */ if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT)) hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); return 0; diff --git a/block/blk-core.c b/block/blk-core.c index ab51685988c2..b83d29755b5a 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -2455,14 +2455,16 @@ struct request *blk_peek_request(struct request_queue *q) rq = NULL; break; - } else if (ret == BLKPREP_KILL) { + } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) { + int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO; + rq->cmd_flags |= REQ_QUIET; /* * Mark this request as started so we don't trigger * any debug logic in the end I/O path. 
*/ blk_start_request(rq); - __blk_end_request_all(rq, -EIO); + __blk_end_request_all(rq, err); } else { printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); break; diff --git a/block/ioctl.c b/block/ioctl.c index 77f5d17779d6..d8996bbd7f12 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -434,42 +434,6 @@ bool blkdev_dax_capable(struct block_device *bdev) return true; } - -static int blkdev_daxset(struct block_device *bdev, unsigned long argp) -{ - unsigned long arg; - int rc = 0; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - - if (get_user(arg, (int __user *)(argp))) - return -EFAULT; - arg = !!arg; - if (arg == !!(bdev->bd_inode->i_flags & S_DAX)) - return 0; - - if (arg) - arg = S_DAX; - - if (arg && !blkdev_dax_capable(bdev)) - return -ENOTTY; - - inode_lock(bdev->bd_inode); - if (bdev->bd_map_count == 0) - inode_set_flags(bdev->bd_inode, arg, S_DAX); - else - rc = -EBUSY; - inode_unlock(bdev->bd_inode); - return rc; -} -#else -static int blkdev_daxset(struct block_device *bdev, int arg) -{ - if (arg) - return -ENOTTY; - return 0; -} #endif static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode, @@ -634,8 +598,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, case BLKTRACESETUP: case BLKTRACETEARDOWN: return blk_trace_ioctl(bdev, cmd, argp); - case BLKDAXSET: - return blkdev_daxset(bdev, arg); case BLKDAXGET: return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX)); break; diff --git a/block/partition-generic.c b/block/partition-generic.c index 746935a5973c..fefd01b496a0 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -16,6 +16,7 @@ #include <linux/kmod.h> #include <linux/ctype.h> #include <linux/genhd.h> +#include <linux/dax.h> #include <linux/blktrace_api.h> #include "partitions/check.h" @@ -550,13 +551,24 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev) return 0; } -unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) +static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n) { struct address_space *mapping = bdev->bd_inode->i_mapping; + + return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), + NULL); +} + +unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) +{ struct page *page; - page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), - NULL); + /* don't populate page cache for dax capable devices */ + if (IS_DAX(bdev->bd_inode)) + page = read_dax_sector(bdev, n); + else + page = read_pagecache_sector(bdev, n); + if (!IS_ERR(page)) { if (PageError(page)) goto fail; diff --git a/crypto/Kconfig b/crypto/Kconfig index 7240821137fd..3be07ad1d80d 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -472,11 +472,13 @@ config CRYPTO_CRCT10DIF_PCLMUL config CRYPTO_GHASH tristate "GHASH digest algorithm" select CRYPTO_GF128MUL + select CRYPTO_HASH help GHASH is message digest algorithm for GCM (Galois/Counter Mode). config CRYPTO_POLY1305 tristate "Poly1305 authenticator algorithm" + select CRYPTO_HASH help Poly1305 authenticator algorithm, RFC7539. 
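
The two "select CRYPTO_HASH" additions above pin down a real link-time dependency: ghash-generic and poly1305_generic register their algorithms through the shash API, whose symbols exist only when CRYPTO_HASH is built. A stripped-down sketch of that registration path follows; the algorithm itself is a do-nothing placeholder (not the real ghash or poly1305 code), and only the crypto_register_shash()/crypto_unregister_shash() calls and the Kconfig select relationship are the point:

#include <linux/module.h>
#include <linux/string.h>
#include <crypto/internal/hash.h>	/* provided by CRYPTO_HASH */

/* Placeholder callbacks: a real driver implements the digest here. */
static int example_init(struct shash_desc *desc)
{
	return 0;
}

static int example_update(struct shash_desc *desc, const u8 *data,
			  unsigned int len)
{
	return 0;
}

static int example_final(struct shash_desc *desc, u8 *out)
{
	memset(out, 0, 16);	/* dummy 16-byte digest, GHASH-sized */
	return 0;
}

static struct shash_alg example_alg = {
	.digestsize	= 16,
	.init		= example_init,
	.update		= example_update,
	.final		= example_final,
	.base		= {
		.cra_name	 = "example-digest",
		.cra_driver_name = "example-digest-generic",
		.cra_blocksize	 = 16,
		.cra_module	 = THIS_MODULE,
	},
};

static int __init example_mod_init(void)
{
	/* Links against the shash core: without CRYPTO_HASH the symbol
	 * below does not exist, hence the Kconfig "select". */
	return crypto_register_shash(&example_alg);
}

static void __exit example_mod_exit(void)
{
	crypto_unregister_shash(&example_alg);
}

module_init(example_mod_init);
module_exit(example_mod_exit);
MODULE_LICENSE("GPL");
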
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 608a7562839d..68a5ceaa04c8 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -54,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg, lock_sock(sk); if (!ctx->more) { - err = crypto_ahash_init(&ctx->req); + err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), + &ctx->completion); if (err) goto unlock; } @@ -125,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page, } else { if (!ctx->more) { err = crypto_ahash_init(&ctx->req); + err = af_alg_wait_for_completion(err, &ctx->completion); if (err) goto unlock; } diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 38c1aa89d3a0..28556fce4267 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -65,18 +65,10 @@ struct skcipher_async_req { struct skcipher_async_rsgl first_sgl; struct list_head list; struct scatterlist *tsg; - char iv[]; + atomic_t *inflight; + struct skcipher_request req; }; -#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \ - crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))) - -#define GET_REQ_SIZE(ctx) \ - crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)) - -#define GET_IV_SIZE(ctx) \ - crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req)) - #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ sizeof(struct scatterlist) - 1) @@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq) static void skcipher_async_cb(struct crypto_async_request *req, int err) { - struct sock *sk = req->data; - struct alg_sock *ask = alg_sk(sk); - struct skcipher_ctx *ctx = ask->private; - struct skcipher_async_req *sreq = GET_SREQ(req, ctx); + struct skcipher_async_req *sreq = req->data; struct kiocb *iocb = sreq->iocb; - atomic_dec(&ctx->inflight); + atomic_dec(sreq->inflight); skcipher_free_async_sgls(sreq); - kfree(req); + kzfree(sreq); iocb->ki_complete(iocb, err, err); } @@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); struct skcipher_ctx *ctx = ask->private; - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req); + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; unsigned ivsize = crypto_skcipher_ivsize(tfm); struct skcipher_sg_list *sgl; struct af_alg_control con = {}; @@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); struct skcipher_ctx *ctx = ask->private; + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; struct skcipher_sg_list *sgl; struct scatterlist *sg; struct skcipher_async_req *sreq; struct skcipher_request *req; struct skcipher_async_rsgl *last_rsgl = NULL; - unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); - unsigned int reqlen = sizeof(struct skcipher_async_req) + - GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); + unsigned int txbufs = 0, len = 0, tx_nents; + unsigned int reqsize = crypto_skcipher_reqsize(tfm); + unsigned int ivsize = crypto_skcipher_ivsize(tfm); int err = -ENOMEM; bool mark = false; + char *iv; - lock_sock(sk); - req = kmalloc(reqlen, GFP_KERNEL); - if (unlikely(!req)) - goto unlock; + sreq = 
kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL); + if (unlikely(!sreq)) + goto out; - sreq = GET_SREQ(req, ctx); + req = &sreq->req; + iv = (char *)(req + 1) + reqsize; sreq->iocb = msg->msg_iocb; - memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl)); INIT_LIST_HEAD(&sreq->list); + sreq->inflight = &ctx->inflight; + + lock_sock(sk); + tx_nents = skcipher_all_sg_nents(ctx); sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL); - if (unlikely(!sreq->tsg)) { - kfree(req); + if (unlikely(!sreq->tsg)) goto unlock; - } sg_init_table(sreq->tsg, tx_nents); - memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); - skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req)); - skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, - skcipher_async_cb, sk); + memcpy(iv, ctx->iv, ivsize); + skcipher_request_set_tfm(req, tfm); + skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, + skcipher_async_cb, sreq); while (iov_iter_count(&msg->msg_iter)) { struct skcipher_async_rsgl *rsgl; @@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg, sg_mark_end(sreq->tsg + txbufs - 1); skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg, - len, sreq->iv); + len, iv); err = ctx->enc ? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req); if (err == -EINPROGRESS) { atomic_inc(&ctx->inflight); err = -EIOCBQUEUED; + sreq = NULL; goto unlock; } free: skcipher_free_async_sgls(sreq); - kfree(req); unlock: skcipher_wmem_wakeup(sk); release_sock(sk); + kzfree(sreq); +out: return err; } @@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg, { struct sock *sk = sock->sk; struct alg_sock *ask = alg_sk(sk); + struct sock *psk = ask->parent; + struct alg_sock *pask = alg_sk(psk); struct skcipher_ctx *ctx = ask->private; - unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm( - &ctx->req)); + struct skcipher_tfm *skc = pask->private; + struct crypto_skcipher *tfm = skc->skcipher; + unsigned bs = crypto_skcipher_blocksize(tfm); struct skcipher_sg_list *sgl; struct scatterlist *sg; int err = -EAGAIN; @@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk) ask->private = ctx; skcipher_request_set_tfm(&ctx->req, skcipher); - skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, + skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP | + CRYPTO_TFM_REQ_MAY_BACKLOG, af_alg_complete, &ctx->completion); sk->sk_destruct = skcipher_sock_destruct; diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index 758acabf2d81..8f3056cd0399 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -547,9 +547,7 @@ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen, struct pkcs7_signed_info *sinfo = ctx->sinfo; if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) || - !test_bit(sinfo_has_message_digest, &sinfo->aa_set) || - (ctx->msg->data_type == OID_msIndirectData && - !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) { + !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) { pr_warn("Missing required AuthAttr\n"); return -EBADMSG; } diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c index 237f3795cfaa..43fe85f20d57 100644 --- a/crypto/crypto_user.c +++ b/crypto/crypto_user.c @@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (link->dump == NULL) return -EINVAL; + down_read(&crypto_alg_sem); 
list_for_each_entry(alg, &crypto_alg_list, cra_list) dump_alloc += CRYPTO_REPORT_MAXSIZE; @@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) .done = link->done, .min_dump_alloc = dump_alloc, }; - return netlink_dump_start(crypto_nlsk, skb, nlh, &c); + err = netlink_dump_start(crypto_nlsk, skb, nlh, &c); } + up_read(&crypto_alg_sem); + + return err; } err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX, diff --git a/crypto/shash.c b/crypto/shash.c index 88a27de79848..359754591653 100644 --- a/crypto/shash.c +++ b/crypto/shash.c @@ -354,11 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm) crt->final = shash_async_final; crt->finup = shash_async_finup; crt->digest = shash_async_digest; + crt->setkey = shash_async_setkey; + + crt->has_setkey = alg->setkey != shash_no_setkey; - if (alg->setkey) { - crt->setkey = shash_async_setkey; - crt->has_setkey = true; - } if (alg->export) crt->export = shash_async_export; if (alg->import) diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index c570b1d9f094..0872d5fecb82 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -880,7 +880,7 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb, break; case BUS_NOTIFY_DRIVER_NOT_BOUND: case BUS_NOTIFY_UNBOUND_DRIVER: - pdev->dev.pm_domain = NULL; + dev_pm_domain_set(&pdev->dev, NULL); break; case BUS_NOTIFY_ADD_DEVICE: dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 594fcabd22cd..546a3692774f 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ + { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */ + { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */ { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index a4faa438889c..a44c75d4c284 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -250,6 +250,7 @@ enum { AHCI_HFLAG_MULTI_MSI = 0, AHCI_HFLAG_MULTI_MSIX = 0, #endif + AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 
22), /* wake before DMA stop */ /* ap->flags bits */ diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c index b36cae2fd04b..e87bcec0fd7c 100644 --- a/drivers/ata/ahci_brcmstb.c +++ b/drivers/ata/ahci_brcmstb.c @@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev) if (IS_ERR(hpriv)) return PTR_ERR(hpriv); hpriv->plat_data = priv; + hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP; brcm_sata_alpm_init(hpriv); diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index d61740e78d6d..402967902cbe 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -496,8 +496,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) } } - /* fabricate port_map from cap.nr_ports */ - if (!port_map) { + /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */ + if (!port_map && vers < 0x10300) { port_map = (1 << ahci_nr_ports(cap)) - 1; dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); @@ -593,8 +593,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine); int ahci_stop_engine(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); + struct ahci_host_priv *hpriv = ap->host->private_data; u32 tmp; + /* + * On some controllers, stopping a port's DMA engine while the port + * is in ALPM state (partial or slumber) results in failures on + * subsequent DMA engine starts. For those controllers, put the + * port back in active state before stopping its DMA engine. + */ + if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) && + (ap->link.lpm_policy > ATA_LPM_MAX_POWER) && + ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) { + dev_err(ap->host->dev, "Failed to wake up port before engine stop\n"); + return -EIO; + } + tmp = readl(port_mmio + PORT_CMD); /* check if the HBA is idle */ @@ -689,6 +703,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, void __iomem *port_mmio = ahci_port_base(ap); if (policy != ATA_LPM_MAX_POWER) { + /* wakeup flag only applies to the max power policy */ + hints &= ~ATA_LPM_WAKE_ONLY; + /* * Disable interrupts on Phy Ready. 
This keeps us from * getting woken up due to spurious phy ready @@ -704,7 +721,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, u32 cmd = readl(port_mmio + PORT_CMD); if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { - cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); + if (!(hints & ATA_LPM_WAKE_ONLY)) + cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); cmd |= PORT_CMD_ICC_ACTIVE; writel(cmd, port_mmio + PORT_CMD); @@ -712,6 +730,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, /* wait 10ms to be sure we've come out of LPM state */ ata_msleep(ap, 10); + + if (hints & ATA_LPM_WAKE_ONLY) + return 0; } else { cmd |= PORT_CMD_ALPE; if (policy == ATA_LPM_MIN_POWER) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index cbb74719d2c1..55e257c268dd 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, + { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index cdf6215a9a22..051b6158d1b7 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap, static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) { struct ata_port *ap = qc->ap; - unsigned long flags; if (ap->ops->error_handler) { if (in_wq) { - spin_lock_irqsave(ap->lock, flags); - /* EH might have kicked in while host lock is * released. */ @@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) } else ata_port_freeze(ap); } - - spin_unlock_irqrestore(ap->lock, flags); } else { if (likely(!(qc->err_mask & AC_ERR_HSM))) ata_qc_complete(qc); @@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) } } else { if (in_wq) { - spin_lock_irqsave(ap->lock, flags); ata_sff_irq_on(ap); ata_qc_complete(qc); - spin_unlock_irqrestore(ap->lock, flags); } else ata_qc_complete(qc); } @@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, { struct ata_link *link = qc->dev->link; struct ata_eh_info *ehi = &link->eh_info; - unsigned long flags = 0; int poll_next; + lockdep_assert_held(ap->lock); + WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); /* Make sure ata_sff_qc_issue() does not throw things @@ -1112,14 +1106,6 @@ fsm_start: } } - /* Send the CDB (atapi) or the first data block (ata pio out). - * During the state transition, interrupt handler shouldn't - * be invoked before the data transfer is complete and - * hsm_task_state is changed. Hence, the following locking. - */ - if (in_wq) - spin_lock_irqsave(ap->lock, flags); - if (qc->tf.protocol == ATA_PROT_PIO) { /* PIO data out protocol. * send first data block. @@ -1135,9 +1121,6 @@ fsm_start: /* send CDB */ atapi_send_cdb(ap, qc); - if (in_wq) - spin_unlock_irqrestore(ap->lock, flags); - /* if polling, ata_sff_pio_task() handles the rest. * otherwise, interrupt handler takes over from here. 
*/ @@ -1296,7 +1279,8 @@ fsm_start: break; default: poll_next = 0; - BUG(); + WARN(true, "ata%d: SFF host state machine in invalid state %d", + ap->print_id, ap->hsm_task_state); } return poll_next; @@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work) u8 status; int poll_next; + spin_lock_irq(ap->lock); + BUG_ON(ap->sff_pio_task_link == NULL); /* qc can be NULL if timeout occurred */ qc = ata_qc_from_tag(ap, link->active_tag); if (!qc) { ap->sff_pio_task_link = NULL; - return; + goto out_unlock; } fsm_start: @@ -1381,11 +1367,14 @@ fsm_start: */ status = ata_sff_busy_wait(ap, ATA_BUSY, 5); if (status & ATA_BUSY) { + spin_unlock_irq(ap->lock); ata_msleep(ap, 2); + spin_lock_irq(ap->lock); + status = ata_sff_busy_wait(ap, ATA_BUSY, 10); if (status & ATA_BUSY) { ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); - return; + goto out_unlock; } } @@ -1402,6 +1391,8 @@ fsm_start: */ if (poll_next) goto fsm_start; +out_unlock: + spin_unlock_irq(ap->lock); } /** diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c index 93ed14cc2252..f6a9ad52cbbf 100644 --- a/drivers/base/power/common.c +++ b/drivers/base/power/common.c @@ -146,7 +146,7 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd) if (dev->pm_domain == pd) return; - WARN(device_is_bound(dev), + WARN(pd && device_is_bound(dev), "PM domains can only be changed for unbound devices\n"); dev->pm_domain = pd; device_pm_check_callbacks(dev); diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 784dbe897a5e..301b785f9f56 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -173,14 +173,14 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) } /** - * __genpd_poweron - Restore power to a given PM domain and its masters. + * genpd_poweron - Restore power to a given PM domain and its masters. * @genpd: PM domain to power up. * @depth: nesting count for lockdep. * * Restore power to @genpd and all of its masters so that it is possible to * resume a device belonging to it. */ -static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) +static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) { struct gpd_link *link; int ret = 0; @@ -200,7 +200,7 @@ static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) genpd_sd_counter_inc(master); mutex_lock_nested(&master->lock, depth + 1); - ret = __genpd_poweron(master, depth + 1); + ret = genpd_poweron(master, depth + 1); mutex_unlock(&master->lock); if (ret) { @@ -227,21 +227,6 @@ static int __genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth) return ret; } -/** - * genpd_poweron - Restore power to a given PM domain and its masters. - * @genpd: PM domain to power up. 
- */ -static int genpd_poweron(struct generic_pm_domain *genpd) -{ - int ret; - - mutex_lock(&genpd->lock); - ret = __genpd_poweron(genpd, 0); - mutex_unlock(&genpd->lock); - return ret; -} - - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_CALLBACK(genpd, int, save_state, dev); @@ -489,7 +474,7 @@ static int pm_genpd_runtime_resume(struct device *dev) } mutex_lock(&genpd->lock); - ret = __genpd_poweron(genpd, 0); + ret = genpd_poweron(genpd, 0); mutex_unlock(&genpd->lock); if (ret) @@ -1821,8 +1806,10 @@ int genpd_dev_pm_attach(struct device *dev) dev->pm_domain->detach = genpd_dev_pm_detach; dev->pm_domain->sync = genpd_dev_pm_sync; - ret = genpd_poweron(pd); + mutex_lock(&pd->lock); + ret = genpd_poweron(pd, 0); + mutex_unlock(&pd->lock); out: return ret ? -EPROBE_DEFER : 0; } diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c index 8812bfb9e3b8..eea51569f0eb 100644 --- a/drivers/base/regmap/regmap-mmio.c +++ b/drivers/base/regmap/regmap-mmio.c @@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context, while (val_size) { switch (ctx->val_bytes) { case 1: - __raw_writeb(*(u8 *)val, ctx->regs + offset); + writeb(*(u8 *)val, ctx->regs + offset); break; case 2: - __raw_writew(*(u16 *)val, ctx->regs + offset); + writew(*(u16 *)val, ctx->regs + offset); break; case 4: - __raw_writel(*(u32 *)val, ctx->regs + offset); + writel(*(u32 *)val, ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: - __raw_writeq(*(u64 *)val, ctx->regs + offset); + writeq(*(u64 *)val, ctx->regs + offset); break; #endif default: @@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context, while (val_size) { switch (ctx->val_bytes) { case 1: - *(u8 *)val = __raw_readb(ctx->regs + offset); + *(u8 *)val = readb(ctx->regs + offset); break; case 2: - *(u16 *)val = __raw_readw(ctx->regs + offset); + *(u16 *)val = readw(ctx->regs + offset); break; case 4: - *(u32 *)val = __raw_readl(ctx->regs + offset); + *(u32 *)val = readl(ctx->regs + offset); break; #ifdef CONFIG_64BIT case 8: - *(u64 *)val = __raw_readq(ctx->regs + offset); + *(u64 *)val = readq(ctx->regs + offset); break; #endif default: diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 129d47bcc5fc..9a92c072a485 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -132,7 +132,7 @@ config SUNXI_RSB and AC100/AC200 ICs. config UNIPHIER_SYSTEM_BUS - bool "UniPhier System Bus driver" + tristate "UniPhier System Bus driver" depends on ARCH_UNIPHIER && OF default y help diff --git a/drivers/bus/vexpress-config.c b/drivers/bus/vexpress-config.c index 6575c0fe6a4e..c3cb76b363c6 100644 --- a/drivers/bus/vexpress-config.c +++ b/drivers/bus/vexpress-config.c @@ -192,8 +192,10 @@ static int __init vexpress_config_init(void) /* Need the config devices early, before the "normal" devices... 
*/ for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") { err = vexpress_config_populate(node); - if (err) + if (err) { + of_node_put(node); break; + } } return err; diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index dbf22719462f..ff00331bff49 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -372,6 +372,7 @@ config HW_RANDOM_XGENE config HW_RANDOM_STM32 tristate "STMicroelectronics STM32 random number generator" depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST) + depends on HAS_IOMEM help This driver provides kernel-side support for the Random Number Generator hardware found on STM32 microcontrollers. diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 9fda22e3387e..7fddd8696211 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -68,6 +68,7 @@ #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_irq.h> +#include <linux/acpi.h> #ifdef CONFIG_PARISC #include <asm/hardware.h> /* for register_parisc_driver() stuff */ @@ -2054,8 +2055,6 @@ static int hardcode_find_bmc(void) #ifdef CONFIG_ACPI -#include <linux/acpi.h> - /* * Once we get an ACPI failure, we don't try any more, because we go * through the tables sequentially. Once we don't find a table, there diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c index 6dd3317ca365..3eb3f1279fb7 100644 --- a/drivers/crypto/atmel-aes.c +++ b/drivers/crypto/atmel-aes.c @@ -400,7 +400,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd) { int err; - err = clk_prepare_enable(dd->iclk); + err = clk_enable(dd->iclk); if (err) return err; @@ -430,7 +430,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd) dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); - clk_disable_unprepare(dd->iclk); + clk_disable(dd->iclk); return 0; } @@ -448,7 +448,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) { - clk_disable_unprepare(dd->iclk); + clk_disable(dd->iclk); dd->flags &= ~AES_FLAGS_BUSY; if (dd->is_async) @@ -2091,10 +2091,14 @@ static int atmel_aes_probe(struct platform_device *pdev) goto res_err; } - err = atmel_aes_hw_version_init(aes_dd); + err = clk_prepare(aes_dd->iclk); if (err) goto res_err; + err = atmel_aes_hw_version_init(aes_dd); + if (err) + goto iclk_unprepare; + atmel_aes_get_cap(aes_dd); err = atmel_aes_buff_init(aes_dd); @@ -2127,6 +2131,8 @@ err_algs: err_aes_dma: atmel_aes_buff_cleanup(aes_dd); err_aes_buff: +iclk_unprepare: + clk_unprepare(aes_dd->iclk); res_err: tasklet_kill(&aes_dd->done_task); tasklet_kill(&aes_dd->queue_task); @@ -2155,6 +2161,8 @@ static int atmel_aes_remove(struct platform_device *pdev) atmel_aes_dma_cleanup(aes_dd); atmel_aes_buff_cleanup(aes_dd); + clk_unprepare(aes_dd->iclk); + return 0; } diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index 20de861aa0ea..8bf9914d4d15 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err) dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); - clk_disable_unprepare(dd->iclk); + clk_disable(dd->iclk); if (req->base.complete) req->base.complete(&req->base, err); @@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd) { int err; - err = clk_prepare_enable(dd->iclk); + err 
= clk_enable(dd->iclk); if (err) return err; @@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd) dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); - clk_disable_unprepare(dd->iclk); + clk_disable(dd->iclk); } static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, @@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev) goto res_err; } + err = clk_prepare(sha_dd->iclk); + if (err) + goto res_err; + atmel_sha_hw_version_init(sha_dd); atmel_sha_get_cap(sha_dd); @@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev) if (IS_ERR(pdata)) { dev_err(&pdev->dev, "platform data not available\n"); err = PTR_ERR(pdata); - goto res_err; + goto iclk_unprepare; } } if (!pdata->dma_slave) { err = -ENXIO; - goto res_err; + goto iclk_unprepare; } err = atmel_sha_dma_init(sha_dd, pdata); if (err) @@ -1457,6 +1461,8 @@ err_algs: if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); err_sha_dma: +iclk_unprepare: + clk_unprepare(sha_dd->iclk); res_err: tasklet_kill(&sha_dd->done_task); sha_dd_err: @@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev) if (sha_dd->caps.has_dma) atmel_sha_dma_cleanup(sha_dd); - iounmap(sha_dd->io_base); - - clk_put(sha_dd->iclk); - - if (sha_dd->irq >= 0) - free_irq(sha_dd->irq, sha_dd); + clk_unprepare(sha_dd->iclk); return 0; } diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index 8abb4bc548cc..69d4a1326fee 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -534,8 +534,8 @@ static int caam_probe(struct platform_device *pdev) * long pointers in master configuration register */ clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH | - MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ? - MCFGR_LONG_PTR : 0)); + MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | + (sizeof(dma_addr_t) == sizeof(u64) ? 
MCFGR_LONG_PTR : 0)); /* * Read the Compile Time paramters and SCFGR to determine diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index 0643e3366e33..c0656e7f37b5 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa) return -ENOMEM; dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); - if (!dma->cache_pool) + if (!dma->padding_pool) return -ENOMEM; cesa->dma = dma; diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 2aeaebd1c6e7..3f87a03abc22 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c @@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev) handle_simple_irq, IRQ_TYPE_NONE); if (ret) { - dev_info(&pdev->dev, "could not add irqchip\n"); - return ret; + dev_err(&pdev->dev, "could not add irqchip\n"); + goto teardown; } gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc, @@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev) skip_irq: return 0; teardown: + of_mm_gpiochip_remove(&altera_gc->mmchip); pr_err("%s: registration failed with status %d\n", node->full_name, ret); diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index ec58f4288649..cd007a67b302 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc, static int davinci_gpio_probe(struct platform_device *pdev) { int i, base; - unsigned ngpio; + unsigned ngpio, nbank; struct davinci_gpio_controller *chips; struct davinci_gpio_platform_data *pdata; struct davinci_gpio_regs __iomem *regs; @@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev) if (WARN_ON(ARCH_NR_GPIOS < ngpio)) ngpio = ARCH_NR_GPIOS; + nbank = DIV_ROUND_UP(ngpio, 32); chips = devm_kzalloc(dev, - ngpio * sizeof(struct davinci_gpio_controller), + nbank * sizeof(struct davinci_gpio_controller), GFP_KERNEL); if (!chips) return -ENOMEM; @@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev) return irq; } - irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0, + irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0, &davinci_gpio_irq_ops, chips); if (!irq_domain) { diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 66f729eaf00b..20c9539abc36 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \ amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o # add asic specific block -amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \ +amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \ ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \ amdgpu_amdkfd_gfx_v7.o @@ -34,6 +34,7 @@ amdgpu-y += \ # add GMC block amdgpu-y += \ + gmc_v7_0.o \ gmc_v8_0.o # add IH block diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 82edf95b7740..5e7770f9a415 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -87,6 +87,8 @@ extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern int amdgpu_enable_semaphores; extern int amdgpu_powerplay; +extern unsigned amdgpu_pcie_gen_cap; +extern unsigned amdgpu_pcie_lane_cap; #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define 
AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ @@ -132,47 +134,6 @@ extern int amdgpu_powerplay; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) -/* CG block flags */ -#define AMDGPU_CG_BLOCK_GFX (1 << 0) -#define AMDGPU_CG_BLOCK_MC (1 << 1) -#define AMDGPU_CG_BLOCK_SDMA (1 << 2) -#define AMDGPU_CG_BLOCK_UVD (1 << 3) -#define AMDGPU_CG_BLOCK_VCE (1 << 4) -#define AMDGPU_CG_BLOCK_HDP (1 << 5) -#define AMDGPU_CG_BLOCK_BIF (1 << 6) - -/* CG flags */ -#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0) -#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1) -#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2) -#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3) -#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4) -#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5) -#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6) -#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7) -#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8) -#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9) -#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10) -#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11) -#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12) -#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13) -#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14) -#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15) -#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16) - -/* PG flags */ -#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0) -#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1) -#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2) -#define AMDGPU_PG_SUPPORT_UVD (1 << 3) -#define AMDGPU_PG_SUPPORT_VCE (1 << 4) -#define AMDGPU_PG_SUPPORT_CP (1 << 5) -#define AMDGPU_PG_SUPPORT_GDS (1 << 6) -#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7) -#define AMDGPU_PG_SUPPORT_SDMA (1 << 8) -#define AMDGPU_PG_SUPPORT_ACP (1 << 9) -#define AMDGPU_PG_SUPPORT_SAMU (1 << 10) - /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L #define AMDGPU_GFX_SAFE_MODE 0x00000001L @@ -606,8 +567,6 @@ struct amdgpu_sa_manager { uint32_t align; }; -struct amdgpu_sa_bo; - /* sub-allocation buffer */ struct amdgpu_sa_bo { struct list_head olist; @@ -2360,6 +2319,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, uint32_t flags); bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end); bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, struct ttm_mem_reg *mem); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 0e1376317683..362bedc9e507 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_fw_version = get_fw_version }; -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions() +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 79fa5c7de856..04b744d64b57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .get_fw_version = get_fw_version }; -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions() +struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) { return (struct kfd2kgd_calls *)&kfd2kgd; } diff 
--git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index a081dda9fa2f..7a4b101e10c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device, case CGS_SYSTEM_INFO_PCIE_MLW: sys_info->value = adev->pm.pcie_mlw_mask; break; + case CGS_SYSTEM_INFO_CG_FLAGS: + sys_info->value = adev->cg_flags; + break; + case CGS_SYSTEM_INFO_PG_FLAGS: + sys_info->value = adev->pg_flags; + break; default: return -ENODEV; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 65531463f88e..51bfc114584e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1795,15 +1795,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } /* post card */ - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (!amdgpu_card_posted(adev)) + amdgpu_atom_asic_init(adev->mode_info.atom_context); r = amdgpu_resume(adev); + if (r) + DRM_ERROR("amdgpu_resume failed (%d).\n", r); amdgpu_fence_driver_resume(adev); - r = amdgpu_ib_ring_tests(adev); - if (r) - DRM_ERROR("ib ring test failed (%d).\n", r); + if (resume) { + r = amdgpu_ib_ring_tests(adev); + if (r) + DRM_ERROR("ib ring test failed (%d).\n", r); + } r = amdgpu_late_init(adev); if (r) @@ -1933,80 +1938,97 @@ retry: return r; } +#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */ +#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */ + void amdgpu_get_pcie_info(struct amdgpu_device *adev) { u32 mask; int ret; - if (pci_is_root_bus(adev->pdev->bus)) - return; + if (amdgpu_pcie_gen_cap) + adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap; - if (amdgpu_pcie_gen2 == 0) - return; + if (amdgpu_pcie_lane_cap) + adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap; - if (adev->flags & AMD_IS_APU) + /* covers APUs as well */ + if (pci_is_root_bus(adev->pdev->bus)) { + if (adev->pm.pcie_gen_mask == 0) + adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; + if (adev->pm.pcie_mlw_mask == 0) + adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; return; + } - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (!ret) { - adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | - CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | - CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); - - if (mask & DRM_PCIE_SPEED_25) - adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; - if (mask & DRM_PCIE_SPEED_50) - adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; - if (mask & DRM_PCIE_SPEED_80) - adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; - } - ret = drm_pcie_get_max_link_width(adev->ddev, &mask); - if (!ret) { - switch (mask) { - case 32: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 16: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 12: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - 
CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 8: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 4: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 2: - adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | - CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); - break; - case 1: - adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; - break; - default: - break; + if (adev->pm.pcie_gen_mask == 0) { + ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); + if (!ret) { + adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | + CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); + + if (mask & DRM_PCIE_SPEED_25) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; + if (mask & DRM_PCIE_SPEED_50) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; + if (mask & DRM_PCIE_SPEED_80) + adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; + } else { + adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK; + } + } + if (adev->pm.pcie_mlw_mask == 0) { + ret = drm_pcie_get_max_link_width(adev->ddev, &mask); + if (!ret) { + switch (mask) { + case 32: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 16: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 12: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 8: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 4: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 2: + adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | + CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); + break; + case 1: + adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; + break; + default: + break; + } + } else { + adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK; } } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b5dbbb573491..9ef1db87cf26 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -83,6 +83,8 @@ int amdgpu_sched_jobs = 32; int amdgpu_sched_hw_submission = 2; int amdgpu_enable_semaphores = 0; int amdgpu_powerplay = -1; +unsigned amdgpu_pcie_gen_cap = 0; +unsigned amdgpu_pcie_lane_cap = 0; MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); @@ -170,6 +172,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = module_param_named(powerplay, amdgpu_powerplay, int, 0444); 
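/*
 * Annotation (not part of the patch): the pcie_gen_cap / pcie_lane_cap
 * parameters declared just below feed the reworked amdgpu_get_pcie_info()
 * in the amdgpu_device.c hunk above: a non-zero value is taken verbatim as
 * the CAIL speed / width mask, otherwise the link is probed, and when
 * probing is impossible (root bus, APUs) or fails the driver falls back to
 * AMDGPU_DEFAULT_PCIE_GEN_MASK (0x30007) and AMDGPU_DEFAULT_PCIE_MLW_MASK
 * (0x2f0000).
 */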
#endif +MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); +module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444); + +MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))"); +module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444); + static struct pci_device_id pciidlist[] = { #ifdef CONFIG_DRM_AMDGPU_CIK /* Kaveri */ @@ -256,11 +264,11 @@ static struct pci_device_id pciidlist[] = { {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU}, #endif /* topaz */ - {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, + {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ}, /* tonga */ {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index b1969f2b2038..d4e2780c0796 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn, list_for_each_entry(bo, &node->bos, mn_list) { - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound) + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, + end)) continue; r = amdgpu_bo_reserve(bo, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index a2a16acee34d..b8fbbd7699e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_cache.h> #include "amdgpu.h" #include "amdgpu_trace.h" @@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev, AMDGPU_GEM_DOMAIN_OA); bo->flags = flags; + + /* For architectures that don't support WC memory, + * mask out the WC flag from the BO + */ + if (!drm_arch_can_wc_memory()) + bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; + amdgpu_fill_placement_to_bo(bo, placement); /* Kernel allocation are uninterruptible */ r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8b88edb0434b..ca72a2e487b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager, for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) if (fences[i]) - fences[count++] = fences[i]; + fences[count++] = fence_get(fences[i]); if (count) { spin_unlock(&sa_manager->wq.lock); t = fence_wait_any_timeout(fences, count, false, MAX_SCHEDULE_TIMEOUT); + for (i = 0; i < count; ++i) + fence_put(fences[i]); + r = (t > 0) ? 
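/*
 * Annotation (not part of the patch): each candidate fence is now pinned
 * with fence_get() before the manager lock is dropped for
 * fence_wait_any_timeout(), and released with fence_put() once the wait
 * returns, so none of the fences can be freed while being waited on
 * outside the lock.
 */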
0 : t; spin_lock(&sa_manager->wq.lock); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8a1752ff3d8e..6442a06d6fdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm) return !!gtt->userptr; } +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, + unsigned long end) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + unsigned long size; + + if (gtt == NULL) + return false; + + if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr) + return false; + + size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; + if (gtt->userptr > end || gtt->userptr + size <= start) + return false; + + return true; +} + bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) { struct amdgpu_ttm_tt *gtt = (void *)ttm; @@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, flags |= AMDGPU_PTE_SNOOPED; } - if (adev->asic_type >= CHIP_TOPAZ) + if (adev->asic_type >= CHIP_TONGA) flags |= AMDGPU_PTE_EXECUTABLE; flags |= AMDGPU_PTE_READABLE; diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 8b4731d4e10e..474ca02b0949 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -31,6 +31,7 @@ #include "ci_dpm.h" #include "gfx_v7_0.h" #include "atom.h" +#include "amd_pcie.h" #include <linux/seq_file.h> #include "smu/smu_7_0_1_d.h" @@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev) u8 frev, crev; struct ci_power_info *pi; int ret; - u32 mask; pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); if (pi == NULL) return -ENOMEM; adev->pm.dpm.priv = pi; - ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); - if (ret) - pi->sys_pcie_mask = 0; - else - pi->sys_pcie_mask = mask; + pi->sys_pcie_mask = + (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >> + CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT; + pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index fd9c9588ef46..155965ed14a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1762,6 +1762,9 @@ static void cik_program_aspm(struct amdgpu_device *adev) if (amdgpu_aspm == 0) return; + if (pci_is_root_bus(adev->pdev->bus)) + return; + /* XXX double check APUs */ if (adev->flags & AMD_IS_APU) return; @@ -2332,72 +2335,72 @@ static int cik_common_early_init(void *handle) switch (adev->asic_type) { case CHIP_BONAIRE: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CGTS_LS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_MC_LS | - AMDGPU_CG_SUPPORT_MC_MGCG | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG 
| + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x14; break; case CHIP_HAWAII: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_MC_LS | - AMDGPU_CG_SUPPORT_MC_MGCG | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_MC_MGCG | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = 0; adev->external_rev_id = 0x28; break; case CHIP_KAVERI: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CGTS_LS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = - /*AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG |*/ - AMDGPU_PG_SUPPORT_UVD | - /*AMDGPU_PG_SUPPORT_VCE | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS | - AMDGPU_PG_SUPPORT_ACP | - AMDGPU_PG_SUPPORT_SAMU |*/ + /*AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG |*/ + AMD_PG_SUPPORT_UVD | + /*AMD_PG_SUPPORT_VCE | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_ACP | + AMD_PG_SUPPORT_SAMU |*/ 0; if (adev->pdev->device == 0x1312 || adev->pdev->device == 0x1316 || @@ -2409,29 +2412,29 @@ static int cik_common_early_init(void *handle) case CHIP_KABINI: case CHIP_MULLINS: adev->cg_flags = - AMDGPU_CG_SUPPORT_GFX_MGCG | - AMDGPU_CG_SUPPORT_GFX_MGLS | - /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ - AMDGPU_CG_SUPPORT_GFX_CGLS | - AMDGPU_CG_SUPPORT_GFX_CGTS | - AMDGPU_CG_SUPPORT_GFX_CGTS_LS | - AMDGPU_CG_SUPPORT_GFX_CP_LS | - AMDGPU_CG_SUPPORT_SDMA_MGCG | - AMDGPU_CG_SUPPORT_SDMA_LS | - AMDGPU_CG_SUPPORT_BIF_LS | - AMDGPU_CG_SUPPORT_VCE_MGCG | - AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_HDP_LS | - AMDGPU_CG_SUPPORT_HDP_MGCG; + AMD_CG_SUPPORT_GFX_MGCG | + AMD_CG_SUPPORT_GFX_MGLS | + /*AMD_CG_SUPPORT_GFX_CGCG |*/ + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_SDMA_MGCG | + AMD_CG_SUPPORT_SDMA_LS | + AMD_CG_SUPPORT_BIF_LS | + AMD_CG_SUPPORT_VCE_MGCG | + AMD_CG_SUPPORT_UVD_MGCG | + AMD_CG_SUPPORT_HDP_LS | + 
AMD_CG_SUPPORT_HDP_MGCG; adev->pg_flags = - /*AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | */ - AMDGPU_PG_SUPPORT_UVD | - /*AMDGPU_PG_SUPPORT_VCE | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS | - AMDGPU_PG_SUPPORT_SAMU |*/ + /*AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | */ + AMD_PG_SUPPORT_UVD | + /*AMD_PG_SUPPORT_VCE | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_SAMU |*/ 0; if (adev->asic_type == CHIP_KABINI) { if (adev->rev_id == 0) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 5f712ceddf08..c55ecf0ea845 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -885,7 +885,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev, { u32 orig, data; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); } else { @@ -906,7 +906,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev, { u32 orig, data; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) { orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); data |= 0x100; if (orig != data) diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 4dd17f2dd905..9056355309d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->gfx_pg_threshold = 500; pi->caps_fps = true; /* uvd */ - pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; + pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; pi->caps_uvd_dpm = true; /* vce */ - pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; + pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; pi->caps_vce_dpm = true; /* acp */ - pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; + pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? 
true : false; pi->caps_acp_dpm = true; pi->caps_stable_power_state = false; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 72793f93e2fc..7732059ae30f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4109,7 +4109,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) { gfx_v7_0_enable_gui_idle_interrupt(adev, true); tmp = gfx_v7_0_halt_rlc(adev); @@ -4147,9 +4147,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) { u32 data, orig, tmp = 0; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { - if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { - if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) { orig = data = RREG32(mmCP_MEM_SLP_CNTL); data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; if (orig != data) @@ -4176,14 +4176,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) gfx_v7_0_update_rlc(adev, tmp); - if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { + if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) { orig = data = RREG32(mmCGTS_SM_CTRL_REG); data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; - if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && - (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) + if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) && + (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS)) data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; @@ -4249,7 +4249,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS)) data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; else data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; @@ -4263,7 +4263,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS)) data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; else data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; @@ -4276,7 +4276,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable) u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP)) data &= ~0x8000; else data |= 0x8000; @@ -4289,7 +4289,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable) u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS)) data &= ~0x2000; else data |= 0x2000; @@ -4370,7 +4370,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, { u32 data, orig; - if (enable && (adev->pg_flags & 
AMDGPU_PG_SUPPORT_GFX_PG)) { + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) { orig = data = RREG32(mmRLC_PG_CNTL); data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; if (orig != data) @@ -4442,7 +4442,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG)) data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; else data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; @@ -4456,7 +4456,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev, u32 data, orig; orig = data = RREG32(mmRLC_PG_CNTL); - if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) + if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG)) data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; else data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; @@ -4623,15 +4623,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, static void gfx_v7_0_init_pg(struct amdgpu_device *adev) { - if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); - if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { gfx_v7_0_init_gfx_cgpg(adev); gfx_v7_0_enable_cp_pg(adev, true); gfx_v7_0_enable_gds_pg(adev, true); @@ -4643,14 +4643,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev) static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) { - if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_update_gfx_pg(adev, false); - if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { gfx_v7_0_enable_cp_pg(adev, false); gfx_v7_0_enable_gds_pg(adev, false); } @@ -4738,6 +4738,22 @@ static int gfx_v7_0_early_init(void *handle) return 0; } +static int gfx_v7_0_late_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); + if (r) + return r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); + if (r) + return r; + + return 0; +} + static int gfx_v7_0_sw_init(void *handle) { struct amdgpu_ring *ring; @@ -4890,6 +4906,8 @@ static int gfx_v7_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); gfx_v7_0_cp_enable(adev, false); gfx_v7_0_rlc_stop(adev); gfx_v7_0_fini_pg(adev); @@ -5509,14 +5527,14 @@ static int gfx_v7_0_set_powergating_state(void *handle, if (state == AMD_PG_STATE_GATE) gate = true; - if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | - AMDGPU_PG_SUPPORT_GFX_SMG | - AMDGPU_PG_SUPPORT_GFX_DMG | - AMDGPU_PG_SUPPORT_CP | - AMDGPU_PG_SUPPORT_GDS | - 
AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { + if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_GDS | + AMD_PG_SUPPORT_RLC_SMU_HS)) { gfx_v7_0_update_gfx_pg(adev, gate); - if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) { gfx_v7_0_enable_cp_pg(adev, gate); gfx_v7_0_enable_gds_pg(adev, gate); } @@ -5527,7 +5545,7 @@ static int gfx_v7_0_set_powergating_state(void *handle, const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .early_init = gfx_v7_0_early_init, - .late_init = NULL, + .late_init = gfx_v7_0_late_init, .sw_init = gfx_v7_0_sw_init, .sw_fini = gfx_v7_0_sw_fini, .hw_init = gfx_v7_0_hw_init, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 95c0cdfbd1b3..8f8ec37ecd88 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin"); MODULE_FIRMWARE("amdgpu/topaz_pfp.bin"); MODULE_FIRMWARE("amdgpu/topaz_me.bin"); MODULE_FIRMWARE("amdgpu/topaz_mec.bin"); -MODULE_FIRMWARE("amdgpu/topaz_mec2.bin"); MODULE_FIRMWARE("amdgpu/topaz_rlc.bin"); MODULE_FIRMWARE("amdgpu/fiji_ce.bin"); @@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); - if (adev->asic_type != CHIP_STONEY) { + if ((adev->asic_type != CHIP_STONEY) && + (adev->asic_type != CHIP_TOPAZ)) { snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); if (!err) { @@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) if (r) return -EINVAL; - r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, - AMDGPU_UCODE_ID_CP_MEC1); - if (r) - return -EINVAL; + if (adev->asic_type == CHIP_TOPAZ) { + r = gfx_v8_0_cp_compute_load_microcode(adev); + if (r) + return r; + } else { + r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, + AMDGPU_UCODE_ID_CP_MEC1); + if (r) + return -EINVAL; + } } } @@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); + amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); gfx_v8_0_cp_enable(adev, false); gfx_v8_0_rlc_stop(adev); gfx_v8_0_cp_compute_fini(adev); @@ -4329,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int r; + r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0); + if (r) + return r; + + r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0); + if (r) + return r; + /* requires IBs so do in late init after IB pool is initialized */ r = gfx_v8_0_do_edc_gpr_workarounds(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 3f956065d069..b8060795b27b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin"); +MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); + +static const u32 golden_settings_iceland_a11[] = +{ + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE2_LOW_ADDR, 
0x0fffffff, 0x0fffffff, + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff +}; + +static const u32 iceland_mgcg_cgcg_init[] = +{ + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 +}; + +static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { + case CHIP_TOPAZ: + amdgpu_program_register_sequence(adev, + iceland_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); + amdgpu_program_register_sequence(adev, + golden_settings_iceland_a11, + (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); + break; + default: + break; + } +} /** - * gmc8_mc_wait_for_idle - wait for MC idle callback. + * gmc7_mc_wait_for_idle - wait for MC idle callback. * * @adev: amdgpu_device pointer * @@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) case CHIP_HAWAII: chip_name = "hawaii"; break; + case CHIP_TOPAZ: + chip_name = "topaz"; + break; case CHIP_KAVERI: case CHIP_KABINI: return 0; default: BUG(); } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + if (adev->asic_type == CHIP_TOPAZ) + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name); + err = request_firmware(&adev->mc.fw, fw_name, adev->dev); if (err) goto out; @@ -755,7 +792,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) data |= mc_cg_ls_en[i]; else data &= ~mc_cg_ls_en[i]; @@ -772,7 +809,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { orig = data = RREG32(mc_cg_registers[i]); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) data |= mc_cg_en[i]; else data &= ~mc_cg_en[i]; @@ -788,7 +825,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, orig = data = RREG32_PCIE(ixPCIE_CNTL2); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); @@ -811,7 +848,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, orig = data = RREG32(mmHDP_HOST_PATH_CNTL); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); else data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); @@ -827,7 +864,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, orig = data = RREG32(mmHDP_MEM_POWER_LS); - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); else data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); @@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + gmc_v7_0_init_golden_registers(adev); + gmc_v7_0_mc_program(adev); if (!(adev->flags & AMD_IS_APU)) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index c0c9a0101eb4..3efd45546241 
100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -42,9 +42,7 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); -MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); -MODULE_FIRMWARE("amdgpu/fiji_mc.bin"); static const u32 golden_settings_tonga_a11[] = { @@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] = mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 }; -static const u32 golden_settings_iceland_a11[] = -{ - mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, - mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, - mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, - mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff -}; - -static const u32 iceland_mgcg_cgcg_init[] = -{ - mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 -}; - static const u32 cz_mgcg_cgcg_init[] = { mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 @@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] = static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) { switch (adev->asic_type) { - case CHIP_TOPAZ: - amdgpu_program_register_sequence(adev, - iceland_mgcg_cgcg_init, - (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init)); - amdgpu_program_register_sequence(adev, - golden_settings_iceland_a11, - (const u32)ARRAY_SIZE(golden_settings_iceland_a11)); - break; case CHIP_FIJI: amdgpu_program_register_sequence(adev, fiji_mgcg_cgcg_init, @@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); switch (adev->asic_type) { - case CHIP_TOPAZ: - chip_name = "topaz"; - break; case CHIP_TONGA: chip_name = "tonga"; break; case CHIP_FIJI: - chip_name = "fiji"; - break; case CHIP_CARRIZO: case CHIP_STONEY: return 0; @@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle) gmc_v8_0_mc_program(adev); - if (!(adev->flags & AMD_IS_APU)) { + if (adev->asic_type == CHIP_TONGA) { r = gmc_v8_0_mc_load_microcode(adev); if (r) { DRM_ERROR("Failed to load MC firmware!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 966d4b2ed9da..090486c18249 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type) case AMDGPU_UCODE_ID_CP_ME: return UCODE_ID_CP_ME_MASK; case AMDGPU_UCODE_ID_CP_MEC1: - return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK; + return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK; case AMDGPU_UCODE_ID_CP_MEC2: return UCODE_ID_CP_MEC_MASK; case AMDGPU_UCODE_ID_RLC_G: @@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev) return -EINVAL; } - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2, - &toc->entry[toc->num_entries++])) { - DRM_ERROR("Failed to get firmware entry for MEC_JT2\n"); - return -EINVAL; - } - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0, &toc->entry[toc->num_entries++])) { DRM_ERROR("Failed to get firmware entry for SDMA0\n"); @@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev) UCODE_ID_CP_ME_MASK | UCODE_ID_CP_PFP_MASK | UCODE_ID_CP_MEC_MASK | - UCODE_ID_CP_MEC_JT1_MASK | - UCODE_ID_CP_MEC_JT2_MASK; + UCODE_ID_CP_MEC_JT1_MASK; + if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) { DRM_ERROR("Fail to request SMU load ucode\n"); 
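The amdgpu_mn.c and amdgpu_ttm.c hunks above replace the blanket "ttm->state != tt_bound" test in the MMU-notifier callback with the new amdgpu_ttm_tt_affect_userptr() helper, so the notifier only touches buffers whose userptr mapping actually intersects the invalidated range. A minimal stand-alone sketch of that overlap test follows; the function name, addresses and harness are illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Overlap test between a userptr mapping [userptr, userptr + size) and an
 * invalidated interval whose last byte is 'end' (inclusive): there is no
 * overlap if the mapping starts after the interval or ends at or before
 * its start. */
static bool userptr_affected(unsigned long userptr, unsigned long size,
			     unsigned long start, unsigned long end)
{
	if (userptr > end || userptr + size <= start)
		return false;
	return true;
}

int main(void)
{
	const unsigned long base = 0x10000, size = 2 * 4096; /* two 4 KiB pages */

	/* Invalidation hits the second page -> affected. */
	printf("%d\n", userptr_affected(base, size, 0x11000, 0x11fff)); /* 1 */
	/* Invalidation starts right past the mapping -> not affected. */
	printf("%d\n", userptr_affected(base, size, 0x12000, 0x12fff)); /* 0 */
	return 0;
}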
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index 7e9154c7f1db..654d76723bc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev) pi->voltage_drop_t = 0; pi->caps_sclk_throttle_low_notification = false; pi->caps_fps = false; /* true? */ - pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; + pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; pi->caps_uvd_dpm = true; - pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; - pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; - pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; + pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; + pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; + pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false; pi->caps_stable_p_state = false; ret = kv_parse_sys_info_table(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 5e9f73af83a8..fbd3767671bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -611,7 +611,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev, { u32 orig, data; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) { data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); data = 0xfff; WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); @@ -830,6 +830,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle, bool gate = false; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) + return 0; + if (state == AMD_CG_STATE_GATE) gate = true; @@ -848,7 +851,10 @@ static int uvd_v4_2_set_powergating_state(void *handle, * revisit this when there is a cleaner line between * the smc and the hw blocks */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) + return 0; if (state == AMD_PG_STATE_GATE) { uvd_v4_2_stop(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index 38864f562981..57f1c5bf3bf1 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -774,6 +774,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev, static int uvd_v5_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) + return 0; + return 0; } @@ -789,6 +794,9 @@ static int uvd_v5_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) + return 0; + if (state == AMD_PG_STATE_GATE) { uvd_v5_0_stop(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 3d5913926436..0b365b7651ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -532,7 +532,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uvd_v6_0_mc_resume(adev); /* Set dynamic clock gating in S/W control mode */ - if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { + if 
(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) { if (adev->flags & AMD_IS_APU) cz_set_uvd_clock_gating_branches(adev, false); else @@ -1000,7 +1000,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle, struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool enable = (state == AMD_CG_STATE_GATE) ? true : false; - if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) return 0; if (enable) { @@ -1030,6 +1030,9 @@ static int uvd_v6_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD)) + return 0; + if (state == AMD_PG_STATE_GATE) { uvd_v6_0_stop(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 52ac7a8f1e58..a822edacfa95 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable) { bool sw_cg = false; - if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) { if (sw_cg) vce_v2_0_set_sw_cg(adev, true); else @@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) + return 0; + if (state == AMD_PG_STATE_GATE) /* XXX do we need a vce_v2_0_stop()? */ return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index e99af81e4aec..d662fa9f9091 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev) WREG32_P(mmVCE_STATUS, 0, ~1); /* Set Clock-Gating off */ - if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) + if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) vce_v3_0_set_vce_sw_clock_gating(adev, false); if (r) { @@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? true : false; int i; - if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) + if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) return 0; mutex_lock(&adev->grbm_idx_mutex); @@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle, */ struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE)) + return 0; + if (state == AMD_PG_STATE_GATE) /* XXX do we need a vce_v3_0_stop()? 
*/ return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 652e76644c31..0d14d108a6c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -61,6 +61,7 @@ #include "vi.h" #include "vi_dpm.h" #include "gmc_v8_0.h" +#include "gmc_v7_0.h" #include "gfx_v8_0.h" #include "sdma_v2_4.h" #include "sdma_v3_0.h" @@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] = }, { .type = AMD_IP_BLOCK_TYPE_GMC, - .major = 8, - .minor = 0, + .major = 7, + .minor = 4, .rev = 0, - .funcs = &gmc_v8_0_ip_funcs, + .funcs = &gmc_v7_0_ip_funcs, }, { .type = AMD_IP_BLOCK_TYPE_IH, @@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle) break; case CHIP_FIJI: adev->has_uvd = true; - adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG | - AMDGPU_CG_SUPPORT_VCE_MGCG; + adev->cg_flags = 0; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x3c; break; @@ -1457,8 +1457,7 @@ static int vi_common_early_init(void *handle) case CHIP_STONEY: adev->has_uvd = true; adev->cg_flags = 0; - /* Disable UVD pg */ - adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE; + adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 9be007081b72..a902ae037398 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work) kfree(p); - kfree((void *)work); + kfree(work); } static void kfd_process_destroy_delayed(struct rcu_head *rcu) diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 1195d06f55bc..dbf7e6413cab 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -85,6 +85,38 @@ enum amd_powergating_state { AMD_PG_STATE_UNGATE, }; +/* CG flags */ +#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0) +#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1) +#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2) +#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3) +#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4) +#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5) +#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6) +#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7) +#define AMD_CG_SUPPORT_MC_LS (1 << 8) +#define AMD_CG_SUPPORT_MC_MGCG (1 << 9) +#define AMD_CG_SUPPORT_SDMA_LS (1 << 10) +#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11) +#define AMD_CG_SUPPORT_BIF_LS (1 << 12) +#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13) +#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14) +#define AMD_CG_SUPPORT_HDP_LS (1 << 15) +#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16) + +/* PG flags */ +#define AMD_PG_SUPPORT_GFX_PG (1 << 0) +#define AMD_PG_SUPPORT_GFX_SMG (1 << 1) +#define AMD_PG_SUPPORT_GFX_DMG (1 << 2) +#define AMD_PG_SUPPORT_UVD (1 << 3) +#define AMD_PG_SUPPORT_VCE (1 << 4) +#define AMD_PG_SUPPORT_CP (1 << 5) +#define AMD_PG_SUPPORT_GDS (1 << 6) +#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7) +#define AMD_PG_SUPPORT_SDMA (1 << 8) +#define AMD_PG_SUPPORT_ACP (1 << 9) +#define AMD_PG_SUPPORT_SAMU (1 << 10) + enum amd_pm_state_type { /* not used for dpm */ POWER_STATE_TYPE_DEFAULT, diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index 713aec954692..aec38fc3834f 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -109,6 +109,8 @@ enum cgs_system_info_id { CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, 
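/*
 * Annotation (not part of the patch): the two entries added below expose the
 * device's clock- and power-gating capability masks (the AMD_CG_SUPPORT_* and
 * AMD_PG_SUPPORT_* bits defined in the amd_shared.h hunk above) through
 * cgs_query_system_info(), served by the new cases in the amdgpu_cgs.c hunk
 * earlier; the cz_hwmgr.c and tonga_hwmgr.c hunks further down use
 * CGS_SYSTEM_INFO_PG_FLAGS to decide whether to set the UVD/VCE power-gating
 * platform caps.
 */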
CGS_SYSTEM_INFO_PCIE_GEN_INFO, CGS_SYSTEM_INFO_PCIE_MLW, + CGS_SYSTEM_INFO_CG_FLAGS, + CGS_SYSTEM_INFO_PG_FLAGS, CGS_SYSTEM_INFO_ID_MAXIMUM, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 0874ab42ee95..cf01177ca3b5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) { struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); uint32_t i; + struct cgs_system_info sys_info = {0}; + int result; cz_hwmgr->gfx_ramp_step = 256*25/100; @@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableVoltageIsland); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (!result) { + if (sys_info.value & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + if (sys_info.value & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + } + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 44a925006479..980d3bf8ea76 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); phw_tonga_ulv_parm *ulv; + struct cgs_system_info sys_info = {0}; PP_ASSERT_WITH_CODE((NULL != hwmgr), "Invalid Parameter!", return -1;); @@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddc_phase_shed_control = 0; - if (0 == result) { - struct cgs_system_info sys_info = {0}; + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS; + result = cgs_query_system_info(hwmgr->device, &sys_info); + if (!result) { + if (sys_info.value & AMD_PG_SUPPORT_UVD) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_UVDPowerGating); + if (sys_info.value & AMD_PG_SUPPORT_VCE) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_VCEPowerGating); + } + if (0 == result) { data->is_tlu_enabled = 0; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = TONGA_MAX_HARDWARE_POWERLEVELS; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 6ed90a2437e5..8ae13de272c4 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -803,6 +803,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad) return mstb; } +static void drm_dp_free_mst_port(struct kref *kref); + +static void drm_dp_free_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = 
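/*
 * Annotation (not part of the patch): this helper is the final-release path
 * for a branch device.  Before freeing the mstb it drops the reference that
 * drm_dp_destroy_mst_branch_device() now takes on the branch's parent port
 * once that port has been unlinked from its parent's port list, matching the
 * kref_get()/kref_init() handling added a few lines further down.
 */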
container_of(kref, struct drm_dp_mst_branch, kref); + if (mstb->port_parent) { + if (list_empty(&mstb->port_parent->next)) + kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port); + } + kfree(mstb); +} + static void drm_dp_destroy_mst_branch_device(struct kref *kref) { struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref); @@ -810,6 +822,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) bool wake_tx = false; /* + * init kref again to be used by ports to remove mst branch when it is + * not needed anymore + */ + kref_init(kref); + + if (mstb->port_parent && list_empty(&mstb->port_parent->next)) + kref_get(&mstb->port_parent->kref); + + /* * destroy all ports - don't need lock * as there are no more references to the mst branch * device at this point. @@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) if (wake_tx) wake_up(&mstb->mgr->tx_waitq); - kfree(mstb); + + kref_put(kref, drm_dp_free_mst_branch_device); } static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb) @@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref) * from an EDID retrieval */ mutex_lock(&mgr->destroy_connector_lock); + kref_get(&port->parent->kref); list_add(&port->next, &mgr->destroy_connector_list); mutex_unlock(&mgr->destroy_connector_lock); schedule_work(&mgr->destroy_connector_work); @@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) return send_link; } -static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb, - struct drm_dp_mst_port *port) +static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid) { int ret; - if (port->dpcd_rev >= 0x12) { - port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid); - if (!port->guid_valid) { - ret = drm_dp_send_dpcd_write(mstb->mgr, - port, - DP_GUID, - 16, port->guid); - port->guid_valid = true; + + memcpy(mstb->guid, guid, 16); + + if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { + if (mstb->port_parent) { + ret = drm_dp_send_dpcd_write( + mstb->mgr, + mstb->port_parent, + DP_GUID, + 16, + mstb->guid); + } else { + + ret = drm_dp_dpcd_write( + mstb->mgr->aux, + DP_GUID, + mstb->guid, + 16); } } } @@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, port->dpcd_rev = port_msg->dpcd_revision; port->num_sdp_streams = port_msg->num_sdp_streams; port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks; - memcpy(port->guid, port_msg->peer_guid, 16); /* manage mstb port lists with mgr lock - take a reference for this list */ @@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, if (old_ddps != port->ddps) { if (port->ddps) { - drm_dp_check_port_guid(mstb, port); if (!port->input) drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); } else { - port->guid_valid = false; port->available_pbn = 0; } } @@ -1130,13 +1159,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, drm_dp_put_port(port); goto out; } - if (port->port_num >= DP_MST_LOGICAL_PORT_0) { - port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); - drm_mode_connector_set_tile_property(port->connector); - } + + drm_mode_connector_set_tile_property(port->connector); + (*mstb->mgr->cbs->register_connector)(port->connector); } - out: /* put reference to this port */ drm_dp_put_port(port); @@ -1161,11 +1188,9 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, port->ddps = conn_stat->displayport_device_plug_status; if (old_ddps != 
port->ddps) { + dowork = true; if (port->ddps) { - drm_dp_check_port_guid(mstb, port); - dowork = true; } else { - port->guid_valid = false; port->available_pbn = 0; } } @@ -1222,13 +1247,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( struct drm_dp_mst_branch *found_mstb; struct drm_dp_mst_port *port; + if (memcmp(mstb->guid, guid, 16) == 0) + return mstb; + + list_for_each_entry(port, &mstb->ports, next) { if (!port->mstb) continue; - if (port->guid_valid && memcmp(port->guid, guid, 16) == 0) - return port->mstb; - found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid); if (found_mstb) @@ -1247,10 +1273,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid( /* find the port by iterating down */ mutex_lock(&mgr->lock); - if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0) - mstb = mgr->mst_primary; - else - mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); + mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); if (mstb) kref_get(&mstb->kref); @@ -1271,8 +1294,13 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m if (port->input) continue; - if (!port->ddps) + if (!port->ddps) { + if (port->cached_edid) { + kfree(port->cached_edid); + port->cached_edid = NULL; + } continue; + } if (!port->available_pbn) drm_dp_send_enum_path_resources(mgr, mstb, port); @@ -1283,6 +1311,12 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m drm_dp_check_and_send_link_address(mgr, mstb_child); drm_dp_put_mst_branch_device(mstb_child); } + } else if (port->pdt == DP_PEER_DEVICE_SST_SINK || + port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) { + if (!port->cached_edid) { + port->cached_edid = + drm_get_edid(port->connector, &port->aux.ddc); + } } } } @@ -1302,6 +1336,8 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) drm_dp_check_and_send_link_address(mgr, mstb); drm_dp_put_mst_branch_device(mstb); } + + (*mgr->cbs->hotplug)(mgr); } static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -1555,10 +1591,12 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, txmsg->reply.u.link_addr.ports[i].num_sdp_streams, txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); } + + drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); + for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); } - (*mgr->cbs->hotplug)(mgr); } } else { mstb->link_address_sent = false; @@ -1602,6 +1640,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, return 0; } +static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb) +{ + if (!mstb->port_parent) + return NULL; + + if (mstb->port_parent->mstb != mstb) + return mstb->port_parent; + + return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent); +} + +static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + int *port_num) +{ + struct drm_dp_mst_branch *rmstb = NULL; + struct drm_dp_mst_port *found_port; + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { + found_port = drm_dp_get_last_connected_port_to_mstb(mstb); + + if (found_port) { + rmstb = found_port->parent; + kref_get(&rmstb->kref); + *port_num = found_port->port_num; + } + } + mutex_unlock(&mgr->lock); + return rmstb; +} + static int drm_dp_payload_send_msg(struct 
drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int id, @@ -1609,13 +1678,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_mst_branch *mstb; - int len, ret; + int len, ret, port_num; u8 sinks[DRM_DP_MAX_SDP_STREAMS]; int i; + port_num = port->port_num; mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); - if (!mstb) - return -EINVAL; + if (!mstb) { + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); + + if (!mstb) + return -EINVAL; + } txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) { @@ -1627,7 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, sinks[i] = i; txmsg->dst = mstb; - len = build_allocate_payload(txmsg, port->port_num, + len = build_allocate_payload(txmsg, port_num, id, pbn, port->num_sdp_streams, sinks); @@ -1983,31 +2057,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mgr->mst_primary = mstb; kref_get(&mgr->mst_primary->kref); - { - struct drm_dp_payload reset_pay; - reset_pay.start_slot = 0; - reset_pay.num_slots = 0x3f; - drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); - } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); if (ret < 0) { goto out_unlock; } - - /* sort out guid */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16); - if (ret != 16) { - DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret); - goto out_unlock; - } - - mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid); - if (!mgr->guid_valid) { - ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16); - mgr->guid_valid = true; + { + struct drm_dp_payload reset_pay; + reset_pay.start_slot = 0; + reset_pay.num_slots = 0x3f; + drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); } queue_work(system_long_wq, &mgr->work); @@ -2231,9 +2291,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) } drm_dp_update_port(mstb, &msg.u.conn_stat); - DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); - (*mgr->cbs->hotplug)(mgr); + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); if (!mstb) @@ -2320,10 +2379,6 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector case DP_PEER_DEVICE_SST_SINK: status = connector_status_connected; - /* for logical ports - cache the EDID */ - if (port->port_num >= 8 && !port->cached_edid) { - port->cached_edid = drm_get_edid(connector, &port->aux.ddc); - } break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) @@ -2378,10 +2433,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ if (port->cached_edid) edid = drm_edid_duplicate(port->cached_edid); - else { - edid = drm_get_edid(connector, &port->aux.ddc); - drm_mode_connector_set_tile_property(connector); - } + port->has_audio = 
drm_detect_monitor_audio(edid); drm_dp_put_port(port); return edid; @@ -2446,6 +2498,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn); if (pbn == port->vcpi.pbn) { *slots = port->vcpi.num_slots; + drm_dp_put_port(port); return true; } } @@ -2605,32 +2658,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status); */ int drm_dp_calc_pbn_mode(int clock, int bpp) { - fixed20_12 pix_bw; - fixed20_12 fbpp; - fixed20_12 result; - fixed20_12 margin, tmp; - u32 res; - - pix_bw.full = dfixed_const(clock); - fbpp.full = dfixed_const(bpp); - tmp.full = dfixed_const(8); - fbpp.full = dfixed_div(fbpp, tmp); - - result.full = dfixed_mul(pix_bw, fbpp); - margin.full = dfixed_const(54); - tmp.full = dfixed_const(64); - margin.full = dfixed_div(margin, tmp); - result.full = dfixed_div(result, margin); - - margin.full = dfixed_const(1006); - tmp.full = dfixed_const(1000); - margin.full = dfixed_div(margin, tmp); - result.full = dfixed_mul(result, margin); - - result.full = dfixed_div(result, tmp); - result.full = dfixed_ceil(result); - res = dfixed_trunc(result); - return res; + u64 kbps; + s64 peak_kbps; + u32 numerator; + u32 denominator; + + kbps = clock * bpp; + + /* + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 + * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on + * common multiplier to render an integer PBN for all link rate/lane + * counts combinations + * calculate + * peak_kbps *= (1006/1000) + * peak_kbps *= (64/54) + * peak_kbps *= 8 convert to bytes + */ + + numerator = 64 * 1006; + denominator = 54 * 8 * 1000 * 1000; + + kbps *= numerator; + peak_kbps = drm_fixp_from_fraction(kbps, denominator); + + return drm_fixp2int_ceil(peak_kbps); } EXPORT_SYMBOL(drm_dp_calc_pbn_mode); @@ -2638,11 +2690,23 @@ static int test_calc_pbn_mode(void) { int ret; ret = drm_dp_calc_pbn_mode(154000, 30); - if (ret != 689) + if (ret != 689) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 154000, 30, 689, ret); return -EINVAL; + } ret = drm_dp_calc_pbn_mode(234000, 30); - if (ret != 1047) + if (ret != 1047) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 234000, 30, 1047, ret); return -EINVAL; + } + ret = drm_dp_calc_pbn_mode(297000, 24); + if (ret != 1063) { + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", + 297000, 24, 1063, ret); + return -EINVAL; + } return 0; } @@ -2783,6 +2847,13 @@ static void drm_dp_tx_work(struct work_struct *work) mutex_unlock(&mgr->qlock); } +static void drm_dp_free_mst_port(struct kref *kref) +{ + struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref); + kref_put(&port->parent->kref, drm_dp_free_mst_branch_device); + kfree(port); +} + static void drm_dp_destroy_connector_work(struct work_struct *work) { struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); @@ -2803,13 +2874,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) list_del(&port->next); mutex_unlock(&mgr->destroy_connector_lock); + kref_init(&port->kref); + INIT_LIST_HEAD(&port->next); + mgr->cbs->destroy_connector(mgr, port->connector); drm_dp_port_teardown_pdt(port, port->pdt); - if (!port->input && port->vcpi.vcpi > 0) - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - kfree(port); + if 
(!port->input && port->vcpi.vcpi > 0) { + if (mgr->mst_state) { + drm_dp_mst_reset_vcpi_slots(mgr, port); + drm_dp_update_payload_part1(mgr); + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); + } + } + + kref_put(&port->kref, drm_dp_free_mst_port); send_hotplug = true; } if (send_hotplug) @@ -2847,6 +2927,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; mgr->max_payloads = max_payloads; mgr->conn_base_id = conn_base_id; + if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || + max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) + return -EINVAL; mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); if (!mgr->payloads) return -ENOMEM; @@ -2854,7 +2937,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, if (!mgr->proposed_vcpis) return -ENOMEM; set_bit(0, &mgr->payload_mask); - test_calc_pbn_mode(); + if (test_calc_pbn_mode() < 0) + DRM_ERROR("MST PBN self-test failed\n"); + return 0; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init); diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c index b79c316c2ad2..673164b331c8 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ b/drivers/gpu/drm/exynos/exynos_dp_core.c @@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = { static int exynos_dp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL; + struct device_node *np = NULL, *endpoint = NULL; struct exynos_dp_device *dp; int ret; @@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dp); /* This is for the backward compatibility. 
*/ - panel_node = of_parse_phandle(dev->of_node, "panel", 0); - if (panel_node) { - dp->panel = of_drm_find_panel(panel_node); - of_node_put(panel_node); + np = of_parse_phandle(dev->of_node, "panel", 0); + if (np) { + dp->panel = of_drm_find_panel(np); + of_node_put(np); if (!dp->panel) return -EPROBE_DEFER; - } else { - endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); - if (endpoint) { - panel_node = of_graph_get_remote_port_parent(endpoint); - if (panel_node) { - dp->panel = of_drm_find_panel(panel_node); - of_node_put(panel_node); - if (!dp->panel) - return -EPROBE_DEFER; - } else { - DRM_ERROR("no port node for panel device.\n"); - return -EINVAL; - } - } - } - - if (endpoint) goto out; + } endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); if (endpoint) { - bridge_node = of_graph_get_remote_port_parent(endpoint); - if (bridge_node) { - dp->ptn_bridge = of_drm_find_bridge(bridge_node); - of_node_put(bridge_node); - if (!dp->ptn_bridge) - return -EPROBE_DEFER; - } else - return -EPROBE_DEFER; + np = of_graph_get_remote_port_parent(endpoint); + if (np) { + /* The remote port can be either a panel or a bridge */ + dp->panel = of_drm_find_panel(np); + if (!dp->panel) { + dp->ptn_bridge = of_drm_find_bridge(np); + if (!dp->ptn_bridge) { + of_node_put(np); + return -EPROBE_DEFER; + } + } + of_node_put(np); + } else { + DRM_ERROR("no remote endpoint device node found.\n"); + return -EINVAL; + } + } else { + DRM_ERROR("no port endpoint subnode found.\n"); + return -EINVAL; } out: diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index d84a498ef099..e977a81af2e6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1906,8 +1906,7 @@ static int exynos_dsi_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int exynos_dsi_suspend(struct device *dev) +static int __maybe_unused exynos_dsi_suspend(struct device *dev) { struct drm_encoder *encoder = dev_get_drvdata(dev); struct exynos_dsi *dsi = encoder_to_dsi(encoder); @@ -1938,7 +1937,7 @@ static int exynos_dsi_suspend(struct device *dev) return 0; } -static int exynos_dsi_resume(struct device *dev) +static int __maybe_unused exynos_dsi_resume(struct device *dev) { struct drm_encoder *encoder = dev_get_drvdata(dev); struct exynos_dsi *dsi = encoder_to_dsi(encoder); @@ -1972,7 +1971,6 @@ err_clk: return ret; } -#endif static const struct dev_pm_ops exynos_dsi_pm_ops = { SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index b5fbc1cbf024..0a5a60005f7e 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int exynos_mixer_suspend(struct device *dev) +static int __maybe_unused exynos_mixer_suspend(struct device *dev) { struct mixer_context *ctx = dev_get_drvdata(dev); struct mixer_resources *res = &ctx->mixer_res; @@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev) return 0; } -static int exynos_mixer_resume(struct device *dev) +static int __maybe_unused exynos_mixer_resume(struct device *dev) { struct mixer_context *ctx = dev_get_drvdata(dev); struct mixer_resources *res = &ctx->mixer_res; @@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev) return 0; } -#endif static const struct dev_pm_ops exynos_mixer_pm_ops = { 
SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL) diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c index 533d1e3d4a99..a02112ba1c3d 100644 --- a/drivers/gpu/drm/i2c/adv7511.c +++ b/drivers/gpu/drm/i2c/adv7511.c @@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg) case ADV7511_REG_BKSV(3): case ADV7511_REG_BKSV(4): case ADV7511_REG_DDC_STATUS: + case ADV7511_REG_EDID_READ_CTRL: case ADV7511_REG_BSTATUS(0): case ADV7511_REG_BSTATUS(1): case ADV7511_REG_CHIP_ID_HIGH: @@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511) { adv7511->current_edid_segment = -1; - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY); - regmap_write(adv7511->regmap, ADV7511_REG_INT(1), - ADV7511_INT1_DDC_ERROR); regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, 0); + if (adv7511->i2c_main->irq) { + /* + * Documentation says the INT_ENABLE registers are reset in + * POWER_DOWN mode. My 7511w preserved the bits, however. + * Still, let's be safe and stick to the documentation. + */ + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), + ADV7511_INT1_DDC_ERROR); + } /* - * Per spec it is allowed to pulse the HDP signal to indicate that the + * Per spec it is allowed to pulse the HPD signal to indicate that the * EDID information has changed. Some monitors do this when they wakeup - * from standby or are enabled. When the HDP goes low the adv7511 is + * from standby or are enabled. When the HPD goes low the adv7511 is * reset and the outputs are disabled which might cause the monitor to - * go to standby again. To avoid this we ignore the HDP pin for the + * go to standby again. To avoid this we ignore the HPD pin for the * first few seconds after enabling the output. */ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HDP_SRC_MASK, - ADV7511_REG_POWER2_HDP_SRC_NONE); + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_NONE); /* * Most of the registers are reset during power down or when HPD is low. 
@@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511) if (ret < 0) return false; - if (irq0 & ADV7511_INT0_HDP) { + if (irq0 & ADV7511_INT0_HPD) { regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_HDP); + ADV7511_INT0_HPD); return true; } @@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511) regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); - if (irq0 & ADV7511_INT0_HDP && adv7511->encoder) + if (irq0 & ADV7511_INT0_HPD && adv7511->encoder) drm_helper_hpd_irq_event(adv7511->encoder->dev); if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { @@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder, /* Reading the EDID only works if the device is powered */ if (!adv7511->powered) { - regmap_write(adv7511->regmap, ADV7511_REG_INT(0), - ADV7511_INT0_EDID_READY); - regmap_write(adv7511->regmap, ADV7511_REG_INT(1), - ADV7511_INT1_DDC_ERROR); regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER, ADV7511_POWER_POWER_DOWN, 0); + if (adv7511->i2c_main->irq) { + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0), + ADV7511_INT0_EDID_READY); + regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1), + ADV7511_INT1_DDC_ERROR); + } adv7511->current_edid_segment = -1; } @@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder, if (adv7511->status == connector_status_connected) status = connector_status_disconnected; } else { - /* Renable HDP sensing */ + /* Renable HPD sensing */ regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2, - ADV7511_REG_POWER2_HDP_SRC_MASK, - ADV7511_REG_POWER2_HDP_SRC_BOTH); + ADV7511_REG_POWER2_HPD_SRC_MASK, + ADV7511_REG_POWER2_HPD_SRC_BOTH); } adv7511->status = status; diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h index 6599ed538426..38515b30cedf 100644 --- a/drivers/gpu/drm/i2c/adv7511.h +++ b/drivers/gpu/drm/i2c/adv7511.h @@ -90,7 +90,7 @@ #define ADV7511_CSC_ENABLE BIT(7) #define ADV7511_CSC_UPDATE_MODE BIT(5) -#define ADV7511_INT0_HDP BIT(7) +#define ADV7511_INT0_HPD BIT(7) #define ADV7511_INT0_VSYNC BIT(5) #define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4) #define ADV7511_INT0_EDID_READY BIT(2) @@ -157,11 +157,11 @@ #define ADV7511_PACKET_ENABLE_SPARE2 BIT(1) #define ADV7511_PACKET_ENABLE_SPARE1 BIT(0) -#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0 -#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00 -#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40 -#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80 -#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0 +#define ADV7511_REG_POWER2_HPD_SRC_MASK 0xc0 +#define ADV7511_REG_POWER2_HPD_SRC_BOTH 0x00 +#define ADV7511_REG_POWER2_HPD_SRC_HPD 0x40 +#define ADV7511_REG_POWER2_HPD_SRC_CEC 0x80 +#define ADV7511_REG_POWER2_HPD_SRC_NONE 0xc0 #define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4) #define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index fcd77b27514d..051eab33e4c7 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -10,7 +10,6 @@ config DRM_I915 # the shmem_readpage() which depends upon tmpfs select SHMEM select TMPFS - select STOP_MACHINE select DRM_KMS_HELPER select DRM_PANEL select DRM_MIPI_DSI diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3ac616d7363b..f357058c74d9 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -501,7 +501,9 @@ void intel_detect_pch(struct drm_device *dev) 
WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev)); } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || - (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) { + ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && + pch->subsystem_vendor == 0x1af4 && + pch->subsystem_device == 0x1100)) { dev_priv->pch_type = intel_virt_detect_pch(dev); } else continue; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2f00828ccc6e..5feb65725c04 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2946,7 +2946,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane, struct i915_vma *vma; u64 offset; - intel_fill_fb_ggtt_view(&view, intel_plane->base.fb, + intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb, intel_plane->base.state); vma = i915_gem_obj_to_ggtt_view(obj, &view); @@ -12075,11 +12075,21 @@ connected_sink_compute_bpp(struct intel_connector *connector, pipe_config->pipe_bpp = connector->base.display_info.bpc*3; } - /* Clamp bpp to 8 on screens without EDID 1.4 */ - if (connector->base.display_info.bpc == 0 && bpp > 24) { - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", - bpp); - pipe_config->pipe_bpp = 24; + /* Clamp bpp to default limit on screens without EDID 1.4 */ + if (connector->base.display_info.bpc == 0) { + int type = connector->base.connector_type; + int clamp_bpp = 24; + + /* Fall back to 18 bpp when DP sink capability is unknown. */ + if (type == DRM_MODE_CONNECTOR_DisplayPort || + type == DRM_MODE_CONNECTOR_eDP) + clamp_bpp = 18; + + if (bpp > clamp_bpp) { + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", + bpp, clamp_bpp); + pipe_config->pipe_bpp = clamp_bpp; + } } } @@ -13883,11 +13893,12 @@ intel_check_primary_plane(struct drm_plane *plane, int max_scale = DRM_PLANE_HELPER_NO_SCALING; bool can_position = false; - /* use scaler when colorkey is not required */ - if (INTEL_INFO(plane->dev)->gen >= 9 && - state->ckey.flags == I915_SET_COLORKEY_NONE) { - min_scale = 1; - max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); + if (INTEL_INFO(plane->dev)->gen >= 9) { + /* use scaler when colorkey is not required */ + if (state->ckey.flags == I915_SET_COLORKEY_NONE) { + min_scale = 1; + max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); + } can_position = true; } diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 3aa614731d7e..f1fa756c5d5d 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -1707,6 +1707,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 339701d7a9a5..40c6aff57256 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { @@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, if (flush_domains) { flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= 
PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; } if (invalidate_domains) { diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 84d45633d28c..fb6ad143873f 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -33,6 +33,7 @@ #include <linux/slab.h> #include <drm/drmP.h> #include <drm/radeon_drm.h> +#include <drm/drm_cache.h> #include "radeon.h" #include "radeon_trace.h" @@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev, DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for " "better performance thanks to write-combining\n"); bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); +#else + /* For architectures that don't support WC memory, + * mask out the WC flag from the BO + */ + if (!drm_arch_can_wc_memory()) + bo->flags &= ~RADEON_GEM_GTT_WC; #endif radeon_ttm_placement_from_domain(bo, domain); diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c index c507896aca45..197b157b73d0 100644 --- a/drivers/gpu/drm/radeon/radeon_sa.c +++ b/drivers/gpu/drm/radeon/radeon_sa.c @@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev, /* see if we can skip over some allocations */ } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); + for (i = 0; i < RADEON_NUM_RINGS; ++i) + radeon_fence_ref(fences[i]); + spin_unlock(&sa_manager->wq.lock); r = radeon_fence_wait_any(rdev, fences, false); + for (i = 0; i < RADEON_NUM_RINGS; ++i) + radeon_fence_unref(&fences[i]); spin_lock(&sa_manager->wq.lock); /* if we have nothing to wait for block */ if (r == -ENOENT) { diff --git a/drivers/hwspinlock/hwspinlock_core.c b/drivers/hwspinlock/hwspinlock_core.c index 52f708bcf77f..d50c701b19d6 100644 --- a/drivers/hwspinlock/hwspinlock_core.c +++ b/drivers/hwspinlock/hwspinlock_core.c @@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index) hwlock = radix_tree_deref_slot(slot); if (unlikely(!hwlock)) continue; + if (radix_tree_is_indirect_ptr(hwlock)) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (hwlock->bank->dev->of_node == args.np) { ret = 0; diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index edc29b173f6c..833ea9dd4464 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -213,6 +213,7 @@ config STK8312 config STK8BA50 tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver" depends on I2C + depends on IIO_TRIGGER help Say yes here to get support for the Sensortek STK8BA50 3-axis accelerometer. diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 605ff42c4631..283ded7747a9 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -175,6 +175,7 @@ config DA9150_GPADC config EXYNOS_ADC tristate "Exynos ADC driver support" depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST) + depends on HAS_IOMEM help Core support for the ADC block found in the Samsung EXYNOS series of SoCs for drivers such as the touchscreen and hwmon to use to share @@ -207,6 +208,7 @@ config INA2XX_ADC config IMX7D_ADC tristate "IMX7D ADC driver" depends on ARCH_MXC || COMPILE_TEST + depends on HAS_IOMEM help Say yes here to build support for IMX7D ADC. 
@@ -409,6 +411,7 @@ config TWL6030_GPADC config VF610_ADC tristate "Freescale vf610 ADC driver" depends on OF + depends on HAS_IOMEM select IIO_BUFFER select IIO_TRIGGERED_BUFFER help diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 942320e32753..c1e05532d437 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev, goto error_kfifo_free; indio_dev->setup_ops = setup_ops; - indio_dev->modes |= INDIO_BUFFER_HARDWARE; + indio_dev->modes |= INDIO_BUFFER_SOFTWARE; return 0; diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c index 43d14588448d..b4dde8315210 100644 --- a/drivers/iio/dac/mcp4725.c +++ b/drivers/iio/dac/mcp4725.c @@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client, data->client = client; indio_dev->dev.parent = &client->dev; + indio_dev->name = id->name; indio_dev->info = &mcp4725_info; indio_dev->channels = &mcp4725_channel; indio_dev->num_channels = 1; diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c index 1165b1c4f9d6..cfc5a051ab9f 100644 --- a/drivers/iio/humidity/dht11.c +++ b/drivers/iio/humidity/dht11.c @@ -117,7 +117,7 @@ static int dht11_decode(struct dht11 *dht11, int offset, int timeres) if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) return -EIO; - dht11->timestamp = ktime_get_real_ns(); + dht11->timestamp = ktime_get_boot_ns(); if (hum_int < 20) { /* DHT22 */ dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) * ((temp_int & 0x80) ? -100 : 100); @@ -145,7 +145,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data) /* TODO: Consider making the handler safe for IRQ sharing */ if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) { - dht11->edges[dht11->num_edges].ts = ktime_get_real_ns(); + dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns(); dht11->edges[dht11->num_edges++].value = gpio_get_value(dht11->gpio); @@ -164,7 +164,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev, int ret, timeres; mutex_lock(&dht11->lock); - if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) { + if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) { timeres = ktime_get_resolution_ns(); if (DHT11_DATA_BIT_HIGH < 2 * timeres) { dev_err(dht11->dev, "timeresolution %dns too low\n", @@ -279,7 +279,7 @@ static int dht11_probe(struct platform_device *pdev) return -EINVAL; } - dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1; + dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1; dht11->num_edges = -1; platform_set_drvdata(pdev, iio); diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig index 48fbc0bc7e2a..8f8d1370ed8b 100644 --- a/drivers/iio/imu/inv_mpu6050/Kconfig +++ b/drivers/iio/imu/inv_mpu6050/Kconfig @@ -5,9 +5,9 @@ config INV_MPU6050_IIO tristate "Invensense MPU6050 devices" depends on I2C && SYSFS + depends on I2C_MUX select IIO_BUFFER select IIO_TRIGGERED_BUFFER - select I2C_MUX help This driver supports the Invensense MPU6050 devices. 
This driver can also support MPU6500 in MPU6050 compatibility mode diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 80fbbfd76faf..734a0042de0c 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -349,6 +349,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get); void iio_channel_release(struct iio_channel *channel) { + if (!channel) + return; iio_device_put(channel->indio_dev); kfree(channel); } diff --git a/drivers/iio/light/acpi-als.c b/drivers/iio/light/acpi-als.c index 60537ec0c923..53201d99a16c 100644 --- a/drivers/iio/light/acpi-als.c +++ b/drivers/iio/light/acpi-als.c @@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = { .realbits = 32, .storagebits = 32, }, - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + /* _RAW is here for backward ABI compatibility */ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_PROCESSED), }, }; @@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev, s32 temp_val; int ret; - if (mask != IIO_CHAN_INFO_RAW) + if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW)) return -EINVAL; /* we support only illumination (_ALI) so far. */ diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c index 809a961b9a7f..6bf89d8f3741 100644 --- a/drivers/iio/light/ltr501.c +++ b/drivers/iio/light/ltr501.c @@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = { {500000, 2000000} }; -static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab, +static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab, int len, int val, int val2) { int i, freq; diff --git a/drivers/iio/pressure/mpl115.c b/drivers/iio/pressure/mpl115.c index f5ecd6e19f5d..a0d7deeac62f 100644 --- a/drivers/iio/pressure/mpl115.c +++ b/drivers/iio/pressure/mpl115.c @@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev, *val = ret >> 6; return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: - *val = 605; + *val = -605; *val2 = 750000; return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_SCALE: diff --git a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c index 93e29fb67fa0..db35e04a0637 100644 --- a/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c +++ b/drivers/iio/proximity/pulsedlight-lidar-lite-v2.c @@ -87,7 +87,7 @@ static int lidar_i2c_xfer(struct lidar_data *data, u8 reg, u8 *val, int len) ret = i2c_transfer(client->adapter, msg, 2); - return (ret == 2) ? 0 : ret; + return (ret == 2) ? 
0 : -EIO; } static int lidar_smbus_xfer(struct lidar_data *data, u8 reg, u8 *val, int len) diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 6727954ab74b..e8a84d12b7ff 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad) #else static int xpad_led_probe(struct usb_xpad *xpad) { return 0; } static void xpad_led_disconnect(struct usb_xpad *xpad) { } -static void xpad_identify_controller(struct usb_xpad *xpad) { } #endif static int xpad_start_input(struct usb_xpad *xpad) diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index 4d446d5085aa..c01a1d648f9f 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c @@ -235,7 +235,7 @@ struct adp5589_kpad { unsigned short gpimapsize; unsigned extend_cfg; bool is_adp5585; - bool adp5585_support_row5; + bool support_row5; #ifdef CONFIG_GPIOLIB unsigned char gpiomap[ADP5589_MAXGPIO]; bool export_gpio; @@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad, if (kpad->extend_cfg & C4_EXTEND_CFG) pin_used[kpad->var->c4_extend_cfg] = true; - if (!kpad->adp5585_support_row5) + if (!kpad->support_row5) pin_used[5] = true; for (i = 0; i < kpad->var->maxgpio; i++) @@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client, switch (id->driver_data) { case ADP5585_02: - kpad->adp5585_support_row5 = true; + kpad->support_row5 = true; case ADP5585_01: kpad->is_adp5585 = true; kpad->var = &const_adp5585; break; case ADP5589: + kpad->support_row5 = true; kpad->var = &const_adp5589; break; } diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 378db10001df..4401be225d64 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c @@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev, led->cdev.brightness = LED_OFF; error = of_property_read_u32(child, "reg", ®); - if (error != 0 || reg >= num_leds) + if (error != 0 || reg >= num_leds) { + of_node_put(child); return -EINVAL; + } led->reg = reg; led->priv = priv; @@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev, INIT_WORK(&led->work, cap11xx_led_work); error = devm_led_classdev_register(dev, &led->cdev); - if (error) + if (error) { + of_node_put(child); return error; + } priv->num_leds++; led++; diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index d6d16fa78281..1f2337abcf2f 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND module will be called xen-kbdfront. 
config INPUT_SIRFSOC_ONKEY - bool "CSR SiRFSoC power on/off/suspend key support" + tristate "CSR SiRFSoC power on/off/suspend key support" depends on ARCH_SIRF && OF default y help diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c index 9d5b89befe6f..ed7237f19539 100644 --- a/drivers/input/misc/sirfsoc-onkey.c +++ b/drivers/input/misc/sirfsoc-onkey.c @@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input) static const struct of_device_id sirfsoc_pwrc_of_match[] = { { .compatible = "sirf,prima2-pwrc" }, {}, -} +}; MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match); static int sirfsoc_pwrc_probe(struct platform_device *pdev) diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index e272f06258ce..a3f0f5a47490 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c @@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse) priv->abs_dev = abs_dev; psmouse->private = priv; - input_set_capability(rel_dev, EV_REL, REL_WHEEL); - /* Set up and register absolute device */ snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); @@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse) abs_dev->id.version = psmouse->model; abs_dev->dev.parent = &psmouse->ps2dev.serio->dev; - error = input_register_device(priv->abs_dev); - if (error) - goto init_fail; - /* Set absolute device capabilities */ input_set_capability(abs_dev, EV_KEY, BTN_LEFT); input_set_capability(abs_dev, EV_KEY, BTN_RIGHT); @@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse) input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0); input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0); + error = input_register_device(priv->abs_dev); + if (error) + goto init_fail; + + /* Add wheel capability to the relative device */ + input_set_capability(rel_dev, EV_REL, REL_WHEEL); + psmouse->protocol_handler = vmmouse_process_byte; psmouse->disconnect = vmmouse_disconnect; psmouse->reconnect = vmmouse_reconnect; diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 8f828975ab10..1ca7f551e2da 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c @@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio) int error; error = device_attach(&serio->dev); - if (error < 0) + if (error < 0 && error != -EPROBE_DEFER) dev_warn(&serio->dev, "device_attach() failed for %s (%s), error: %d\n", serio->phys, serio->name, error); diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c index 5d4903a402cc..69828d015d45 100644 --- a/drivers/input/touchscreen/colibri-vf50-ts.c +++ b/drivers/input/touchscreen/colibri-vf50-ts.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/slab.h> diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 0b0f8c17f3f7..23fbe382da8b 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev, int error; error = device_property_read_u32(dev, "threshold", &val); - if (!error) - reg_addr->reg_threshold = val; + if (!error) { + edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val); + tsdata->threshold = val; + } error = device_property_read_u32(dev, "gain", &val); - if 
(!error) - reg_addr->reg_gain = val; + if (!error) { + edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val); + tsdata->gain = val; + } error = device_property_read_u32(dev, "offset", &val); - if (!error) - reg_addr->reg_offset = val; + if (!error) { + edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val); + tsdata->offset = val; + } } static void diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig index 546d05f4358a..b2bbe8659bed 100644 --- a/drivers/mailbox/Kconfig +++ b/drivers/mailbox/Kconfig @@ -81,6 +81,7 @@ config STI_MBOX config MAILBOX_TEST tristate "Mailbox Test Client" depends on OF + depends on HAS_IOMEM help Test client to help with testing new Controller driver implementations. diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 45d85aea9955..8f779a1ec99c 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -81,16 +81,10 @@ static struct mbox_controller pcc_mbox_ctrl = {}; */ static struct mbox_chan *get_pcc_channel(int id) { - struct mbox_chan *pcc_chan; - if (id < 0 || id > pcc_mbox_ctrl.num_chans) return ERR_PTR(-ENOENT); - pcc_chan = (struct mbox_chan *) - (unsigned long) pcc_mbox_channels + - (id * sizeof(*pcc_chan)); - - return pcc_chan; + return &pcc_mbox_channels[id]; } /** diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 4f22e919787a..d80cce499a56 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -210,10 +210,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait) struct block_device *bdev; struct mddev *mddev = bitmap->mddev; struct bitmap_storage *store = &bitmap->storage; - int node_offset = 0; - - if (mddev_is_clustered(bitmap->mddev)) - node_offset = bitmap->cluster_slot * store->file_pages; while ((rdev = next_active_rdev(rdev, mddev)) != NULL) { int size = PAGE_SIZE; diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 4a8e15058e8b..685aa2d77e25 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode) conf->nfaults = n+1; } -static void make_request(struct mddev *mddev, struct bio *bio) +static void faulty_make_request(struct mddev *mddev, struct bio *bio) { struct faulty_conf *conf = mddev->private; int failit = 0; @@ -226,7 +226,7 @@ static void make_request(struct mddev *mddev, struct bio *bio) generic_make_request(bio); } -static void status(struct seq_file *seq, struct mddev *mddev) +static void faulty_status(struct seq_file *seq, struct mddev *mddev) { struct faulty_conf *conf = mddev->private; int n; @@ -259,7 +259,7 @@ static void status(struct seq_file *seq, struct mddev *mddev) } -static int reshape(struct mddev *mddev) +static int faulty_reshape(struct mddev *mddev) { int mode = mddev->new_layout & ModeMask; int count = mddev->new_layout >> ModeShift; @@ -299,7 +299,7 @@ static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disk return sectors; } -static int run(struct mddev *mddev) +static int faulty_run(struct mddev *mddev) { struct md_rdev *rdev; int i; @@ -327,7 +327,7 @@ static int run(struct mddev *mddev) md_set_array_sectors(mddev, faulty_size(mddev, 0, 0)); mddev->private = conf; - reshape(mddev); + faulty_reshape(mddev); return 0; } @@ -344,11 +344,11 @@ static struct md_personality faulty_personality = .name = "faulty", .level = LEVEL_FAULTY, .owner = THIS_MODULE, - .make_request = make_request, - .run = run, + .make_request = faulty_make_request, + .run = faulty_run, .free = faulty_free, - .status = status, - 
.check_reshape = reshape, + .status = faulty_status, + .check_reshape = faulty_reshape, .size = faulty_size, }; diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 0ded8e97751d..dd97d4245822 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -293,6 +293,7 @@ static void recover_bitmaps(struct md_thread *thread) dlm_unlock: dlm_unlock_sync(bm_lockres); clear_bit: + lockres_free(bm_lockres); clear_bit(slot, &cinfo->recovery_map); } } @@ -682,8 +683,10 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots) bm_lockres = lockres_init(mddev, str, NULL, 1); if (!bm_lockres) return -ENOMEM; - if (i == (cinfo->slot_number - 1)) + if (i == (cinfo->slot_number - 1)) { + lockres_free(bm_lockres); continue; + } bm_lockres->flags |= DLM_LKF_NOQUEUE; ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); @@ -858,6 +861,7 @@ static int leave(struct mddev *mddev) lockres_free(cinfo->token_lockres); lockres_free(cinfo->ack_lockres); lockres_free(cinfo->no_new_dev_lockres); + lockres_free(cinfo->resync_lockres); lockres_free(cinfo->bitmap_lockres); unlock_all_bitmaps(mddev); dlm_release_lockspace(cinfo->lockspace, 2); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c4b913409226..4e3843f7d245 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1044,7 +1044,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) kfree(plug); } -static void make_request(struct mddev *mddev, struct bio * bio) +static void raid1_make_request(struct mddev *mddev, struct bio * bio) { struct r1conf *conf = mddev->private; struct raid1_info *mirror; @@ -1422,7 +1422,7 @@ read_again: wake_up(&conf->wait_barrier); } -static void status(struct seq_file *seq, struct mddev *mddev) +static void raid1_status(struct seq_file *seq, struct mddev *mddev) { struct r1conf *conf = mddev->private; int i; @@ -1439,7 +1439,7 @@ static void status(struct seq_file *seq, struct mddev *mddev) seq_printf(seq, "]"); } -static void error(struct mddev *mddev, struct md_rdev *rdev) +static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r1conf *conf = mddev->private; @@ -2472,7 +2472,8 @@ static int init_resync(struct r1conf *conf) * that can be installed to exclude normal IO requests. 
*/ -static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) +static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr, + int *skipped) { struct r1conf *conf = mddev->private; struct r1bio *r1_bio; @@ -2890,7 +2891,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) } static void raid1_free(struct mddev *mddev, void *priv); -static int run(struct mddev *mddev) +static int raid1_run(struct mddev *mddev) { struct r1conf *conf; int i; @@ -3170,15 +3171,15 @@ static struct md_personality raid1_personality = .name = "raid1", .level = 1, .owner = THIS_MODULE, - .make_request = make_request, - .run = run, + .make_request = raid1_make_request, + .run = raid1_run, .free = raid1_free, - .status = status, - .error_handler = error, + .status = raid1_status, + .error_handler = raid1_error, .hot_add_disk = raid1_add_disk, .hot_remove_disk= raid1_remove_disk, .spare_active = raid1_spare_active, - .sync_request = sync_request, + .sync_request = raid1_sync_request, .resize = raid1_resize, .size = raid1_size, .check_reshape = raid1_reshape, diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index ce959b4ae4df..1c1447dd3417 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1442,7 +1442,7 @@ retry_write: one_write_done(r10_bio); } -static void make_request(struct mddev *mddev, struct bio *bio) +static void raid10_make_request(struct mddev *mddev, struct bio *bio) { struct r10conf *conf = mddev->private; sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); @@ -1484,7 +1484,7 @@ static void make_request(struct mddev *mddev, struct bio *bio) wake_up(&conf->wait_barrier); } -static void status(struct seq_file *seq, struct mddev *mddev) +static void raid10_status(struct seq_file *seq, struct mddev *mddev) { struct r10conf *conf = mddev->private; int i; @@ -1562,7 +1562,7 @@ static int enough(struct r10conf *conf, int ignore) _enough(conf, 1, ignore); } -static void error(struct mddev *mddev, struct md_rdev *rdev) +static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r10conf *conf = mddev->private; @@ -2802,7 +2802,7 @@ static int init_resync(struct r10conf *conf) * */ -static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, +static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) { struct r10conf *conf = mddev->private; @@ -3523,7 +3523,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) return ERR_PTR(err); } -static int run(struct mddev *mddev) +static int raid10_run(struct mddev *mddev) { struct r10conf *conf; int i, disk_idx, chunk_size; @@ -4617,15 +4617,15 @@ static struct md_personality raid10_personality = .name = "raid10", .level = 10, .owner = THIS_MODULE, - .make_request = make_request, - .run = run, + .make_request = raid10_make_request, + .run = raid10_run, .free = raid10_free, - .status = status, - .error_handler = error, + .status = raid10_status, + .error_handler = raid10_error, .hot_add_disk = raid10_add_disk, .hot_remove_disk= raid10_remove_disk, .spare_active = raid10_spare_active, - .sync_request = sync_request, + .sync_request = raid10_sync_request, .quiesce = raid10_quiesce, .size = raid10_size, .resize = raid10_resize, diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a086014dcd49..b4f02c9959f2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2496,7 +2496,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous) dev->sector = raid5_compute_blocknr(sh, i, 
previous); } -static void error(struct mddev *mddev, struct md_rdev *rdev) +static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) { char b[BDEVNAME_SIZE]; struct r5conf *conf = mddev->private; @@ -2958,7 +2958,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, * If several bio share a stripe. The bio bi_phys_segments acts as a * reference count to avoid race. The reference count should already be * increased before this function is called (for example, in - * make_request()), so other bio sharing this stripe will not free the + * raid5_make_request()), so other bio sharing this stripe will not free the * stripe. If a stripe is owned by one stripe, the stripe lock will * protect it. */ @@ -5135,7 +5135,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) } } -static void make_request(struct mddev *mddev, struct bio * bi) +static void raid5_make_request(struct mddev *mddev, struct bio * bi) { struct r5conf *conf = mddev->private; int dd_idx; @@ -5225,7 +5225,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) new_sector = raid5_compute_sector(conf, logical_sector, previous, &dd_idx, NULL); - pr_debug("raid456: make_request, sector %llu logical %llu\n", + pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n", (unsigned long long)new_sector, (unsigned long long)logical_sector); @@ -5575,7 +5575,8 @@ ret: return retn; } -static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) +static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr, + int *skipped) { struct r5conf *conf = mddev->private; struct stripe_head *sh; @@ -6674,7 +6675,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded return 0; } -static int run(struct mddev *mddev) +static int raid5_run(struct mddev *mddev) { struct r5conf *conf; int working_disks = 0; @@ -7048,7 +7049,7 @@ static void raid5_free(struct mddev *mddev, void *priv) mddev->to_remove = &raid5_attrs_group; } -static void status(struct seq_file *seq, struct mddev *mddev) +static void raid5_status(struct seq_file *seq, struct mddev *mddev) { struct r5conf *conf = mddev->private; int i; @@ -7864,15 +7865,15 @@ static struct md_personality raid6_personality = .name = "raid6", .level = 6, .owner = THIS_MODULE, - .make_request = make_request, - .run = run, + .make_request = raid5_make_request, + .run = raid5_run, .free = raid5_free, - .status = status, - .error_handler = error, + .status = raid5_status, + .error_handler = raid5_error, .hot_add_disk = raid5_add_disk, .hot_remove_disk= raid5_remove_disk, .spare_active = raid5_spare_active, - .sync_request = sync_request, + .sync_request = raid5_sync_request, .resize = raid5_resize, .size = raid5_size, .check_reshape = raid6_check_reshape, @@ -7887,15 +7888,15 @@ static struct md_personality raid5_personality = .name = "raid5", .level = 5, .owner = THIS_MODULE, - .make_request = make_request, - .run = run, + .make_request = raid5_make_request, + .run = raid5_run, .free = raid5_free, - .status = status, - .error_handler = error, + .status = raid5_status, + .error_handler = raid5_error, .hot_add_disk = raid5_add_disk, .hot_remove_disk= raid5_remove_disk, .spare_active = raid5_spare_active, - .sync_request = sync_request, + .sync_request = raid5_sync_request, .resize = raid5_resize, .size = raid5_size, .check_reshape = raid5_check_reshape, @@ -7911,15 +7912,15 @@ static struct md_personality raid4_personality = .name = "raid4", .level = 4, .owner 
= THIS_MODULE, - .make_request = make_request, - .run = run, + .make_request = raid5_make_request, + .run = raid5_run, .free = raid5_free, - .status = status, - .error_handler = error, + .status = raid5_status, + .error_handler = raid5_error, .hot_add_disk = raid5_add_disk, .hot_remove_disk= raid5_remove_disk, .spare_active = raid5_spare_active, - .sync_request = sync_request, + .sync_request = raid5_sync_request, .resize = raid5_resize, .size = raid5_size, .check_reshape = raid5_check_reshape, diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c index 0e209b56c76c..c6abeb4fba9d 100644 --- a/drivers/media/dvb-frontends/tda1004x.c +++ b/drivers/media/dvb-frontends/tda1004x.c @@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe) { struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache; struct tda1004x_state* state = fe->demodulator_priv; + int status; dprintk("%s\n", __func__); + status = tda1004x_read_byte(state, TDA1004X_STATUS_CD); + if (status == -1) + return -EIO; + + /* Only update the properties cache if device is locked */ + if (!(status & 8)) + return 0; + // inversion status fe_params->inversion = INVERSION_OFF; if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20) diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index 830491960add..bf82726fd3f4 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -478,7 +478,6 @@ static const struct i2c_device_id ir_kbd_id[] = { { "ir_rx_z8f0811_hdpvr", 0 }, { } }; -MODULE_DEVICE_TABLE(i2c, ir_kbd_id); static struct i2c_driver ir_kbd_driver = { .driver = { diff --git a/drivers/media/i2c/s5k6a3.c b/drivers/media/i2c/s5k6a3.c index b9e43ffa5085..cbe4711e9b31 100644 --- a/drivers/media/i2c/s5k6a3.c +++ b/drivers/media/i2c/s5k6a3.c @@ -144,8 +144,7 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd, mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which); if (mf) { mutex_lock(&sensor->lock); - if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) - *mf = fmt->format; + *mf = fmt->format; mutex_unlock(&sensor->lock); } return 0; diff --git a/drivers/media/pci/saa7134/saa7134-alsa.c b/drivers/media/pci/saa7134/saa7134-alsa.c index 1d2c310ce838..94f816244407 100644 --- a/drivers/media/pci/saa7134/saa7134-alsa.c +++ b/drivers/media/pci/saa7134/saa7134-alsa.c @@ -1211,6 +1211,8 @@ static int alsa_device_init(struct saa7134_dev *dev) static int alsa_device_exit(struct saa7134_dev *dev) { + if (!snd_saa7134_cards[dev->nr]) + return 1; snd_card_free(snd_saa7134_cards[dev->nr]); snd_saa7134_cards[dev->nr] = NULL; @@ -1260,7 +1262,8 @@ static void saa7134_alsa_exit(void) int idx; for (idx = 0; idx < SNDRV_CARDS; idx++) { - snd_card_free(snd_saa7134_cards[idx]); + if (snd_saa7134_cards[idx]) + snd_card_free(snd_saa7134_cards[idx]); } saa7134_dmasound_init = NULL; diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 526359447ff9..8b89ebe16d94 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -215,6 +215,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC config VIDEO_STI_BDISP tristate "STMicroelectronics BDISP 2D blitter driver" depends on VIDEO_DEV && VIDEO_V4L2 + depends on HAS_DMA depends on ARCH_STI || COMPILE_TEST select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig index 40423c6c5324..57d42c6172c5 100644 --- a/drivers/media/platform/exynos4-is/Kconfig +++ 
b/drivers/media/platform/exynos4-is/Kconfig @@ -1,6 +1,6 @@ config VIDEO_SAMSUNG_EXYNOS4_IS - bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver" + tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver" depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST depends on OF && COMMON_CLK diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c index 49658ca39e51..979c388ebf60 100644 --- a/drivers/media/platform/exynos4-is/fimc-is.c +++ b/drivers/media/platform/exynos4-is/fimc-is.c @@ -631,6 +631,12 @@ static int fimc_is_hw_open_sensor(struct fimc_is *is, fimc_is_mem_barrier(); + /* + * Some user space use cases hang up here without this + * empirically chosen delay. + */ + udelay(100); + mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0)); mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1)); mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2)); diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c index bf9261eb57a1..c0816728cbfe 100644 --- a/drivers/media/platform/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c @@ -218,8 +218,8 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb) ivb->dma_addr[i]; isp_dbg(2, &video->ve.vdev, - "dma_buf %pad (%d/%d/%d) addr: %pad\n", - &buf_index, ivb->index, i, vb->index, + "dma_buf %d (%d/%d/%d) addr: %pad\n", + buf_index, ivb->index, i, vb->index, &ivb->dma_addr[i]); } diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c index f3b2dd30ec77..e79ddbb1e14f 100644 --- a/drivers/media/platform/exynos4-is/media-dev.c +++ b/drivers/media/platform/exynos4-is/media-dev.c @@ -186,6 +186,37 @@ error: } /** + * __fimc_pipeline_enable - enable power of all pipeline subdevs + * and the sensor clock + * @ep: video pipeline structure + * @fmd: fimc media device + * + * Called with the graph mutex held. 
+ */ +static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep, + struct fimc_md *fmd) +{ + struct fimc_pipeline *p = to_fimc_pipeline(ep); + int ret; + + /* Enable PXLASYNC clock if this pipeline includes FIMC-IS */ + if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) { + ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]); + if (ret < 0) + return ret; + } + + ret = fimc_pipeline_s_power(p, 1); + if (!ret) + return 0; + + if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) + clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]); + + return ret; +} + +/** * __fimc_pipeline_open - update the pipeline information, enable power * of all pipeline subdevs and the sensor clock * @me: media entity to start graph walk with @@ -199,7 +230,6 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep, struct fimc_md *fmd = entity_to_fimc_mdev(me); struct fimc_pipeline *p = to_fimc_pipeline(ep); struct v4l2_subdev *sd; - int ret; if (WARN_ON(p == NULL || me == NULL)) return -EINVAL; @@ -208,24 +238,16 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep, fimc_pipeline_prepare(p, me); sd = p->subdevs[IDX_SENSOR]; - if (sd == NULL) - return -EINVAL; - - /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */ - if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) { - ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]); - if (ret < 0) - return ret; - } - - ret = fimc_pipeline_s_power(p, 1); - if (!ret) + if (sd == NULL) { + pr_warn("%s(): No sensor subdev\n", __func__); + /* + * Pipeline open cannot fail so as to make it possible + * for the user space to configure the pipeline. + */ return 0; + } - if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) - clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]); - - return ret; + return __fimc_pipeline_enable(ep, fmd); } /** @@ -269,10 +291,43 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on) { IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP }, }; struct fimc_pipeline *p = to_fimc_pipeline(ep); + struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity); + enum fimc_subdev_index sd_id; int i, ret = 0; - if (p->subdevs[IDX_SENSOR] == NULL) - return -ENODEV; + if (p->subdevs[IDX_SENSOR] == NULL) { + if (!fmd->user_subdev_api) { + /* + * Sensor must be already discovered if we + * aren't in the user_subdev_api mode + */ + return -ENODEV; + } + + /* Get pipeline sink entity */ + if (p->subdevs[IDX_FIMC]) + sd_id = IDX_FIMC; + else if (p->subdevs[IDX_IS_ISP]) + sd_id = IDX_IS_ISP; + else if (p->subdevs[IDX_FLITE]) + sd_id = IDX_FLITE; + else + return -ENODEV; + + /* + * Sensor could have been linked between open and STREAMON - + * check if this is the case. 
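[Editor's note] The new __fimc_pipeline_enable() helper above follows the usual acquire-then-roll-back shape: enable the PXLASYNC clock when the pipeline includes FIMC-IS, try to power the pipeline, and undo the clock enable if powering fails, so the caller ends up with either everything on or nothing on. A minimal user-space sketch of that shape, with hypothetical acquire_clock()/power_pipeline() stand-ins for clk_prepare_enable() and fimc_pipeline_s_power():

#include <stdio.h>

/* Hypothetical stand-ins for clk_prepare_enable() / fimc_pipeline_s_power(). */
static int acquire_clock(void)  { puts("clock on");  return 0; }
static void release_clock(void) { puts("clock off"); }
static int power_pipeline(int make_it_fail) { return make_it_fail ? -1 : 0; }

/* Enable the optional clock first, then power the pipeline;
 * roll the clock back if powering fails. */
static int pipeline_enable(int need_clock, int make_power_fail)
{
	int ret = 0;

	if (need_clock) {
		ret = acquire_clock();
		if (ret < 0)
			return ret;
	}

	ret = power_pipeline(make_power_fail);
	if (ret == 0)
		return 0;		/* success: both resources held */

	if (need_clock)
		release_clock();	/* roll back the earlier step */
	return ret;
}

int main(void)
{
	printf("ok path:   %d\n", pipeline_enable(1, 0));
	printf("fail path: %d\n", pipeline_enable(1, 1));
	return 0;
}
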
+ */ + fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity); + + if (p->subdevs[IDX_SENSOR] == NULL) + return -ENODEV; + + ret = __fimc_pipeline_enable(ep, fmd); + if (ret < 0) + return ret; + + } for (i = 0; i < IDX_MAX; i++) { unsigned int idx = seq[on][i]; @@ -282,8 +337,10 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on) if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) goto error; } + return 0; error: + fimc_pipeline_s_power(p, !on); for (; i >= 0; i--) { unsigned int idx = seq[on][i]; v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on); diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c index c398b285180c..1af779ee3c74 100644 --- a/drivers/media/platform/soc_camera/atmel-isi.c +++ b/drivers/media/platform/soc_camera/atmel-isi.c @@ -795,7 +795,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd, xlate->host_fmt = &isi_camera_formats[i]; xlate->code = code.code; dev_dbg(icd->parent, "Providing format %s using code %d\n", - isi_camera_formats[0].name, code.code); + xlate->host_fmt->name, xlate->code); } break; default: diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c index cc84c6d6a701..46c7186f7867 100644 --- a/drivers/media/platform/soc_camera/soc_camera.c +++ b/drivers/media/platform/soc_camera/soc_camera.c @@ -1493,6 +1493,8 @@ static void soc_camera_async_unbind(struct v4l2_async_notifier *notifier, struct soc_camera_async_client, notifier); struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev); + icd->control = NULL; + if (icd->clk) { v4l2_clk_unregister(icd->clk); icd->clk = NULL; diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c index 42dff9d020af..533bc796391e 100644 --- a/drivers/media/platform/vsp1/vsp1_drv.c +++ b/drivers/media/platform/vsp1/vsp1_drv.c @@ -256,7 +256,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) /* Create links. 
*/ list_for_each_entry(entity, &vsp1->entities, list_dev) { - if (entity->type == VSP1_ENTITY_LIF) { + if (entity->type == VSP1_ENTITY_WPF) { ret = vsp1_wpf_create_links(vsp1, entity); if (ret < 0) goto done; @@ -264,7 +264,10 @@ static int vsp1_create_entities(struct vsp1_device *vsp1) ret = vsp1_rpf_create_links(vsp1, entity); if (ret < 0) goto done; - } else { + } + + if (entity->type != VSP1_ENTITY_LIF && + entity->type != VSP1_ENTITY_RPF) { ret = vsp1_create_links(vsp1, entity); if (ret < 0) goto done; diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c index 637d0d6f79fb..b4dca57d1ae3 100644 --- a/drivers/media/platform/vsp1/vsp1_video.c +++ b/drivers/media/platform/vsp1/vsp1_video.c @@ -515,7 +515,7 @@ static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe) bool stopped; spin_lock_irqsave(&pipe->irqlock, flags); - stopped = pipe->state == VSP1_PIPELINE_STOPPED, + stopped = pipe->state == VSP1_PIPELINE_STOPPED; spin_unlock_irqrestore(&pipe->irqlock, flags); return stopped; diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c index c5d49d7a0d76..ff8953ae52d1 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -1063,8 +1063,11 @@ EXPORT_SYMBOL_GPL(vb2_discard_done); */ static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb) { - int ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, - vb, pb, vb->planes); + int ret = 0; + + if (pb) + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, pb, vb->planes); return ret ? ret : call_vb_qop(vb, buf_prepare, vb); } @@ -1077,14 +1080,16 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb) struct vb2_queue *q = vb->vb2_queue; void *mem_priv; unsigned int plane; - int ret; + int ret = 0; enum dma_data_direction dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); /* Copy relevant information provided by the userspace */ - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes); + if (pb) + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, pb, planes); if (ret) return ret; @@ -1192,14 +1197,16 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb) struct vb2_queue *q = vb->vb2_queue; void *mem_priv; unsigned int plane; - int ret; + int ret = 0; enum dma_data_direction dma_dir = q->is_output ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE; bool reacquired = vb->planes[0].mem_priv == NULL; memset(planes, 0, sizeof(planes[0]) * vb->num_planes); /* Copy relevant information provided by the userspace */ - ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes); + if (pb) + ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, + vb, pb, planes); if (ret) return ret; @@ -1520,7 +1527,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) q->waiting_for_buffers = false; vb->state = VB2_BUF_STATE_QUEUED; - call_void_bufop(q, copy_timestamp, vb, pb); + if (pb) + call_void_bufop(q, copy_timestamp, vb, pb); trace_vb2_qbuf(q, vb); @@ -1532,7 +1540,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb) __enqueue_in_driver(vb); /* Fill buffer information for the userspace */ - call_void_bufop(q, fill_user_buffer, vb, pb); + if (pb) + call_void_bufop(q, fill_user_buffer, vb, pb); /* * If streamon has been called, and we haven't yet called @@ -1731,7 +1740,8 @@ static void __vb2_dqbuf(struct vb2_buffer *vb) * The return values from this function are intended to be directly returned * from vidioc_dqbuf handler in driver. */ -int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking) +int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, + bool nonblocking) { struct vb2_buffer *vb = NULL; int ret; @@ -1754,8 +1764,12 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking) call_void_vb_qop(vb, buf_finish, vb); + if (pindex) + *pindex = vb->index; + /* Fill buffer information for the userspace */ - call_void_bufop(q, fill_user_buffer, vb, pb); + if (pb) + call_void_bufop(q, fill_user_buffer, vb, pb); /* Remove from videobuf queue */ list_del(&vb->queued_entry); @@ -1828,7 +1842,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q) * that's done in dqbuf, but that's not going to happen when we * cancel the whole queue. Note: this code belongs here, not in * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical - * call to __fill_v4l2_buffer() after buf_finish(). That order can't + * call to __fill_user_buffer() after buf_finish(). That order can't * be changed, so we can't move the buf_finish() to __vb2_dqbuf(). */ for (i = 0; i < q->num_buffers; ++i) { @@ -2357,7 +2371,6 @@ struct vb2_fileio_data { unsigned int count; unsigned int type; unsigned int memory; - struct vb2_buffer *b; struct vb2_fileio_buf bufs[VB2_MAX_FRAME]; unsigned int cur_index; unsigned int initial_index; @@ -2410,12 +2423,6 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) if (fileio == NULL) return -ENOMEM; - fileio->b = kzalloc(q->buf_struct_size, GFP_KERNEL); - if (fileio->b == NULL) { - kfree(fileio); - return -ENOMEM; - } - fileio->read_once = q->fileio_read_once; fileio->write_immediately = q->fileio_write_immediately; @@ -2460,13 +2467,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) * Queue all buffers. 
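[Editor's note] The videobuf2 core changes in this area make the userspace buffer argument (pb) to vb2_core_qbuf()/vb2_core_dqbuf() optional and add a pindex out-parameter to vb2_core_dqbuf(), so in-kernel callers such as the file-I/O emulator and vb2_thread() (in the hunks that follow) can queue and dequeue by index without filling a scratch buffer structure. A small sketch of that calling convention, using an invented ring type rather than the real vb2 structures, and assuming NULL means "caller does not want this information":

#include <stdio.h>

#define RING_SIZE 4

struct item_info {		/* stand-in for the userspace-visible buffer info */
	unsigned bytesused;
};

struct ring {
	unsigned head;		/* next slot to hand out */
	unsigned used[RING_SIZE];
};

/* Dequeue the next slot.  Both out-parameters are optional: an in-kernel
 * style caller may pass NULL for info (like vb2_core_dqbuf() with a NULL
 * pb) and take only the index, or vice versa. */
static int ring_dequeue(struct ring *r, unsigned *pindex, struct item_info *info)
{
	unsigned idx = r->head;

	if (!r->used[idx])
		return -1;		/* nothing queued */

	if (pindex)
		*pindex = idx;
	if (info)
		info->bytesused = 100 + idx;	/* fill only when asked */

	r->used[idx] = 0;
	r->head = (r->head + 1) % RING_SIZE;
	return 0;
}

int main(void)
{
	struct ring r = { .used = { 1, 1, 0, 0 } };
	unsigned index;
	struct item_info info;

	ring_dequeue(&r, &index, &info);	/* userspace-style caller */
	printf("index %u, bytesused %u\n", index, info.bytesused);

	ring_dequeue(&r, &index, NULL);		/* in-kernel style caller */
	printf("index %u, no info requested\n", index);
	return 0;
}
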
*/ for (i = 0; i < q->num_buffers; i++) { - struct vb2_buffer *b = fileio->b; - - memset(b, 0, q->buf_struct_size); - b->type = q->type; - b->memory = q->memory; - b->index = i; - ret = vb2_core_qbuf(q, i, b); + ret = vb2_core_qbuf(q, i, NULL); if (ret) goto err_reqbufs; fileio->bufs[i].queued = 1; @@ -2511,7 +2512,6 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q) q->fileio = NULL; fileio->count = 0; vb2_core_reqbufs(q, fileio->memory, &fileio->count); - kfree(fileio->b); kfree(fileio); dprintk(3, "file io emulator closed\n"); } @@ -2539,7 +2539,8 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ * else is able to provide this information with the write() operation. */ bool copy_timestamp = !read && q->copy_timestamp; - int ret, index; + unsigned index; + int ret; dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n", read ? "read" : "write", (long)*ppos, count, @@ -2564,22 +2565,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ */ index = fileio->cur_index; if (index >= q->num_buffers) { - struct vb2_buffer *b = fileio->b; + struct vb2_buffer *b; /* * Call vb2_dqbuf to get buffer back. */ - memset(b, 0, q->buf_struct_size); - b->type = q->type; - b->memory = q->memory; - ret = vb2_core_dqbuf(q, b, nonblock); + ret = vb2_core_dqbuf(q, &index, NULL, nonblock); dprintk(5, "vb2_dqbuf result: %d\n", ret); if (ret) return ret; fileio->dq_count += 1; - fileio->cur_index = index = b->index; + fileio->cur_index = index; buf = &fileio->bufs[index]; + b = q->bufs[index]; /* * Get number of bytes filled by the driver @@ -2630,7 +2629,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ * Queue next buffer if required. */ if (buf->pos == buf->size || (!read && fileio->write_immediately)) { - struct vb2_buffer *b = fileio->b; + struct vb2_buffer *b = q->bufs[index]; /* * Check if this is the last buffer to read. @@ -2643,15 +2642,11 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_ /* * Call vb2_qbuf and give buffer to the driver. */ - memset(b, 0, q->buf_struct_size); - b->type = q->type; - b->memory = q->memory; - b->index = index; b->planes[0].bytesused = buf->pos; if (copy_timestamp) b->timestamp = ktime_get_ns(); - ret = vb2_core_qbuf(q, index, b); + ret = vb2_core_qbuf(q, index, NULL); dprintk(5, "vb2_dbuf result: %d\n", ret); if (ret) return ret; @@ -2713,10 +2708,9 @@ static int vb2_thread(void *data) { struct vb2_queue *q = data; struct vb2_threadio_data *threadio = q->threadio; - struct vb2_fileio_data *fileio = q->fileio; bool copy_timestamp = false; - int prequeue = 0; - int index = 0; + unsigned prequeue = 0; + unsigned index = 0; int ret = 0; if (q->is_output) { @@ -2728,37 +2722,34 @@ static int vb2_thread(void *data) for (;;) { struct vb2_buffer *vb; - struct vb2_buffer *b = fileio->b; /* * Call vb2_dqbuf to get buffer back. 
*/ - memset(b, 0, q->buf_struct_size); - b->type = q->type; - b->memory = q->memory; if (prequeue) { - b->index = index++; + vb = q->bufs[index++]; prequeue--; } else { call_void_qop(q, wait_finish, q); if (!threadio->stop) - ret = vb2_core_dqbuf(q, b, 0); + ret = vb2_core_dqbuf(q, &index, NULL, 0); call_void_qop(q, wait_prepare, q); dprintk(5, "file io: vb2_dqbuf result: %d\n", ret); + if (!ret) + vb = q->bufs[index]; } if (ret || threadio->stop) break; try_to_freeze(); - vb = q->bufs[b->index]; - if (b->state == VB2_BUF_STATE_DONE) + if (vb->state != VB2_BUF_STATE_ERROR) if (threadio->fnc(vb, threadio->priv)) break; call_void_qop(q, wait_finish, q); if (copy_timestamp) - b->timestamp = ktime_get_ns();; + vb->timestamp = ktime_get_ns();; if (!threadio->stop) - ret = vb2_core_qbuf(q, b->index, b); + ret = vb2_core_qbuf(q, vb->index, NULL); call_void_qop(q, wait_prepare, q); if (ret || threadio->stop) break; diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index c9a28605511a..91f552124050 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c @@ -625,7 +625,7 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, return -EINVAL; } - ret = vb2_core_dqbuf(q, b, nonblocking); + ret = vb2_core_dqbuf(q, NULL, b, nonblocking); return ret; } diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index e6e4bacb09ee..12099b09a9a7 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high) return 0; } +EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon); static int config_hot_period(u16 val) { @@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k) return config_hot_period(cycles32k); } +EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense); int db8500_prcmu_stop_temp_sense(void) { return config_hot_period(0xFFFF); } +EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense); static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3) { diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 5914263090fc..fe207e542032 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -47,13 +47,10 @@ #include "queue.h" MODULE_ALIAS("mmc:block"); - -#ifdef KERNEL #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX #endif #define MODULE_PARAM_PREFIX "mmcblk." 
-#endif #define INAND_CMD38_ARG_EXT_CSD 113 #define INAND_CMD38_ARG_ERASE 0x00 @@ -655,8 +652,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, } md = mmc_blk_get(bdev->bd_disk); - if (!md) + if (!md) { + err = -EINVAL; goto cmd_err; + } card = md->queue.card; if (IS_ERR(card)) { diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 1c1b45ef3faf..3446097a43c0 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -925,6 +925,10 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd, dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, PAGE_SIZE, dir); + if (dma_mapping_error(dma_dev, dma_addr)) { + data->error = -EFAULT; + break; + } if (direction == DMA_TO_DEVICE) t->tx_dma = dma_addr + sg->offset; else @@ -1393,10 +1397,12 @@ static int mmc_spi_probe(struct spi_device *spi) host->dma_dev = dev; host->ones_dma = dma_map_single(dev, ones, MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); + if (dma_mapping_error(dev, host->ones_dma)) + goto fail_ones_dma; host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data), DMA_BIDIRECTIONAL); - - /* REVISIT in theory those map operations can fail... */ + if (dma_mapping_error(dev, host->data_dma)) + goto fail_data_dma; dma_sync_single_for_cpu(host->dma_dev, host->data_dma, sizeof(*host->data), @@ -1462,6 +1468,11 @@ fail_glue_init: if (host->dma_dev) dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data), DMA_BIDIRECTIONAL); +fail_data_dma: + if (host->dma_dev) + dma_unmap_single(host->dma_dev, host->ones_dma, + MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); +fail_ones_dma: kfree(host->data); fail_nobuf1: diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index ce08896b9d69..da824772bbb4 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -86,7 +86,7 @@ struct pxamci_host { static inline void pxamci_init_ocr(struct pxamci_host *host) { #ifdef CONFIG_REGULATOR - host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc"); + host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc"); if (IS_ERR(host->vcc)) host->vcc = NULL; @@ -654,12 +654,8 @@ static int pxamci_probe(struct platform_device *pdev) r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) - return -ENXIO; - - r = request_mem_region(r->start, SZ_4K, DRIVER_NAME); - if (!r) - return -EBUSY; + if (irq < 0) + return irq; mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); if (!mmc) { @@ -695,7 +691,7 @@ static int pxamci_probe(struct platform_device *pdev) host->pdata = pdev->dev.platform_data; host->clkrt = CLKRT_OFF; - host->clk = clk_get(&pdev->dev, NULL); + host->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); host->clk = NULL; @@ -727,9 +723,9 @@ static int pxamci_probe(struct platform_device *pdev) host->irq = irq; host->imask = MMC_I_MASK_ALL; - host->base = ioremap(r->start, SZ_4K); - if (!host->base) { - ret = -ENOMEM; + host->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); goto out; } @@ -742,7 +738,8 @@ static int pxamci_probe(struct platform_device *pdev) writel(64, host->base + MMC_RESTO); writel(host->imask, host->base + MMC_I_MASK); - ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); + ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0, + DRIVER_NAME, host); if (ret) goto out; @@ -804,7 +801,7 @@ static int pxamci_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Failed requesting 
gpio_ro %d\n", gpio_ro); goto out; } else { - mmc->caps |= host->pdata->gpio_card_ro_invert ? + mmc->caps2 |= host->pdata->gpio_card_ro_invert ? 0 : MMC_CAP2_RO_ACTIVE_HIGH; } @@ -833,14 +830,9 @@ out: dma_release_channel(host->dma_chan_rx); if (host->dma_chan_tx) dma_release_channel(host->dma_chan_tx); - if (host->base) - iounmap(host->base); - if (host->clk) - clk_put(host->clk); } if (mmc) mmc_free_host(mmc); - release_resource(r); return ret; } @@ -859,9 +851,6 @@ static int pxamci_remove(struct platform_device *pdev) gpio_ro = host->pdata->gpio_card_ro; gpio_power = host->pdata->gpio_power; } - if (host->vcc) - regulator_put(host->vcc); - if (host->pdata && host->pdata->exit) host->pdata->exit(&pdev->dev, mmc); @@ -870,16 +859,10 @@ static int pxamci_remove(struct platform_device *pdev) END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, host->base + MMC_I_MASK); - free_irq(host->irq, host); dmaengine_terminate_all(host->dma_chan_rx); dmaengine_terminate_all(host->dma_chan_tx); dma_release_channel(host->dma_chan_rx); dma_release_channel(host->dma_chan_tx); - iounmap(host->base); - - clk_put(host->clk); - - release_resource(host->res); mmc_free_host(mmc); } diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index f6047fc94062..a5cda926d38e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { .ops = &sdhci_acpi_ops_int, }; +static int bxt_get_cd(struct mmc_host *mmc) +{ + int gpio_cd = mmc_gpio_get_cd(mmc); + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + int ret = 0; + + if (!gpio_cd) + return 0; + + pm_runtime_get_sync(mmc->parent); + + spin_lock_irqsave(&host->lock, flags); + + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + + ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +out: + spin_unlock_irqrestore(&host->lock, flags); + + pm_runtime_mark_last_busy(mmc->parent); + pm_runtime_put_autosuspend(mmc->parent); + + return ret; +} + static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, const char *hid, const char *uid) { @@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev, /* Platform specific code during sd probe slot goes here */ + if (hid && !strcmp(hid, "80865ACA")) + host->mmc_host_ops.get_cd = bxt_get_cd; + return 0; } diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 7e7d8f0c9438..9cb86fb25976 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -217,6 +217,7 @@ static int sdhci_at91_probe(struct platform_device *pdev) pm_runtime_disable: pm_runtime_disable(&pdev->dev); pm_runtime_set_suspended(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); clocks_disable_unprepare: clk_disable_unprepare(priv->gck); clk_disable_unprepare(priv->mainck); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index cc851b065d0a..df3b8eced8c4 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host) sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); } +static int bxt_get_cd(struct mmc_host *mmc) +{ + int gpio_cd = mmc_gpio_get_cd(mmc); + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + int ret = 0; + + if (!gpio_cd) + return 0; + + pm_runtime_get_sync(mmc->parent); + + spin_lock_irqsave(&host->lock, flags); + + if (host->flags & SDHCI_DEVICE_DEAD) + goto out; + 
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +out: + spin_unlock_irqrestore(&host->lock, flags); + + pm_runtime_mark_last_busy(mmc->parent); + pm_runtime_put_autosuspend(mmc->parent); + + return ret; +} + static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | @@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) slot->cd_con_id = NULL; slot->cd_idx = 0; slot->cd_override_level = true; + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) + slot->host->mmc_host_ops.get_cd = bxt_get_cd; + return 0; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index d622435d1bcc..add9fdfd1d8f 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1360,7 +1360,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_runtime_pm_get(host); /* Firstly check card presence */ - present = sdhci_do_get_cd(host); + present = mmc->ops->get_cd(mmc); spin_lock_irqsave(&host->lock, flags); @@ -2849,6 +2849,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev, host = mmc_priv(mmc); host->mmc = mmc; + host->mmc_host_ops = sdhci_ops; + mmc->ops = &host->mmc_host_ops; return host; } @@ -3037,7 +3039,6 @@ int sdhci_add_host(struct sdhci_host *host) /* * Set host parameters. */ - mmc->ops = &sdhci_ops; max_clk = host->max_clk; if (host->ops->get_min_clock) diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 7654ae5d2b4e..0115e9907bf8 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -430,6 +430,7 @@ struct sdhci_host { /* Internal data */ struct mmc_host *mmc; /* MMC structure */ + struct mmc_host_ops mmc_host_ops; /* MMC host ops */ u64 dma_mask; /* custom DMA mask */ #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 1ca8a1359cbc..6234eab38ff3 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -445,7 +445,7 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host) pdata->slave_id_rx); } else { host->chan_tx = dma_request_slave_channel(dev, "tx"); - host->chan_tx = dma_request_slave_channel(dev, "rx"); + host->chan_rx = dma_request_slave_channel(dev, "rx"); } dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, host->chan_rx); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 9fe33fc3c2b9..cf34681af4f6 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -1532,7 +1532,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, /* no PVID with ranges, otherwise it's a bug */ if (pvid) - err = _mv88e6xxx_port_pvid_set(ds, port, vid); + err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end); unlock: mutex_unlock(&ps->smi_mutex); @@ -2163,7 +2163,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) * database, and allow every port to egress frames on all other ports. 
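[Editor's note] The sdhci changes above give each host its own copy of the default ops table (host->mmc_host_ops = sdhci_ops) and point mmc->ops at that copy, so platform glue such as the sdhci-acpi and sdhci-pci probe paths can override a single callback (.get_cd = bxt_get_cd) without modifying the table shared by every other host. A hedged sketch of that pattern with invented types, not the real mmc structures:

#include <stdio.h>

struct host;

struct host_ops {
	int (*get_cd)(struct host *h);	/* card-detect callback */
	int (*get_ro)(struct host *h);	/* read-only-switch callback */
};

static int default_get_cd(struct host *h) { (void)h; return 1; }
static int default_get_ro(struct host *h) { (void)h; return 0; }

/* Shared, read-only default table (analogous to sdhci_ops). */
static const struct host_ops default_ops = {
	.get_cd = default_get_cd,
	.get_ro = default_get_ro,
};

struct host {
	struct host_ops ops;		/* per-instance copy */
};

/* Platform-specific card detect (analogous to bxt_get_cd). */
static int quirky_get_cd(struct host *h) { (void)h; return 0; }

int main(void)
{
	struct host a = { .ops = default_ops };	/* copy the defaults */
	struct host b = { .ops = default_ops };

	b.ops.get_cd = quirky_get_cd;		/* override one slot on one host */

	printf("a cd=%d, b cd=%d, b ro=%d\n",
	       a.ops.get_cd(&a), b.ops.get_cd(&b), b.ops.get_ro(&b));
	return 0;
}
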
*/ reg = BIT(ps->num_ports) - 1; /* all ports */ - ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port); + reg &= ~BIT(port); /* except itself */ + ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg); if (ret) goto abort; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index a4799c1fc7d4..5eb9b20c0eea 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -628,6 +628,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) int ret; ring = pdata->rx_ring; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, IRQF_SHARED, ring->irq_name, ring); if (ret) @@ -635,6 +636,7 @@ static int xgene_enet_register_irq(struct net_device *ndev) if (pdata->cq_cnt) { ring = pdata->tx_ring->cp_ring; + irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq, IRQF_SHARED, ring->irq_name, ring); if (ret) { @@ -649,15 +651,19 @@ static int xgene_enet_register_irq(struct net_device *ndev) static void xgene_enet_free_irq(struct net_device *ndev) { struct xgene_enet_pdata *pdata; + struct xgene_enet_desc_ring *ring; struct device *dev; pdata = netdev_priv(ndev); dev = ndev_to_dev(ndev); - devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring); + ring = pdata->rx_ring; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); if (pdata->cq_cnt) { - devm_free_irq(dev, pdata->tx_ring->cp_ring->irq, - pdata->tx_ring->cp_ring); + ring = pdata->tx_ring->cp_ring; + irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY); + devm_free_irq(dev, ring->irq, ring); } } diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h index 70d5b62c125a..248dfc40a761 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h @@ -25,6 +25,7 @@ #include <linux/acpi.h> #include <linux/clk.h> #include <linux/efi.h> +#include <linux/irq.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/of_net.h> diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c index ecc4a334c507..f71ab2647a3b 100644 --- a/drivers/net/ethernet/aurora/nb8800.c +++ b/drivers/net/ethernet/aurora/nb8800.c @@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget) nb8800_tx_done(dev); again: - while (work < budget) { + do { struct nb8800_rx_buf *rxb; unsigned int len; @@ -330,7 +330,7 @@ again: rxd->report = 0; last = next; work++; - } + } while (work < budget); if (work) { priv->rx_descs[last].desc.config |= DESC_EOC; diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 8550df189ceb..19f7cd02e085 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -151,8 +151,11 @@ config BNX2X_VXLAN config BGMAC tristate "BCMA bus GBit core support" - depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X) + depends on BCMA && BCMA_HOST_SOC + depends on HAS_DMA + depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST select PHYLIB + select FIXED_PHY ---help--- This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus. They can be found on BCM47xx SoCs and provide gigabit ethernet. 
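[Editor's note] The mv88e6xxx setup change above builds the port-based VLAN map as "all ports except this one": BIT(num_ports) - 1 sets one bit per port and clearing BIT(port) removes the port's own bit, whereas the old reg & ~port cleared bits named by the port index instead of the port's bit. A standalone illustration of the difference, with BIT() spelled out since the kernel macro is not available here:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned num_ports = 7;
	unsigned all_ports = BIT(num_ports) - 1;	/* 0x7f: one bit per port */

	for (unsigned port = 0; port < num_ports; port++) {
		unsigned fixed = all_ports & ~BIT(port);	/* every port except itself */
		unsigned buggy = all_ports & ~port;		/* clears bits named by the index */

		printf("port %u: fixed=0x%02x buggy=0x%02x\n", port, fixed, buggy);
	}
	return 0;
}
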
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index df835f5e46d8..5dc89e527e7d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1490,10 +1490,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) last = tx_buf->nr_frags; j += 2; - for (k = 0; k < last; k++, j = NEXT_TX(j)) { + for (k = 0; k < last; k++, j++) { + int ring_idx = j & bp->tx_ring_mask; skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; - tx_buf = &txr->tx_buf_ring[j]; + tx_buf = &txr->tx_buf_ring[ring_idx]; dma_unmap_page( &pdev->dev, dma_unmap_addr(tx_buf, mapping), @@ -3406,7 +3407,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; u16 error_code; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1); + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); req.ring_type = ring_type; req.ring_id = cpu_to_le16(ring->fw_ring_id); @@ -4819,8 +4820,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts); - stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts); - stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 0d775964b060..457c3bc8cfff 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -401,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev) * Ethernet MAC ISRs */ if (priv->internal_phy) - priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT; + priv->phydev->irq = PHY_IGNORE_INTERRUPT; return 0; } diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 9293675df7ba..3010080cfeee 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, return ret; } +static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb) +{ + /* Check if we will never have enough descriptors, + * as gso_segs can be more than current ring size + */ + return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3; +} + static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); /* Use GSO to workaround all TSO packets that meet HW bug conditions @@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) * vlan encapsulated. 
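[Editor's note] In the bnxt_free_tx_skbs() hunk above, the fragment loop keeps a running counter j and derives the slot as j & bp->tx_ring_mask; assuming tx_ring_mask is ring_size - 1 for a power-of-two ring, the mask is equivalent to a modulo that wraps correctly even when j runs past the end of the ring. The tg3_tso_bug_gso_check() helper added in the same region applies related capacity reasoning, attempting the GSO fallback only while gso_segs stays below a third of the ring's tx_pending. A tiny standalone demonstration of the wrap behaviour, with a made-up ring size:

#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	/* Walk a few entries starting near the end of the ring; the mask
	 * wraps the running counter back to slot 0 instead of overrunning. */
	unsigned j = 6;

	for (int k = 0; k < 5; k++, j++)
		printf("j=%u -> slot %u\n", j, j & RING_MASK);

	return 0;
}
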
*/ if (skb->protocol == htons(ETH_P_8021Q) || - skb->protocol == htons(ETH_P_8021AD)) - return tg3_tso_bug(tp, tnapi, txq, skb); + skb->protocol == htons(ETH_P_8021AD)) { + if (tg3_tso_bug_gso_check(tnapi, skb)) + return tg3_tso_bug(tp, tnapi, txq, skb); + goto drop; + } if (!skb_is_gso_v6(skb)) { if (unlikely((ETH_HLEN + hdr_len) > 80) && - tg3_flag(tp, TSO_BUG)) - return tg3_tso_bug(tp, tnapi, txq, skb); - + tg3_flag(tp, TSO_BUG)) { + if (tg3_tso_bug_gso_check(tnapi, skb)) + return tg3_tso_bug(tp, tnapi, txq, skb); + goto drop; + } ip_csum = iph->check; ip_tot_len = iph->tot_len; iph->check = 0; @@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) if (would_hit_hwbug) { tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); - if (mss) { + if (mss && tg3_tso_bug_gso_check(tnapi, skb)) { /* If it's a TSO packet, do GSO instead of * allocating and copying to a large linear SKB */ @@ -12016,7 +12029,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, int ret; u32 offset, len, b_offset, odd_len; u8 *buf; - __be32 start, end; + __be32 start = 0, end; if (tg3_flag(tp, NO_NVRAM) || eeprom->magic != TG3_EEPROM_MAGIC) diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 9d9984a87d42..50c94104f19c 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2823,7 +2823,7 @@ static int macb_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct device_node *phy_node; const struct macb_config *macb_config = NULL; - struct clk *pclk, *hclk, *tx_clk; + struct clk *pclk, *hclk = NULL, *tx_clk = NULL; unsigned int queue_mask, num_queues; struct macb_platform_data *pdata; bool native_io; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index b89504405b72..872765527081 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; - u32 remainder; unsigned long flags; struct lio *lio = container_of(ptp, struct lio, ptp_info); struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; @@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, ns += lio->ptp_adjust; spin_unlock_irqrestore(&lio->ptp_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h index 1671fa3332c2..7ba6d530b0c0 100644 --- a/drivers/net/ethernet/cisco/enic/enic.h +++ b/drivers/net/ethernet/cisco/enic/enic.h @@ -33,7 +33,7 @@ #define DRV_NAME "enic" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" -#define DRV_VERSION "2.3.0.12" +#define DRV_VERSION "2.3.0.20" #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" #define ENIC_BARS_MAX 6 diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 1ffd1050860b..1fdf5fe12a95 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) { struct devcmd2_controller *dc2c = vdev->devcmd2; - struct devcmd2_result *result = dc2c->result + dc2c->next_result; + struct devcmd2_result 
*result; + u8 color; unsigned int i; int delay, err; u32 fetch_index, new_posted; @@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) return 0; + result = dc2c->result + dc2c->next_result; + color = dc2c->color; + + dc2c->next_result++; + if (dc2c->next_result == dc2c->result_size) { + dc2c->next_result = 0; + dc2c->color = dc2c->color ? 0 : 1; + } + for (delay = 0; delay < wait; delay++) { - if (result->color == dc2c->color) { - dc2c->next_result++; - if (dc2c->next_result == dc2c->result_size) { - dc2c->next_result = 0; - dc2c->color = dc2c->color ? 0 : 1; - } + if (result->color == color) { if (result->error) { err = result->error; if (err != ERR_ECMDUNKNOWN || diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig index 48ecbc8aaaea..b423ad380b6a 100644 --- a/drivers/net/ethernet/ezchip/Kconfig +++ b/drivers/net/ethernet/ezchip/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP config EZCHIP_NPS_MANAGEMENT_ENET tristate "EZchip NPS management enet support" depends on OF_IRQ && OF_NET + depends on HAS_IOMEM ---help--- Simple LAN device for debug or management purposes. Device supports interrupts for RX and TX(completion). diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile index 4097c58d17a7..cbe21dc7e37e 100644 --- a/drivers/net/ethernet/freescale/Makefile +++ b/drivers/net/ethernet/freescale/Makefile @@ -4,6 +4,9 @@ obj-$(CONFIG_FEC) += fec.o fec-objs :=fec_main.o fec_ptp.o +CFLAGS_fec_main.o := -D__CHECK_ENDIAN__ +CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__ + obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 99d33e2d35e6..2106d72c91dc 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -19,8 +19,7 @@ #include <linux/timecounter.h> #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ - defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) /* * Just figures, Motorola would have to change the offsets for * registers in the same peripheral device on different models @@ -190,28 +189,45 @@ /* * Define the buffer descriptor structure. + * + * Evidently, ARM SoCs have the FEC block generated in a + * little endian mode so adjust endianness accordingly. 
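[Editor's note] The enic devcmd2 fix above latches the result slot and the expected color bit before advancing next_result, then polls that slot until the device writes a matching color; the expected color flips each time the result ring wraps, which is how completions from the current pass are told apart from stale entries left over from the previous pass. A single-threaded sketch of the color-bit idea, with the "device" completion simulated in software and all names invented:

#include <stdio.h>

#define RESULT_SIZE 4

struct result { unsigned char color; int error; };

struct ring {
	struct result results[RESULT_SIZE];
	unsigned next;			/* next result slot we expect */
	unsigned char color;		/* color value that means "new" this pass */
};

/* Pick the slot and expected color for the next command, then advance,
 * flipping the expected color when the ring wraps (as the fixed driver
 * now does before it starts polling). */
static struct result *claim_slot(struct ring *r, unsigned char *expect)
{
	struct result *slot = &r->results[r->next];

	*expect = r->color;
	if (++r->next == RESULT_SIZE) {
		r->next = 0;
		r->color = !r->color;
	}
	return slot;
}

int main(void)
{
	struct ring r = { .color = 1 };	/* slots start at color 0 = stale */

	for (int cmd = 0; cmd < 6; cmd++) {
		unsigned char expect;
		struct result *slot = claim_slot(&r, &expect);

		printf("cmd %d: slot %ld looks %s before the device writes",
		       cmd, (long)(slot - r.results),
		       slot->color == expect ? "complete (would be a bug)" : "pending");

		/* Simulate the device completing the command. */
		slot->color = expect;
		slot->error = 0;

		printf(", complete after (color %u)\n", slot->color);
	}
	return 0;
}
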
*/ -#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) +#if defined(CONFIG_ARM) +#define fec32_to_cpu le32_to_cpu +#define fec16_to_cpu le16_to_cpu +#define cpu_to_fec32 cpu_to_le32 +#define cpu_to_fec16 cpu_to_le16 +#define __fec32 __le32 +#define __fec16 __le16 + struct bufdesc { - unsigned short cbd_datlen; /* Data length */ - unsigned short cbd_sc; /* Control and status info */ - unsigned long cbd_bufaddr; /* Buffer address */ + __fec16 cbd_datlen; /* Data length */ + __fec16 cbd_sc; /* Control and status info */ + __fec32 cbd_bufaddr; /* Buffer address */ }; #else +#define fec32_to_cpu be32_to_cpu +#define fec16_to_cpu be16_to_cpu +#define cpu_to_fec32 cpu_to_be32 +#define cpu_to_fec16 cpu_to_be16 +#define __fec32 __be32 +#define __fec16 __be16 + struct bufdesc { - unsigned short cbd_sc; /* Control and status info */ - unsigned short cbd_datlen; /* Data length */ - unsigned long cbd_bufaddr; /* Buffer address */ + __fec16 cbd_sc; /* Control and status info */ + __fec16 cbd_datlen; /* Data length */ + __fec32 cbd_bufaddr; /* Buffer address */ }; #endif struct bufdesc_ex { struct bufdesc desc; - unsigned long cbd_esc; - unsigned long cbd_prot; - unsigned long cbd_bdu; - unsigned long ts; - unsigned short res0[4]; + __fec32 cbd_esc; + __fec32 cbd_prot; + __fec32 cbd_bdu; + __fec32 ts; + __fec16 res0[4]; }; /* diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 502da6f48f95..41c81f6ec630 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev) bdp = txq->tx_bd_base; do { - pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", + pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", index, bdp == txq->cur_tx ? 'S' : ' ', bdp == txq->dirty_tx ? 
'H' : ' ', - bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, + fec16_to_cpu(bdp->cbd_sc), + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), txq->tx_skbuff[index]); bdp = fec_enet_get_nextdesc(bdp, fep, 0); index++; @@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, bdp = fec_enet_get_nextdesc(bdp, fep, queue); ebdp = (struct bufdesc_ex *)bdp; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); frag_len = skb_shinfo(skb)->frags[frag].size; @@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; @@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, goto dma_mapping_error; } - bdp->cbd_bufaddr = addr; - bdp->cbd_datlen = frag_len; - bdp->cbd_sc = status; + bdp->cbd_bufaddr = cpu_to_fec32(addr); + bdp->cbd_datlen = cpu_to_fec16(frag_len); + bdp->cbd_sc = cpu_to_fec16(status); } return bdp; @@ -445,8 +447,8 @@ dma_mapping_error: bdp = txq->cur_tx; for (i = 0; i < frag; i++) { bdp = fec_enet_get_nextdesc(bdp, fep, queue); - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); + dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); } return ERR_PTR(-ENOMEM); } @@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, /* Fill in a Tx ring entry */ bdp = txq->cur_tx; last_bdp = bdp; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; /* Set buffer length and buffer pointer */ @@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); /* Save skb pointer */ txq->tx_skbuff[index] = skb; - bdp->cbd_datlen = buflen; - bdp->cbd_bufaddr = addr; + bdp->cbd_datlen = cpu_to_fec16(buflen); + bdp->cbd_bufaddr = cpu_to_fec32(addr); /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); /* If this was the last BD in the ring, start at the beginning again. 
*/ bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); @@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, unsigned int estatus = 0; dma_addr_t addr; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); @@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, return NETDEV_TX_BUSY; } - bdp->cbd_datlen = size; - bdp->cbd_bufaddr = addr; + bdp->cbd_datlen = cpu_to_fec16(size); + bdp->cbd_bufaddr = cpu_to_fec32(addr); if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) @@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } /* Handle the last BD specially */ @@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, if (is_last) { status |= BD_ENET_TX_INTR; if (fep->bufdesc_ex) - ebdp->cbd_esc |= BD_ENET_TX_INT; + ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); } - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); return 0; } @@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, unsigned short status; unsigned int estatus = 0; - status = bdp->cbd_sc; + status = fec16_to_cpu(bdp->cbd_sc); status &= ~BD_ENET_TX_STATS; status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); @@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, } } - bdp->cbd_bufaddr = dmabuf; - bdp->cbd_datlen = hdr_len; + bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); + bdp->cbd_datlen = cpu_to_fec16(hdr_len); if (fep->bufdesc_ex) { if (fep->quirks & FEC_QUIRK_HAS_AVB) @@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, if (skb->ip_summed == CHECKSUM_PARTIAL) estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; ebdp->cbd_bdu = 0; - ebdp->cbd_esc = estatus; + ebdp->cbd_esc = cpu_to_fec32(estatus); } - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); return 0; } @@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev) /* Initialize the BD for every fragment in the page. */ if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); else - bdp->cbd_sc = 0; + bdp->cbd_sc = cpu_to_fec16(0); bdp = fec_enet_get_nextdesc(bdp, fep, q); } /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, fep, q); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); rxq->cur_rx = rxq->rx_bd_base; } @@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev) for (i = 0; i < txq->tx_ring_size; i++) { /* Initialize the BD for every fragment in the page. 
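[Editor's note] The fec.h/fec_main.c changes in this area stop treating buffer-descriptor fields as plain integers and route every access through the new fec16_to_cpu()/cpu_to_fec32()-style helpers, which map to little-endian or big-endian conversions depending on whether the FEC block was generated little-endian (CONFIG_ARM SoCs) or big-endian (the ColdFire parts). The same idea in plain user-space C, using the glibc <endian.h> helpers as a stand-in for the kernel's le16_to_cpu() family; the descriptor layout below is only illustrative:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Descriptor as an (assumed little-endian) DMA engine sees it: the
 * fields keep a fixed byte order regardless of the host CPU. */
struct bufdesc_le {
	uint16_t datlen;	/* little-endian on the wire */
	uint16_t sc;		/* little-endian on the wire */
	uint32_t bufaddr;	/* little-endian on the wire */
};

static void fill_desc(struct bufdesc_le *bd, uint16_t len, uint32_t addr)
{
	bd->datlen  = htole16(len);	/* cpu_to_fec16() analogue */
	bd->bufaddr = htole32(addr);	/* cpu_to_fec32() analogue */
	bd->sc      = htole16(0x8000);	/* e.g. a "ready" bit */
}

int main(void)
{
	struct bufdesc_le bd;

	fill_desc(&bd, 1514, 0x12345678u);

	/* Reads go through the matching fec16_to_cpu()/fec32_to_cpu()
	 * analogues, so this prints the same values on little- and
	 * big-endian hosts. */
	printf("len=%u addr=0x%08x sc=0x%04x\n",
	       le16toh(bd.datlen), le32toh(bd.bufaddr), le16toh(bd.sc));
	return 0;
}
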
*/ - bdp->cbd_sc = 0; + bdp->cbd_sc = cpu_to_fec16(0); if (txq->tx_skbuff[i]) { dev_kfree_skb_any(txq->tx_skbuff[i]); txq->tx_skbuff[i] = NULL; } - bdp->cbd_bufaddr = 0; + bdp->cbd_bufaddr = cpu_to_fec32(0); bdp = fec_enet_get_nextdesc(bdp, fep, q); } /* Set the last buffer to wrap */ bdp = fec_enet_get_prevdesc(bdp, fep, q); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); txq->dirty_tx = bdp; } } @@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev) */ if (fep->quirks & FEC_QUIRK_ENET_MAC) { memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); - writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); - writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); + writel((__force u32)cpu_to_be32(temp_mac[0]), + fep->hwp + FEC_ADDR_LOW); + writel((__force u32)cpu_to_be32(temp_mac[1]), + fep->hwp + FEC_ADDR_HIGH); } /* Clear any outstanding interrupt. */ @@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) while (bdp != READ_ONCE(txq->cur_tx)) { /* Order the load of cur_tx and cbd_sc */ rmb(); - status = READ_ONCE(bdp->cbd_sc); + status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); if (status & BD_ENET_TX_READY) break; @@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) skb = txq->tx_skbuff[index]; txq->tx_skbuff[index] = NULL; - if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, - bdp->cbd_datlen, DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; + if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), + fec16_to_cpu(bdp->cbd_datlen), + DMA_TO_DEVICE); + bdp->cbd_bufaddr = cpu_to_fec32(0); if (!skb) { bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); continue; @@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) struct skb_shared_hwtstamps shhwtstamps; struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); + fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); skb_tstamp_tx(skb, &shhwtstamps); } @@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff if (off) skb_reserve(skb, fep->rx_align + 1 - off); - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, - FEC_ENET_RX_FRSIZE - fep->rx_align, - DMA_FROM_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { + bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE)); + if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { if (net_ratelimit()) netdev_err(ndev, "Rx DMA memory map failed\n"); return -ENOMEM; @@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, if (!new_skb) return false; - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_sync_single_for_cpu(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); if (!swap) @@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) */ bdp = rxq->cur_rx; - while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { if (pkt_received >= budget) break; @@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* Process the incoming frame. 
*/ ndev->stats.rx_packets++; - pkt_len = bdp->cbd_datlen; + pkt_len = fec16_to_cpu(bdp->cbd_datlen); ndev->stats.rx_bytes += pkt_len; index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); @@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) ndev->stats.rx_dropped++; goto rx_processing_done; } - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_unmap_single(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); } @@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* If this is a VLAN packet remove the VLAN Tag */ vlan_packet_rcvd = false; if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { + fep->bufdesc_ex && + (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { /* Push and remove the vlan tag */ struct vlan_hdr *vlan_header = (struct vlan_hdr *) (data + ETH_HLEN); @@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) /* Get receive timestamp from the skb */ if (fep->hwts_rx_en && fep->bufdesc_ex) - fec_enet_hwtstamp(fep, ebdp->ts, + fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), skb_hwtstamps(skb)); if (fep->bufdesc_ex && (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { + if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { /* don't check it */ skb->ip_summed = CHECKSUM_UNNECESSARY; } else { @@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) napi_gro_receive(&fep->napi, skb); if (is_copybreak) { - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, + dma_sync_single_for_device(&fep->pdev->dev, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); } else { @@ -1527,12 +1535,12 @@ rx_processing_done: /* Mark the buffer empty */ status |= BD_ENET_RX_EMPTY; - bdp->cbd_sc = status; + bdp->cbd_sc = cpu_to_fec16(status); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); ebdp->cbd_prot = 0; ebdp->cbd_bdu = 0; } @@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) /* List of registers that can be safety be read to dump them with ethtool */ #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ - defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) static u32 fec_enet_register_offset[] = { FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, @@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev) rxq->rx_skbuff[i] = NULL; if (skb) { dma_unmap_single(&fep->pdev->dev, - bdp->cbd_bufaddr, + fec32_to_cpu(bdp->cbd_bufaddr), FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE); dev_kfree_skb(skb); @@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) } rxq->rx_skbuff[i] = skb; - bdp->cbd_sc = BD_ENET_RX_EMPTY; + bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); } bdp = fec_enet_get_nextdesc(bdp, fep, queue); @@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct 
net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. */ bdp = fec_enet_get_prevdesc(bdp, fep, queue); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; err_alloc: @@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) if (!txq->tx_bounce[i]) goto err_alloc; - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; + bdp->cbd_sc = cpu_to_fec16(0); + bdp->cbd_bufaddr = cpu_to_fec32(0); if (fep->bufdesc_ex) { struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_TX_INT; + ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); } bdp = fec_enet_get_nextdesc(bdp, fep, queue); @@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) /* Set the last buffer to wrap. */ bdp = fec_enet_get_prevdesc(bdp, fep, queue); - bdp->cbd_sc |= BD_SC_WRAP; + bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); return 0; diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c index 52e0091b4fb2..1ba359f17ec6 100644 --- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c +++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c @@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev) cbd_t __iomem *prev_bd; cbd_t __iomem *last_tx_bd; - last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t)); + last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1); /* get the current bd held in TBPTR and scan back from this point */ recheck_bd = curr_tbptr = (cbd_t __iomem *) diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index b3645297477e..3bfe36f9405b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = { static int __ae_match(struct device *dev, const void *data) { struct hnae_ae_dev *hdev = cls_to_ae_dev(dev); - const char *ae_id = data; - if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE)) - return 1; - - return 0; + return hdev->dev->of_node == data; } -static struct hnae_ae_dev *find_ae(const char *ae_id) +static struct hnae_ae_dev *find_ae(const struct device_node *ae_node) { struct device *dev; - WARN_ON(!ae_id); + WARN_ON(!ae_node); - dev = class_find_device(hnae_class, NULL, ae_id, __ae_match); + dev = class_find_device(hnae_class, NULL, ae_node, __ae_match); return dev ? 
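The mac-fcc.c tx_restart fix a little above is the classic double-scaling bug: tx_bd_base is a cbd_t pointer, so adding an index already advances by whole descriptors, and the old "* sizeof(cbd_t)" walked far past the end of the ring. Purely as illustration (driver field names reused, nothing else implied):

cbd_t __iomem *ring = fep->tx_bd_base;

/* correct: pointer arithmetic scales by sizeof(cbd_t) by itself */
cbd_t __iomem *last = ring + (fpi->tx_ring - 1);

/* the equivalent, with the scaling spelled out in bytes */
cbd_t __iomem *also_last = (cbd_t __iomem *)
	((char __iomem *)ring + (fpi->tx_ring - 1) * sizeof(cbd_t));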
cls_to_ae_dev(dev) : NULL; } @@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle); * return handle ptr or ERR_PTR */ struct hnae_handle *hnae_get_handle(struct device *owner_dev, - const char *ae_id, u32 port_id, + const struct device_node *ae_node, + u32 port_id, struct hnae_buf_ops *bops) { struct hnae_ae_dev *dev; @@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, int i, j; int ret; - dev = find_ae(ae_id); + dev = find_ae(ae_node); if (!dev) return ERR_PTR(-ENODEV); diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h index 6ca94dc3dda3..1cbcb9fa3fb5 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.h +++ b/drivers/net/ethernet/hisilicon/hns/hnae.h @@ -524,8 +524,11 @@ struct hnae_handle { #define ring_to_dev(ring) ((ring)->q->dev->dev) -struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id, - u32 port_id, struct hnae_buf_ops *bops); +struct hnae_handle *hnae_get_handle(struct device *owner_dev, + const struct device_node *ae_node, + u32 port_id, + struct hnae_buf_ops *bops); + void hnae_put_handle(struct hnae_handle *handle); int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner); void hnae_ae_unregister(struct hnae_ae_dev *dev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 522b264866b4..a0070d0e740d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -847,6 +847,7 @@ static struct hnae_ae_ops hns_dsaf_ops = { int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) { struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev; + static atomic_t id = ATOMIC_INIT(-1); switch (dsaf_dev->dsaf_ver) { case AE_VERSION_1: @@ -858,6 +859,9 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev) default: break; } + + snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME, + (int)atomic_inc_return(&id)); ae_dev->ops = &hns_dsaf_ops; ae_dev->dev = dsaf_dev->dev; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 1c33bd06bd5c..9439f04962e1 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c @@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) int ret, i; u32 desc_num; u32 buf_size; - const char *name, *mode_str; + const char *mode_str; struct device_node *np = dsaf_dev->dev->of_node; if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1")) @@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev) else dsaf_dev->dsaf_ver = AE_VERSION_2; - ret = of_property_read_string(np, "dsa_name", &name); - if (ret) { - dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret); - return ret; - } - strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE); - dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0'; - ret = of_property_read_string(np, "mode", &mode_str); if (ret) { dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h index 31c312f9826e..40205b910f80 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h @@ -18,6 +18,7 @@ struct hns_mac_cb; #define DSAF_DRV_NAME "hns_dsaf" #define DSAF_MOD_VERSION "v1.0" +#define DSAF_DEVICE_NAME "dsaf" #define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000 diff --git 
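The hns rework in the hunks above and below replaces matching accelerators by a free-form name string ("dsa_name"/"ae-name") with a device-tree phandle: the NIC node points at its DSAF node, and the provider is looked up by comparing of_node pointers, which cannot collide the way strings can. A rough sketch of the pattern with hypothetical names (provider_class and the helper names stand in for the driver's own; "ae-handle" is the binding's property):

static int match_by_of_node(struct device *dev, const void *data)
{
	return dev->of_node == data;
}

static struct device *find_provider(struct device *consumer,
				    struct class *provider_class)
{
	struct device_node *np;
	struct device *dev;

	np = of_parse_phandle(consumer->of_node, "ae-handle", 0);
	if (!np)
		return NULL;

	/* takes a reference on the matched device; caller must put_device() */
	dev = class_find_device(provider_class, NULL, np, match_by_of_node);
	of_node_put(np);
	return dev;
}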
a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 0e30846a24f8..3f77ff77abbc 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev) int ret; h = hnae_get_handle(&priv->netdev->dev, - priv->ae_name, priv->port_id, NULL); + priv->ae_node, priv->port_id, NULL); if (IS_ERR_OR_NULL(h)) { ret = PTR_ERR(h); dev_dbg(priv->dev, "has not handle, register notifier!\n"); @@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev) else priv->enet_ver = AE_VERSION_2; - ret = of_property_read_string(node, "ae-name", &priv->ae_name); - if (ret) - goto out_read_string_fail; + priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0); + if (IS_ERR_OR_NULL(priv->ae_node)) { + ret = PTR_ERR(priv->ae_node); + dev_err(dev, "not find ae-handle\n"); + goto out_read_prop_fail; + } ret = of_property_read_u32(node, "port-id", &priv->port_id); if (ret) - goto out_read_string_fail; + goto out_read_prop_fail; hns_init_mac_addr(ndev); @@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev) out_notify_fail: (void)cancel_work_sync(&priv->service_task); -out_read_string_fail: +out_read_prop_fail: free_netdev(ndev); return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h index 4b75270f014e..c68ab3d34fc2 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h @@ -51,7 +51,7 @@ struct hns_nic_ops { }; struct hns_nic_priv { - const char *ae_name; + const struct device_node *ae_node; u32 enet_ver; u32 port_id; int phy_mode; diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c index 1d5c3e16d8f4..3daf2d4a7ca0 100644 --- a/drivers/net/ethernet/hp/hp100.c +++ b/drivers/net/ethernet/hp/hp100.c @@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = { }; #endif -#ifdef CONFIG_EISA static struct eisa_device_id hp100_eisa_tbl[] = { { "HWPF180" }, /* HP J2577 rev A */ { "HWP1920" }, /* HP 27248B */ @@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = { { "" } /* Mandatory final entry ! 
*/ }; MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl); -#endif -#ifdef CONFIG_PCI static const struct pci_device_id hp100_pci_tbl[] = { {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,}, {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,}, @@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = { {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, hp100_pci_tbl); -#endif static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO; static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX; @@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d) free_netdev(d); } -#ifdef CONFIG_EISA static int hp100_eisa_probe(struct device *gendev) { struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private)); @@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = { .remove = hp100_eisa_remove, } }; -#endif -#ifdef CONFIG_PCI static int hp100_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = { .probe = hp100_pci_probe, .remove = hp100_pci_remove, }; -#endif /* * module section @@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void) err = hp100_isa_init(); if (err && err != -ENODEV) goto out; -#ifdef CONFIG_EISA err = eisa_driver_register(&hp100_eisa_driver); if (err && err != -ENODEV) goto out2; -#endif -#ifdef CONFIG_PCI err = pci_register_driver(&hp100_pci_driver); if (err && err != -ENODEV) goto out3; -#endif out: return err; out3: -#ifdef CONFIG_EISA eisa_driver_unregister (&hp100_eisa_driver); out2: -#endif hp100_isa_cleanup(); goto out; } @@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void) static void __exit hp100_module_exit(void) { hp100_isa_cleanup(); -#ifdef CONFIG_EISA eisa_driver_unregister (&hp100_eisa_driver); -#endif -#ifdef CONFIG_PCI pci_unregister_driver (&hp100_pci_driver); -#endif } module_init(hp100_module_init) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bb4612c159fd..8f3b53e0dc46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -7117,9 +7117,7 @@ static void i40e_service_task(struct work_struct *work) i40e_watchdog_subtask(pf); i40e_fdir_reinit_subtask(pf); i40e_sync_filters_subtask(pf); -#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE) i40e_sync_udp_filters_subtask(pf); -#endif i40e_clean_adminq_subtask(pf); i40e_service_event_complete(pf); @@ -8515,6 +8513,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) } #endif + +#if IS_ENABLED(CONFIG_VXLAN) /** * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up * @netdev: This physical port's netdev @@ -8524,7 +8524,6 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port) static void i40e_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8557,7 +8556,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev, pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN; pf->pending_udp_bitmap |= BIT_ULL(next_idx); pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; -#endif } /** @@ -8569,7 +8567,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev, static void i40e_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_VXLAN) struct i40e_netdev_priv *np = 
netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8592,9 +8589,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev, netdev_warn(netdev, "vxlan port %d was not found, not deleting\n", ntohs(port)); } -#endif } +#endif +#if IS_ENABLED(CONFIG_GENEVE) /** * i40e_add_geneve_port - Get notifications about GENEVE ports that come up * @netdev: This physical port's netdev @@ -8604,7 +8602,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev, static void i40e_add_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8639,7 +8636,6 @@ static void i40e_add_geneve_port(struct net_device *netdev, pf->flags |= I40E_FLAG_UDP_FILTER_SYNC; dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port)); -#endif } /** @@ -8651,7 +8647,6 @@ static void i40e_add_geneve_port(struct net_device *netdev, static void i40e_del_geneve_port(struct net_device *netdev, sa_family_t sa_family, __be16 port) { -#if IS_ENABLED(CONFIG_GENEVE) struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_vsi *vsi = np->vsi; struct i40e_pf *pf = vsi->back; @@ -8677,8 +8672,8 @@ static void i40e_del_geneve_port(struct net_device *netdev, netdev_warn(netdev, "geneve port %d was not found, not deleting\n", ntohs(port)); } -#endif } +#endif static int i40e_get_phys_port_id(struct net_device *netdev, struct netdev_phys_item_id *ppid) diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 720516b0e8ee..47bd8b3145a7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2313,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags, struct iphdr *this_ip_hdr; u32 network_hdr_len; u8 l4_hdr = 0; - struct udphdr *oudph; - struct iphdr *oiph; + struct udphdr *oudph = NULL; + struct iphdr *oiph = NULL; u32 l4_tunnel = 0; if (skb->encapsulation) { diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index a0c03834a2f7..55831188bc32 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq, if (length <= 8 && (uintptr_t)data & 0x7) { /* Copy unaligned small data fragment to TSO header data area */ - memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE, + memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE, data, length); desc->buf_ptr = txq->tso_hdrs_dma - + txq->tx_curr_desc * TSO_HEADER_SIZE; + + tx_index * TSO_HEADER_SIZE; } else { /* Alignment is okay, map buffer and hand off to hardware */ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fabc8df40392..662c2ee268c7 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -11,28 +11,28 @@ * warranty of any kind, whether express or implied. 
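The i40e_main.c hunks above hoist the #if IS_ENABLED(CONFIG_VXLAN)/CONFIG_GENEVE guards from inside the tunnel-port callbacks to around the whole functions, so nothing of them is compiled when the option is off. That only works if the corresponding .ndo_add_*/.ndo_del_* hooks are guarded the same way; a hedged sketch of how that pairing usually looks (the struct name and the elided members are illustrative, not a claim about i40e's actual ops table):

static const struct net_device_ops sketch_netdev_ops = {
	/* ...the usual entries elided... */
#if IS_ENABLED(CONFIG_VXLAN)
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
#if IS_ENABLED(CONFIG_GENEVE)
	.ndo_add_geneve_port	= i40e_add_geneve_port,
	.ndo_del_geneve_port	= i40e_del_geneve_port,
#endif
};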
*/ -#include <linux/kernel.h> -#include <linux/netdevice.h> +#include <linux/clk.h> +#include <linux/cpu.h> #include <linux/etherdevice.h> -#include <linux/platform_device.h> -#include <linux/skbuff.h> +#include <linux/if_vlan.h> #include <linux/inetdevice.h> -#include <linux/mbus.h> -#include <linux/module.h> #include <linux/interrupt.h> -#include <linux/if_vlan.h> -#include <net/ip.h> -#include <net/ipv6.h> #include <linux/io.h> -#include <net/tso.h> +#include <linux/kernel.h> +#include <linux/mbus.h> +#include <linux/module.h> +#include <linux/netdevice.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> -#include <linux/of_address.h> #include <linux/phy.h> -#include <linux/clk.h> -#include <linux/cpu.h> +#include <linux/platform_device.h> +#include <linux/skbuff.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/tso.h> /* Registers */ #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) @@ -373,6 +373,8 @@ struct mvneta_port { /* Core clock */ struct clk *clk; + /* AXI clock */ + struct clk *clk_bus; u8 mcast_count[256]; u16 tx_ring_size; u16 rx_ring_size; @@ -3242,26 +3244,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp) const struct mvneta_statistic *s; void __iomem *base = pp->base; u32 high, low, val; + u64 val64; int i; for (i = 0, s = mvneta_statistics; s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); s++, i++) { - val = 0; - switch (s->type) { case T_REG_32: val = readl_relaxed(base + s->offset); + pp->ethtool_stats[i] += val; break; case T_REG_64: /* Docs say to read low 32-bit then high */ low = readl_relaxed(base + s->offset); high = readl_relaxed(base + s->offset + 4); - val = (u64)high << 32 | low; + val64 = (u64)high << 32 | low; + pp->ethtool_stats[i] += val64; break; } - - pp->ethtool_stats[i] += val; } } @@ -3605,7 +3606,9 @@ static int mvneta_probe(struct platform_device *pdev) pp->indir[0] = rxq_def; - pp->clk = devm_clk_get(&pdev->dev, NULL); + pp->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(pp->clk)) + pp->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(pp->clk)) { err = PTR_ERR(pp->clk); goto err_put_phy_node; @@ -3613,6 +3616,10 @@ static int mvneta_probe(struct platform_device *pdev) clk_prepare_enable(pp->clk); + pp->clk_bus = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(pp->clk_bus)) + clk_prepare_enable(pp->clk_bus); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pp->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pp->base)) { @@ -3724,6 +3731,7 @@ err_free_stats: err_free_ports: free_percpu(pp->ports); err_clk: + clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); err_put_phy_node: of_node_put(phy_node); @@ -3741,6 +3749,7 @@ static int mvneta_remove(struct platform_device *pdev) struct mvneta_port *pp = netdev_priv(dev); unregister_netdev(dev); + clk_disable_unprepare(pp->clk_bus); clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0c5237264e3e..bb77e2207804 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -1044,6 +1044,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload, mlxsw_reg_sftr_port_mask_set(payload, port, 1); } +/* SFDF - Switch Filtering DB Flush + * -------------------------------- + * The switch filtering DB flush register is used to flush the FDB. 
+ * Note that FDB notifications are flushed as well. + */ +#define MLXSW_REG_SFDF_ID 0x2013 +#define MLXSW_REG_SFDF_LEN 0x14 + +static const struct mlxsw_reg_info mlxsw_reg_sfdf = { + .id = MLXSW_REG_SFDF_ID, + .len = MLXSW_REG_SFDF_LEN, +}; + +/* reg_sfdf_swid + * Switch partition ID. + * Access: Index + */ +MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8); + +enum mlxsw_reg_sfdf_flush_type { + MLXSW_REG_SFDF_FLUSH_PER_SWID, + MLXSW_REG_SFDF_FLUSH_PER_FID, + MLXSW_REG_SFDF_FLUSH_PER_PORT, + MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID, + MLXSW_REG_SFDF_FLUSH_PER_LAG, + MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID, +}; + +/* reg_sfdf_flush_type + * Flush type. + * 0 - All SWID dynamic entries are flushed. + * 1 - All FID dynamic entries are flushed. + * 2 - All dynamic entries pointing to port are flushed. + * 3 - All FID dynamic entries pointing to port are flushed. + * 4 - All dynamic entries pointing to LAG are flushed. + * 5 - All FID dynamic entries pointing to LAG are flushed. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4); + +/* reg_sfdf_flush_static + * Static. + * 0 - Flush only dynamic entries. + * 1 - Flush both dynamic and static entries. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1); + +static inline void mlxsw_reg_sfdf_pack(char *payload, + enum mlxsw_reg_sfdf_flush_type type) +{ + MLXSW_REG_ZERO(sfdf, payload); + mlxsw_reg_sfdf_flush_type_set(payload, type); + mlxsw_reg_sfdf_flush_static_set(payload, true); +} + +/* reg_sfdf_fid + * FID to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16); + +/* reg_sfdf_system_port + * Port to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16); + +/* reg_sfdf_port_fid_system_port + * Port to flush, pointed to by FID. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16); + +/* reg_sfdf_lag_id + * LAG ID to flush. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10); + +/* reg_sfdf_lag_fid_lag_id + * LAG ID to flush, pointed to by FID. + * Access: RW + */ +MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10); + /* SLDR - Switch LAG Descriptor Register * ----------------------------------------- * The switch LAG descriptor register is populated by LAG descriptors. @@ -1701,20 +1787,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8); * Module number. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false); /* reg_pmlp_tx_lane * Tx Lane. When rxtx field is cleared, this field is used for Rx as well. * Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false); /* reg_pmlp_rx_lane * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is * equal to Tx lane. 
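For readers new to the mlxsw register plumbing used above: MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4) generates mlxsw_reg_sfdf_flush_type_set()/_get() accessors for a 4-bit field whose least significant bit sits at bit 28 of the 32-bit word at byte offset 0x04 of the big-endian register payload. A simplified idea of what the generated setter boils down to (illustrative only, not the real macro expansion):

static inline void sfdf_flush_type_set_sketch(char *payload, u32 val)
{
	__be32 *p = (__be32 *)(payload + 0x04);	/* word holding the field */
	u32 tmp = be32_to_cpu(*p);

	tmp &= ~(GENMASK(3, 0) << 28);		/* clear the 4-bit field */
	tmp |= (val & GENMASK(3, 0)) << 28;	/* insert the new value */
	*p = cpu_to_be32(tmp);
}

mlxsw_reg_sfdf_pack() then only zeroes the payload and sets what every flush variant needs; the per-variant setters (fid, system_port, lag_id) are applied by the callers added in spectrum.c further down.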
* Access: RW */ -MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false); +MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false); static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port) { @@ -3121,6 +3207,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id) return "SFGC"; case MLXSW_REG_SFTR_ID: return "SFTR"; + case MLXSW_REG_SFDF_ID: + return "SFDF"; case MLXSW_REG_SLDR_ID: return "SLDR"; case MLXSW_REG_SLCR_ID: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index ce6845d534a8..217856bdd400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = { .profile = &mlxsw_sp_config_profile, }; +static int +mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT); + mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, + mlxsw_sp_port->local_port); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG); + mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port, + u16 fid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + char sfdf_pl[MLXSW_REG_SFDF_LEN]; + + mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID); + mlxsw_reg_sfdf_fid_set(sfdf_pl, fid); + mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); +} + +static int +__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, last_err = 0; + u16 vid; + + for (vid = 1; vid < VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid); + if (err) + last_err = err; + } + + return last_err; +} + +static int +__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port) +{ + int err, last_err = 0; + u16 vid; + + for (vid = 1; vid < VLAN_N_VID - 1; vid++) { + err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid); + if (err) + last_err = err; + } + + return last_err; +} + +static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port) +{ + if (!list_empty(&mlxsw_sp_port->vports_list)) + if (mlxsw_sp_port->lagged) + return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port); + else + return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port); + else + if (mlxsw_sp_port->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port); + else + return 
mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port); +} + +static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport) +{ + u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport); + u16 fid = mlxsw_sp_vfid_to_fid(vfid); + + if (mlxsw_sp_vport->lagged) + return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport, + fid); + else + return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid); +} + static bool mlxsw_sp_port_dev_check(const struct net_device *dev) { return dev->netdev_ops == &mlxsw_sp_port_netdev_ops; @@ -2006,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port) return 0; } -static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port) +static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, + bool flush_fdb) { struct net_device *dev = mlxsw_sp_port->dev; + if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) + netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); + mlxsw_sp_port->learning = 0; mlxsw_sp_port->learning_sync = 0; mlxsw_sp_port->uc_flood = 0; @@ -2200,10 +2313,15 @@ err_col_port_enable: return err; } +static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, + struct net_device *br_dev, + bool flush_fdb); + static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_port *mlxsw_sp_vport; struct mlxsw_sp_upper *lag; u16 lag_id = mlxsw_sp_port->lag_id; int err; @@ -2220,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, if (err) return err; + /* In case we leave a LAG device that has bridges built on top, + * then their teardown sequence is never issued and we need to + * invoke the necessary cleanup routines ourselves. 
+ */ + list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list, + vport.list) { + struct net_device *br_dev; + + if (!mlxsw_sp_vport->bridged) + continue; + + br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false); + } + + if (mlxsw_sp_port->bridged) { + mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); + + if (lag->ref_count == 1) + mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL); + } + if (lag->ref_count == 1) { + if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port)) + netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); if (err) return err; @@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port, return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled); } -static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev); - static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *vlan_dev) { @@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *br_dev; br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport); - mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev); + mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true); } mlxsw_sp_vport->dev = mlxsw_sp_port->dev; @@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, } mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev); } else { - err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port); + err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port, + true); mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev); if (err) { netdev_err(dev, "Failed to leave bridge\n"); @@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp, } static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, - struct net_device *br_dev) + struct net_device *br_dev, + bool flush_fdb) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp; u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); @@ -2604,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport, goto err_vport_flood_set; } + if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) + netdev_err(dev, "Failed to flush FDB\n"); + /* Switch between the vFIDs and destroy the old one if needed. */ new_vfid->nr_vports++; mlxsw_sp_vport->vport.vfid = new_vfid; @@ -2777,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, if (!mlxsw_sp_vport) return NOTIFY_DONE; err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, - upper_dev); + upper_dev, true); if (err) { netdev_err(dev, "Failed to leave bridge\n"); return NOTIFY_BAD; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index a23dc610d259..7f42eb1c320e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -120,7 +120,6 @@ struct mlxsw_sp { } fdb_notify; #define MLXSW_SP_DEFAULT_AGEING_TIME 300 u32 ageing_time; - struct mutex fdb_lock; /* Make sure FDB sessions are atomic. 
*/ struct mlxsw_sp_upper master_bridge; struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX]; }; @@ -254,5 +253,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid); int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, bool set, bool only_uc); +void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 45479ef5bcf4..e492ca2cdecd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -45,6 +45,7 @@ #include <linux/if_bridge.h> #include <linux/workqueue.h> #include <linux/jiffies.h> +#include <linux/rtnetlink.h> #include <net/switchdev.h> #include "spectrum.h" @@ -124,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port, int err; switch (state) { - case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_FORWARDING: spms_state = MLXSW_REG_SPMS_STATE_FORWARDING; break; - case BR_STATE_LISTENING: /* fall-through */ case BR_STATE_LEARNING: spms_state = MLXSW_REG_SPMS_STATE_LEARNING; break; + case BR_STATE_LISTENING: /* fall-through */ + case BR_STATE_DISABLED: /* fall-through */ case BR_STATE_BLOCKING: spms_state = MLXSW_REG_SPMS_STATE_DISCARDING; break; @@ -936,6 +937,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, vlan->vid_begin, vlan->vid_end, false); } +void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port) +{ + u16 vid; + + for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID) + __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false); +} + static int mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_fdb *fdb) @@ -1040,10 +1049,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, struct switchdev_obj_port_fdb *fdb, - switchdev_obj_dump_cb_t *cb) + switchdev_obj_dump_cb_t *cb, + struct net_device *orig_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - u16 vport_vid = 0, vport_fid = 0; + struct mlxsw_sp_port *tmp; + u16 vport_fid = 0; char *sfd_pl; char mac[ETH_ALEN]; u16 fid; @@ -1058,13 +1069,11 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, if (!sfd_pl) return -ENOMEM; - mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) { u16 tmp; tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port); vport_fid = mlxsw_sp_vfid_to_fid(tmp); - vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port); } mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0); @@ -1088,12 +1097,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid, &local_port); if (local_port == mlxsw_sp_port->local_port) { - if (vport_fid && vport_fid != fid) - continue; - else if (vport_fid) - fdb->vid = vport_vid; - else + if (vport_fid && vport_fid == fid) + fdb->vid = 0; + else if (!vport_fid && + !mlxsw_sp_fid_is_vfid(fid)) fdb->vid = fid; + else + continue; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; err = cb(&fdb->obj); @@ -1104,14 +1114,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG: mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i, mac, &fid, &lag_id); - if (mlxsw_sp_port == - 
mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) { - if (vport_fid && vport_fid != fid) + tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id); + if (tmp && tmp->local_port == + mlxsw_sp_port->local_port) { + /* LAG records can only point to LAG + * devices or VLAN devices on top. + */ + if (!netif_is_lag_master(orig_dev) && + !is_vlan_dev(orig_dev)) continue; - else if (vport_fid) - fdb->vid = vport_vid; - else + if (vport_fid && vport_fid == fid) + fdb->vid = 0; + else if (!vport_fid && + !mlxsw_sp_fid_is_vfid(fid)) fdb->vid = fid; + else + continue; ether_addr_copy(fdb->addr, mac); fdb->ndm_state = NUD_REACHABLE; err = cb(&fdb->obj); @@ -1124,7 +1142,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port, } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT); out: - mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock); kfree(sfd_pl); return stored_err ? stored_err : err; } @@ -1176,7 +1193,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev, break; case SWITCHDEV_OBJ_ID_PORT_FDB: err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port, - SWITCHDEV_OBJ_PORT_FDB(obj), cb); + SWITCHDEV_OBJ_PORT_FDB(obj), cb, + obj->orig_dev); break; default: err = -EOPNOTSUPP; @@ -1194,14 +1212,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { .switchdev_port_obj_dump = mlxsw_sp_port_obj_dump, }; -static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync, - bool adding, char *mac, u16 vid, +static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding, + char *mac, u16 vid, struct net_device *dev) { struct switchdev_notifier_fdb_info info; unsigned long notifier_type; - if (learning && learning_sync) { + if (learning_sync) { info.addr = mac; info.vid = vid; notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL; @@ -1237,7 +1255,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp, netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n"); goto just_remove; } - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + vid = 0; /* Override the physical port with the vPort. */ mlxsw_sp_port = mlxsw_sp_vport; } else { @@ -1257,8 +1275,7 @@ do_fdb_op: if (!do_notification) return; - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, - mlxsw_sp_port->learning_sync, + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, vid, mlxsw_sp_port->dev); return; @@ -1273,6 +1290,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, bool adding) { struct mlxsw_sp_port *mlxsw_sp_port; + struct net_device *dev; char mac[ETH_ALEN]; u16 lag_vid = 0; u16 lag_id; @@ -1298,11 +1316,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, goto just_remove; } - vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); - lag_vid = vid; + lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport); + dev = mlxsw_sp_vport->dev; + vid = 0; /* Override the physical port with the vPort. 
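A recurring point in the spectrum_switchdev.c changes here and just below (and in the rocker hunk later on) is locking: learned-FDB events are now pushed to the bridge with rtnl_lock held instead of under a driver-private mutex, because the switchdev FDB notifier handlers on the bridge side expect RTNL. The deferred notification work therefore ends up shaped roughly like this (struct and helper names are invented for the sketch):

struct sketch_switch {
	struct delayed_work fdb_notify_work;
	unsigned int poll_interval_ms;
};

/* hypothetical: walks the hardware notification queue and calls
 * call_switchdev_notifiers() once per learned/aged-out record
 */
static void process_hw_fdb_records(struct sketch_switch *sw);

static void fdb_notify_work_sketch(struct work_struct *work)
{
	struct sketch_switch *sw = container_of(work, struct sketch_switch,
						fdb_notify_work.work);

	rtnl_lock();			/* bridge-side handlers assume RTNL */
	process_hw_fdb_records(sw);
	rtnl_unlock();

	schedule_delayed_work(&sw->fdb_notify_work,
			      msecs_to_jiffies(sw->poll_interval_ms));
}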
*/ mlxsw_sp_port = mlxsw_sp_vport; } else { + dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev; vid = fid; } @@ -1319,10 +1339,8 @@ do_fdb_op: if (!do_notification) return; - mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning, - mlxsw_sp_port->learning_sync, - adding, mac, vid, - mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev); + mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac, + vid, dev); return; just_remove: @@ -1374,7 +1392,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work); - mutex_lock(&mlxsw_sp->fdb_lock); + rtnl_lock(); do { mlxsw_reg_sfn_pack(sfn_pl); err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl); @@ -1387,7 +1405,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work) mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i); } while (num_rec); - mutex_unlock(&mlxsw_sp->fdb_lock); + rtnl_unlock(); kfree(sfn_pl); mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); @@ -1402,7 +1420,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n"); return err; } - mutex_init(&mlxsw_sp->fdb_lock); INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work); mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index a10c928bbd6b..00cfd95ca59d 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -28,6 +28,16 @@ #include "moxart_ether.h" +static inline void moxart_desc_write(u32 data, u32 *desc) +{ + *desc = cpu_to_le32(data); +} + +static inline u32 moxart_desc_read(u32 *desc) +{ + return le32_to_cpu(*desc); +} + static inline void moxart_emac_write(struct net_device *ndev, unsigned int reg, unsigned long value) { @@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev) static void moxart_mac_setup_desc_ring(struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - void __iomem *desc; + void *desc; int i; for (i = 0; i < TX_DESC_NUM; i++) { @@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i; } - writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); + moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1); priv->tx_head = 0; priv->tx_tail = 0; @@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) for (i = 0; i < RX_DESC_NUM; i++) { desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE; memset(desc, 0, RX_REG_DESC_SIZE); - writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); - writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, + moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK, desc + RX_REG_OFFSET_DESC1); priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i; @@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev) if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i])) netdev_err(ndev, "DMA mapping error\n"); - writel(priv->rx_mapping[i], + moxart_desc_write(priv->rx_mapping[i], desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS); - writel(priv->rx_buf[i], + moxart_desc_write((uintptr_t)priv->rx_buf[i], desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT); } - writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); + 
moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1); priv->rx_head = 0; @@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) napi); struct net_device *ndev = priv->ndev; struct sk_buff *skb; - void __iomem *desc; + void *desc; unsigned int desc0, len; int rx_head = priv->rx_head; int rx = 0; while (rx < budget) { desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); - desc0 = readl(desc + RX_REG_OFFSET_DESC0); + desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0); + rmb(); /* ensure desc0 is up to date */ if (desc0 & RX_DESC0_DMA_OWN) break; @@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) priv->stats.multicast++; rx_next: - writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); + wmb(); /* prevent setting ownership back too early */ + moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); rx_head = RX_NEXT(rx_head); priv->rx_head = rx_head; @@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id) static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct moxart_mac_priv_t *priv = netdev_priv(ndev); - void __iomem *desc; + void *desc; unsigned int len; unsigned int tx_head = priv->tx_head; u32 txdes1; @@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head); spin_lock_irq(&priv->txlock); - if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { + if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) { net_dbg_ratelimited("no TX space for packet\n"); priv->stats.tx_dropped++; goto out_unlock; } + rmb(); /* ensure data is only read that had TX_DESC0_DMA_OWN cleared */ len = skb->len > TX_BUF_SIZE ? 
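The moxart changes around here do two related things: the descriptor rings stop being treated as __iomem (they live in ordinary coherent memory, so readl/writel were the wrong accessors), and the ownership handshake gains the barriers it needs so the DMA engine never observes the OWN bit before the rest of the descriptor. The generic pattern, sketched with an invented descriptor layout and flag names:

struct sketch_desc {			/* invented layout */
	__le32 status;
	__le32 ctrl;
	__le32 buf;
};
#define SKETCH_DESC_OWN		BIT(31)
#define SKETCH_DESC_FIRST	BIT(29)
#define SKETCH_DESC_LAST	BIT(28)

static void give_desc_to_hw(struct sketch_desc *desc, u32 dma_addr, u32 len)
{
	desc->buf  = cpu_to_le32(dma_addr);
	desc->ctrl = cpu_to_le32(len | SKETCH_DESC_FIRST | SKETCH_DESC_LAST);
	wmb();		/* publish the data before publishing ownership */
	desc->status = cpu_to_le32(SKETCH_DESC_OWN);
}

static bool desc_returned_by_hw(struct sketch_desc *desc)
{
	u32 status = le32_to_cpu(READ_ONCE(desc->status));

	if (status & SKETCH_DESC_OWN)
		return false;	/* hardware still owns it */
	rmb();			/* read device-written data only after OWN clears */
	return true;
}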
TX_BUF_SIZE : skb->len; @@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) priv->tx_len[tx_head] = len; priv->tx_skb[tx_head] = skb; - writel(priv->tx_mapping[tx_head], + moxart_desc_write(priv->tx_mapping[tx_head], desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS); - writel(skb->data, + moxart_desc_write((uintptr_t)skb->data, desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT); if (skb->len < ETH_ZLEN) { @@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); if (tx_head == TX_DESC_NUM_MASK) txdes1 |= TX_DESC1_END; - writel(txdes1, desc + TX_REG_OFFSET_DESC1); - writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); + moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1); + wmb(); /* flush descriptor before transferring ownership */ + moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); /* start to send packet */ writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND); diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h index 2be9280d608c..93a9563ac7c6 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.h +++ b/drivers/net/ethernet/moxa/moxart_ether.h @@ -300,7 +300,7 @@ struct moxart_mac_priv_t { dma_addr_t rx_base; dma_addr_t rx_mapping[RX_DESC_NUM]; - void __iomem *rx_desc_base; + void *rx_desc_base; unsigned char *rx_buf_base; unsigned char *rx_buf[RX_DESC_NUM]; unsigned int rx_head; @@ -308,7 +308,7 @@ struct moxart_mac_priv_t { dma_addr_t tx_base; dma_addr_t tx_mapping[TX_DESC_NUM]; - void __iomem *tx_desc_base; + void *tx_desc_base; unsigned char *tx_buf_base; unsigned char *tx_buf[RX_DESC_NUM]; unsigned int tx_head; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50d5604833ed..e0993eba5df3 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id) return IRQ_NONE; } -#ifdef CONFIG_PCI_MSI - static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id) { struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id; @@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev) if (vdev->config.intr_type == MSI_X) pci_disable_msix(vdev->pdev); } -#endif static void vxge_rem_isr(struct vxgedev *vdev) { -#ifdef CONFIG_PCI_MSI - if (vdev->config.intr_type == MSI_X) { + if (IS_ENABLED(CONFIG_PCI_MSI) && + vdev->config.intr_type == MSI_X) { vxge_rem_msix_isr(vdev); - } else -#endif - if (vdev->config.intr_type == INTA) { + } else if (vdev->config.intr_type == INTA) { synchronize_irq(vdev->pdev->irq); free_irq(vdev->pdev->irq, vdev); } @@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev) static int vxge_add_isr(struct vxgedev *vdev) { int ret = 0; -#ifdef CONFIG_PCI_MSI int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; int pci_fun = PCI_FUNC(vdev->pdev->devfn); - if (vdev->config.intr_type == MSI_X) + if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) ret = vxge_enable_msix(vdev); if (ret) { @@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev) vdev->config.intr_type = INTA; } - if (vdev->config.intr_type == MSI_X) { + if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) { for (intr_idx = 0; intr_idx < (vdev->no_of_vpath * VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) { @@ -2576,9 +2570,8 @@ static int 
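The vxge conversion in progress here trades #ifdef CONFIG_PCI_MSI blocks for C-level IS_ENABLED() checks: the guarded code is always parsed and type-checked, the compiler drops the dead branch when the option is off, and the driver relies on the MSI-X API providing inline stubs in that configuration so everything still builds. A minimal illustration (the struct and helpers are placeholders, not vxge symbols):

#include <linux/kconfig.h>

struct sketch_dev { bool want_msix; };

static int setup_msix(struct sketch_dev *d) { return 0; }	/* placeholder */
static int setup_intx(struct sketch_dev *d) { return 0; }	/* placeholder */

static int setup_interrupts(struct sketch_dev *d)
{
	if (IS_ENABLED(CONFIG_PCI_MSI) && d->want_msix)
		return setup_msix(d);	/* still compiled with PCI_MSI=n,
					 * then eliminated as dead code */
	return setup_intx(d);
}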
vxge_add_isr(struct vxgedev *vdev) vdev->vxge_entries[intr_cnt].in_use = 1; vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0]; } -INTA_MODE: -#endif +INTA_MODE: if (vdev->config.intr_type == INTA) { snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge:INTA", vdev->ndev->name); @@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config, if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT) max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT; -#ifndef CONFIG_PCI_MSI - vxge_debug_init(VXGE_ERR, - "%s: This Kernel does not support " - "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); - *intr_type = INTA; -#endif + if (!IS_ENABLED(CONFIG_PCI_MSI)) { + vxge_debug_init(VXGE_ERR, + "%s: This Kernel does not support " + "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME); + *intr_type = INTA; + } /* Configure whether MSI-X or IRQL. */ switch (*intr_type) { diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index a4ab71d43e4e..166a7fc87e2f 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work) info.addr = lw->addr; info.vid = lw->vid; + rtnl_lock(); if (learned && removing) call_switchdev_notifiers(SWITCHDEV_FDB_DEL, lw->rocker_port->dev, &info.info); else if (learned && !removing) call_switchdev_notifiers(SWITCHDEV_FDB_ADD, lw->rocker_port->dev, &info.info); + rtnl_unlock(); rocker_port_kfree(lw->trans, work); } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index cc106d892e29..23fa29877f5b 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -389,17 +389,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc) if (vio_version_after_eq(&port->vio, 1, 8)) { struct vio_net_dext *dext = vio_net_ext(desc); + skb_reset_network_header(skb); + if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) { if (skb->protocol == ETH_P_IP) { - struct iphdr *iph = (struct iphdr *)skb->data; + struct iphdr *iph = ip_hdr(skb); iph->check = 0; ip_send_check(iph); } } if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) && - skb->ip_summed == CHECKSUM_NONE) - vnet_fullcsum(skb); + skb->ip_summed == CHECKSUM_NONE) { + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + int ihl = iph->ihl * 4; + + skb_reset_transport_header(skb); + skb_set_transport_header(skb, ihl); + vnet_fullcsum(skb); + } + } if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) { skb->ip_summed = CHECKSUM_PARTIAL; skb->csum_level = 0; diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 70814b7386b3..fc8bbff2d7e3 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c @@ -1880,9 +1880,9 @@ static int dwceqos_open(struct net_device *ndev) } netdev_reset_queue(ndev); + dwceqos_init_hw(lp); napi_enable(&lp->napi); phy_start(lp->phy_dev); - dwceqos_init_hw(lp); netif_start_queue(ndev); tasklet_enable(&lp->tx_bdreclaim_tasklet); diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c index 657b65bf5cac..18bf3a8fdc50 100644 --- a/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/drivers/net/ethernet/ti/davinci_cpdma.c @@ -82,7 +82,7 @@ struct cpdma_desc { struct cpdma_desc_pool { phys_addr_t phys; - u32 hw_addr; + dma_addr_t hw_addr; void __iomem *iomap; /* ioremap map */ void *cpumap; /* dma_alloc map */ int desc_size, mem_size; @@ -152,7 
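The davinci_cpdma change that starts here is about keeping three different addresses straight for the descriptor pool: the CPU mapping the driver dereferences, the physical address, and the DMA address that must be programmed into the hardware, which dma_alloc_coherent() returns separately and which need not equal the physical address once an IOMMU sits in between (hence the "don't use this value" comment). The usual allocation pattern, as a sketch with illustrative names and register offset:

static int alloc_desc_pool(struct device *dev, void __iomem *regs,
			   size_t pool_size, void **cpu_va,
			   dma_addr_t *hw_addr)
{
	*cpu_va = dma_alloc_coherent(dev, pool_size, hw_addr, GFP_KERNEL);
	if (!*cpu_va)
		return -ENOMEM;

	/* program the controller with the DMA address, never with
	 * virt_to_phys(*cpu_va): behind an IOMMU the two can differ
	 */
	writel(lower_32_bits(*hw_addr), regs + 0x0);
	return 0;
}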
+152,7 @@ struct cpdma_chan { * abstract out these details */ static struct cpdma_desc_pool * -cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, +cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr, int size, int align) { int bitmap_size; @@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, if (phys) { pool->phys = phys; - pool->iomap = ioremap(phys, size); + pool->iomap = ioremap(phys, size); /* should be memremap? */ pool->hw_addr = hw_addr; } else { - pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, + pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr, GFP_KERNEL); - pool->iomap = pool->cpumap; - pool->hw_addr = pool->phys; + pool->iomap = (void __iomem __force *)pool->cpumap; + pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */ } if (pool->iomap) diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 7f975a2c8990..b0de8ecd7fe8 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -533,8 +533,8 @@ static int dfx_register(struct device *bdev) const char *print_name = dev_name(bdev); struct net_device *dev; DFX_board_t *bp; /* board pointer */ - resource_size_t bar_start[3]; /* pointers to ports */ - resource_size_t bar_len[3]; /* resource length */ + resource_size_t bar_start[3] = {0}; /* pointers to ports */ + resource_size_t bar_len[3] = {0}; /* resource length */ int alloc_size; /* total buffer size used */ struct resource *region; int err = 0; @@ -3697,8 +3697,8 @@ static void dfx_unregister(struct device *bdev) int dfx_bus_pci = dev_is_pci(bdev); int dfx_bus_tc = DFX_BUS_TC(bdev); int dfx_use_mmio = DFX_MMIO || dfx_bus_tc; - resource_size_t bar_start[3]; /* pointers to ports */ - resource_size_t bar_len[3]; /* resource lengths */ + resource_size_t bar_start[3] = {0}; /* pointers to ports */ + resource_size_t bar_len[3] = {0}; /* resource lengths */ int alloc_size; /* total buffer size used */ unregister_netdev(dev); diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 7456569f53c1..028e3873c310 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -980,9 +980,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, opts = ip_tunnel_info_opts(info); if (key->tun_flags & TUNNEL_CSUM) - flags |= GENEVE_F_UDP_CSUM; + flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX; else - flags &= ~GENEVE_F_UDP_CSUM; + flags |= GENEVE_F_UDP_ZERO_CSUM6_TX; err = geneve6_build_skb(dst, skb, key->tun_flags, vni, info->options_len, opts, @@ -1039,6 +1039,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev) return geneve_xmit_skb(skb, dev, info); } +static int geneve_change_mtu(struct net_device *dev, int new_mtu) +{ + /* GENEVE overhead is not fixed, so we can't enforce a more + * precise max MTU. 
+ */ + if (new_mtu < 68 || new_mtu > IP_MAX_MTU) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); @@ -1083,7 +1094,7 @@ static const struct net_device_ops geneve_netdev_ops = { .ndo_stop = geneve_stop, .ndo_start_xmit = geneve_xmit, .ndo_get_stats64 = ip_tunnel_get_stats64, - .ndo_change_mtu = eth_change_mtu, + .ndo_change_mtu = geneve_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_fill_metadata_dst = geneve_fill_metadata_dst, @@ -1442,11 +1453,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, err = geneve_configure(net, dev, &geneve_remote_unspec, 0, 0, 0, htons(dst_port), true, 0); - if (err) { - free_netdev(dev); - return ERR_PTR(err); - } + if (err) + goto err; + + /* openvswitch users expect packet sizes to be unrestricted, + * so set the largest MTU we can. + */ + err = geneve_change_mtu(dev, IP_MAX_MTU); + if (err) + goto err; + return dev; + + err: + free_netdev(dev); + return ERR_PTR(err); } EXPORT_SYMBOL_GPL(geneve_dev_create_fb); diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index f4130af09244..fcb92c0d0eb9 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -624,6 +624,7 @@ struct nvsp_message { #define RNDIS_PKT_ALIGN_DEFAULT 8 struct multi_send_data { + struct sk_buff *skb; /* skb containing the pkt */ struct hv_netvsc_packet *pkt; /* netvsc pkt pending */ u32 count; /* counter of batched packets */ }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 059fc5231601..ec313fc08d82 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -841,6 +841,18 @@ static inline int netvsc_send_pkt( return ret; } +/* Move packet out of multi send data (msd), and clear msd */ +static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send, + struct sk_buff **msd_skb, + struct multi_send_data *msdp) +{ + *msd_skb = msdp->skb; + *msd_send = msdp->pkt; + msdp->skb = NULL; + msdp->pkt = NULL; + msdp->count = 0; +} + int netvsc_send(struct hv_device *device, struct hv_netvsc_packet *packet, struct rndis_message *rndis_msg, @@ -855,6 +867,7 @@ int netvsc_send(struct hv_device *device, unsigned int section_index = NETVSC_INVALID_INDEX; struct multi_send_data *msdp; struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL; + struct sk_buff *msd_skb = NULL; bool try_batch; bool xmit_more = (skb != NULL) ? 
skb->xmit_more : false; @@ -897,10 +910,8 @@ int netvsc_send(struct hv_device *device, net_device->send_section_size) { section_index = netvsc_get_next_send_section(net_device); if (section_index != NETVSC_INVALID_INDEX) { - msd_send = msdp->pkt; - msdp->pkt = NULL; - msdp->count = 0; - msd_len = 0; + move_pkt_msd(&msd_send, &msd_skb, msdp); + msd_len = 0; } } @@ -919,31 +930,31 @@ int netvsc_send(struct hv_device *device, packet->total_data_buflen += msd_len; } - if (msdp->pkt) - dev_kfree_skb_any(skb); + if (msdp->skb) + dev_kfree_skb_any(msdp->skb); if (xmit_more && !packet->cp_partial) { + msdp->skb = skb; msdp->pkt = packet; msdp->count++; } else { cur_send = packet; + msdp->skb = NULL; msdp->pkt = NULL; msdp->count = 0; } } else { - msd_send = msdp->pkt; - msdp->pkt = NULL; - msdp->count = 0; + move_pkt_msd(&msd_send, &msd_skb, msdp); cur_send = packet; } if (msd_send) { - m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb); + m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb); if (m_ret != 0) { netvsc_free_send_slot(net_device, msd_send->send_buf_index); - dev_kfree_skb_any(skb); + dev_kfree_skb_any(msd_skb); } } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 1c8db9afdcda..1d3a66563bac 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -196,65 +196,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size, return ppi; } -union sub_key { - u64 k; - struct { - u8 pad[3]; - u8 kb; - u32 ka; - }; -}; - -/* Toeplitz hash function - * data: network byte order - * return: host byte order - */ -static u32 comp_hash(u8 *key, int klen, void *data, int dlen) -{ - union sub_key subk; - int k_next = 4; - u8 dt; - int i, j; - u32 ret = 0; - - subk.k = 0; - subk.ka = ntohl(*(u32 *)key); - - for (i = 0; i < dlen; i++) { - subk.kb = key[k_next]; - k_next = (k_next + 1) % klen; - dt = ((u8 *)data)[i]; - for (j = 0; j < 8; j++) { - if (dt & 0x80) - ret ^= subk.ka; - dt <<= 1; - subk.k <<= 1; - } - } - - return ret; -} - -static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb) -{ - struct flow_keys flow; - int data_len; - - if (!skb_flow_dissect_flow_keys(skb, &flow, 0) || - !(flow.basic.n_proto == htons(ETH_P_IP) || - flow.basic.n_proto == htons(ETH_P_IPV6))) - return false; - - if (flow.basic.ip_proto == IPPROTO_TCP) - data_len = 12; - else - data_len = 8; - - *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len); - - return true; -} - static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, void *accel_priv, select_queue_fallback_t fallback) { @@ -267,11 +208,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb, if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1) return 0; - if (netvsc_set_hash(&hash, skb)) { - q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % - ndev->real_num_tx_queues; - skb_set_hash(skb, hash, PKT_HASH_TYPE_L3); - } + hash = skb_get_hash(skb); + q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] % + ndev->real_num_tx_queues; if (!nvsc_dev->chn_table[q_idx]) q_idx = 0; diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h index 29cbde8501ed..d47cf14bb4a5 100644 --- a/drivers/net/irda/bfin_sir.h +++ b/drivers/net/irda/bfin_sir.h @@ -82,9 +82,6 @@ struct bfin_sir_self { #define DRIVER_NAME "bfin_sir" -#define port_membase(port) (((struct bfin_sir_port *)(port))->membase) -#define get_lsr_cache(port) (((struct bfin_sir_port *)(port))->lsr) -#define put_lsr_cache(port, v) (((struct 
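In the netvsc_drv.c hunk above, the driver drops its open-coded Toeplitz hash and leans on skb_get_hash(), which runs the kernel's flow dissector once and caches the result in skb->hash, so repeated lookups for the same skb are cheap. Queue selection then collapses to a table lookup, roughly as below (names loosely modelled on the driver, illustrative only):

static u16 pick_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			 const u32 *send_table, unsigned int table_size)
{
	u32 hash = skb_get_hash(skb);	/* flow hash, cached in skb->hash */

	return send_table[hash % table_size] % ndev->real_num_tx_queues;
}

One behavioural note: skb_get_hash() is keyed with a random per-boot secret rather than the fixed Toeplitz key the removed code used, so the flow-to-queue mapping changes across boots but stays stable for a given flow within one boot.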
bfin_sir_port *)(port))->lsr = (v)) #include <asm/bfin_serial.h> static const unsigned short per[][4] = { diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 6a57a005e0ca..94e688805dd2 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1323,6 +1323,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); + linkwatch_fire_event(dev); return 0; @@ -1522,6 +1523,7 @@ static int macvlan_device_event(struct notifier_block *unused, port = macvlan_port_get_rtnl(dev); switch (event) { + case NETDEV_UP: case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 60994a83a0d6..f0a77020037a 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -186,6 +186,7 @@ config MDIO_GPIO config MDIO_OCTEON tristate "Support for MDIO buses on Octeon and ThunderX SOCs" depends on 64BIT + depends on HAS_IOMEM help This module provides a driver for the Octeon and ThunderX MDIO diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 180f69952779..7a240fce3a7e 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -846,6 +846,11 @@ static void decode_rxts(struct dp83640_private *dp83640, struct skb_shared_hwtstamps *shhwtstamps = NULL; struct sk_buff *skb; unsigned long flags; + u8 overflow; + + overflow = (phy_rxts->ns_hi >> 14) & 0x3; + if (overflow) + pr_debug("rx timestamp queue overflow, count %d\n", overflow); spin_lock_irqsave(&dp83640->rx_lock, flags); @@ -888,6 +893,7 @@ static void decode_txts(struct dp83640_private *dp83640, struct skb_shared_hwtstamps shhwtstamps; struct sk_buff *skb; u64 ns; + u8 overflow; /* We must already have the skb that triggered this. 
*/ @@ -897,6 +903,17 @@ static void decode_txts(struct dp83640_private *dp83640, pr_debug("have timestamp but tx_queue empty\n"); return; } + + overflow = (phy_txts->ns_hi >> 14) & 0x3; + if (overflow) { + pr_debug("tx timestamp queue overflow, count %d\n", overflow); + while (skb) { + skb_complete_tx_timestamp(skb, NULL); + skb = skb_dequeue(&dp83640->tx_queue); + } + return; + } + ns = phy2txts(phy_txts); memset(&shhwtstamps, 0, sizeof(shhwtstamps)); shhwtstamps.hwtstamp = ns_to_ktime(ns); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 8763bb20988a..5590b9c182c9 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -692,25 +692,29 @@ void phy_change(struct work_struct *work) struct phy_device *phydev = container_of(work, struct phy_device, phy_queue); - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - goto ignore; + if (phy_interrupt_is_valid(phydev)) { + if (phydev->drv->did_interrupt && + !phydev->drv->did_interrupt(phydev)) + goto ignore; - if (phy_disable_interrupts(phydev)) - goto phy_err; + if (phy_disable_interrupts(phydev)) + goto phy_err; + } mutex_lock(&phydev->lock); if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) phydev->state = PHY_CHANGELINK; mutex_unlock(&phydev->lock); - atomic_dec(&phydev->irq_disable); - enable_irq(phydev->irq); + if (phy_interrupt_is_valid(phydev)) { + atomic_dec(&phydev->irq_disable); + enable_irq(phydev->irq); - /* Reenable interrupts */ - if (PHY_HALTED != phydev->state && - phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) - goto irq_enable_err; + /* Reenable interrupts */ + if (PHY_HALTED != phydev->state && + phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED)) + goto irq_enable_err; + } /* reschedule state queue work to run as soon as possible */ cancel_delayed_work_sync(&phydev->state_queue); @@ -905,10 +909,10 @@ void phy_state_machine(struct work_struct *work) phydev->adjust_link(phydev->attached_dev); break; case PHY_RUNNING: - /* Only register a CHANGE if we are polling or ignoring - * interrupts and link changed since latest checking. + /* Only register a CHANGE if we are polling and link changed + * since latest checking. 
*/ - if (!phy_interrupt_is_valid(phydev)) { + if (phydev->irq == PHY_POLL) { old_link = phydev->link; err = phy_read_status(phydev); if (err) @@ -1000,15 +1004,21 @@ void phy_state_machine(struct work_struct *work) phy_state_to_str(old_state), phy_state_to_str(phydev->state)); - queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, - PHY_STATE_TIME * HZ); + /* Only re-schedule a PHY state machine change if we are polling the + * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving + * between states from phy_mac_interrupt() + */ + if (phydev->irq == PHY_POLL) + queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, + PHY_STATE_TIME * HZ); } void phy_mac_interrupt(struct phy_device *phydev, int new_link) { - cancel_work_sync(&phydev->phy_queue); phydev->link = new_link; - schedule_work(&phydev->phy_queue); + + /* Trigger a state machine change */ + queue_work(system_power_efficient_wq, &phydev->phy_queue); } EXPORT_SYMBOL(phy_mac_interrupt); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index e485f2653c82..2e21e9366f76 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -24,6 +24,10 @@ #include <linux/netdevice.h> #include <linux/smscphy.h> +struct smsc_phy_priv { + bool energy_enable; +}; + static int smsc_phy_config_intr(struct phy_device *phydev) { int rc = phy_write (phydev, MII_LAN83C185_IM, @@ -43,19 +47,14 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev) static int smsc_phy_config_init(struct phy_device *phydev) { - int __maybe_unused len; - struct device *dev __maybe_unused = &phydev->mdio.dev; - struct device_node *of_node __maybe_unused = dev->of_node; + struct smsc_phy_priv *priv = phydev->priv; + int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); - int enable_energy = 1; if (rc < 0) return rc; - if (of_find_property(of_node, "smsc,disable-energy-detect", &len)) - enable_energy = 0; - - if (enable_energy) { + if (priv->energy_enable) { /* Enable energy detect mode for this SMSC Transceivers */ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc | MII_LAN83C185_EDPWRDOWN); @@ -110,10 +109,13 @@ static int lan911x_config_init(struct phy_device *phydev) */ static int lan87xx_read_status(struct phy_device *phydev) { + struct smsc_phy_priv *priv = phydev->priv; + int err = genphy_read_status(phydev); - int i; - if (!phydev->link) { + if (!phydev->link && priv->energy_enable) { + int i; + /* Disable EDPD to wake up PHY */ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) @@ -149,6 +151,26 @@ static int lan87xx_read_status(struct phy_device *phydev) return err; } +static int smsc_phy_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct device_node *of_node = dev->of_node; + struct smsc_phy_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->energy_enable = true; + + if (of_property_read_bool(of_node, "smsc,disable-energy-detect")) + priv->energy_enable = false; + + phydev->priv = priv; + + return 0; +} + static struct phy_driver smsc_phy_driver[] = { { .phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */ @@ -159,6 +181,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -180,6 +204,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, 
+ .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -201,6 +227,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, @@ -222,6 +250,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, @@ -242,6 +272,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, @@ -263,6 +295,8 @@ static struct phy_driver smsc_phy_driver[] = { | SUPPORTED_Asym_Pause), .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, + .probe = smsc_phy_probe, + /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = lan87xx_read_status, diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 90868ca5e341..ae0905ed4a32 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr) return i < MAX_CALLID; } -static int add_chan(struct pppox_sock *sock) +static int add_chan(struct pppox_sock *sock, + struct pptp_addr *sa) { static int call_id; spin_lock(&chan_lock); - if (!sock->proto.pptp.src_addr.call_id) { + if (!sa->call_id) { call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1); if (call_id == MAX_CALLID) { call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1); if (call_id == MAX_CALLID) goto out_err; } - sock->proto.pptp.src_addr.call_id = call_id; - } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap)) + sa->call_id = call_id; + } else if (test_bit(sa->call_id, callid_bitmap)) { goto out_err; + } - set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap); - rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock); + sock->proto.pptp.src_addr = *sa; + set_bit(sa->call_id, callid_bitmap); + rcu_assign_pointer(callid_sock[sa->call_id], sock); spin_unlock(&chan_lock); return 0; @@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, struct sock *sk = sock->sk; struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr; struct pppox_sock *po = pppox_sk(sk); - struct pptp_opt *opt = &po->proto.pptp; int error = 0; if (sockaddr_len < sizeof(struct sockaddr_pppox)) @@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); - opt->src_addr = sp->sa_addr.pptp; - if (add_chan(po)) + if (sk->sk_state & PPPOX_DEAD) { + error = -EALREADY; + goto out; + } + + if (sk->sk_state & PPPOX_BOUND) { error = -EBUSY; + goto out; + } + + if (add_chan(po, &sp->sa_addr.pptp)) + error = -EBUSY; + else + sk->sk_state |= PPPOX_BOUND; +out: release_sock(sk); return error; } @@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, } opt->dst_addr = sp->sa_addr.pptp; - sk->sk_state = PPPOX_CONNECTED; + sk->sk_state |= PPPOX_CONNECTED; end: release_sock(sk); diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 2ed53331bfb2..1c299b8a162d 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -36,7 +36,7 @@ 
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" #define DRIVER_DESC "LAN78XX USB 3.0 Gigabit Ethernet Devices" #define DRIVER_NAME "lan78xx" -#define DRIVER_VERSION "1.0.1" +#define DRIVER_VERSION "1.0.2" #define TX_TIMEOUT_JIFFIES (5 * HZ) #define THROTTLE_JIFFIES (HZ / 8) @@ -462,32 +462,53 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u32 val; + u32 saved; int i, ret; + int retval; - ret = lan78xx_eeprom_confirm_not_busy(dev); - if (ret) - return ret; + /* depends on chip, some EEPROM pins are muxed with LED function. + * disable & restore LED function to access EEPROM. + */ + ret = lan78xx_read_reg(dev, HW_CFG, &val); + saved = val; + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) { + val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); + ret = lan78xx_write_reg(dev, HW_CFG, val); + } + + retval = lan78xx_eeprom_confirm_not_busy(dev); + if (retval) + return retval; for (i = 0; i < length; i++) { val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (unlikely(ret < 0)) - return -EIO; + if (unlikely(ret < 0)) { + retval = -EIO; + goto exit; + } - ret = lan78xx_wait_eeprom(dev); - if (ret < 0) - return ret; + retval = lan78xx_wait_eeprom(dev); + if (retval < 0) + goto exit; ret = lan78xx_read_reg(dev, E2P_DATA, &val); - if (unlikely(ret < 0)) - return -EIO; + if (unlikely(ret < 0)) { + retval = -EIO; + goto exit; + } data[i] = val & 0xFF; offset++; } - return 0; + retval = 0; +exit: + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) + ret = lan78xx_write_reg(dev, HW_CFG, saved); + + return retval; } static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset, @@ -509,44 +530,67 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset, u32 length, u8 *data) { u32 val; + u32 saved; int i, ret; + int retval; - ret = lan78xx_eeprom_confirm_not_busy(dev); - if (ret) - return ret; + /* depends on chip, some EEPROM pins are muxed with LED function. + * disable & restore LED function to access EEPROM. 
+ */ + ret = lan78xx_read_reg(dev, HW_CFG, &val); + saved = val; + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) { + val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_); + ret = lan78xx_write_reg(dev, HW_CFG, val); + } + + retval = lan78xx_eeprom_confirm_not_busy(dev); + if (retval) + goto exit; /* Issue write/erase enable command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_; ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (unlikely(ret < 0)) - return -EIO; + if (unlikely(ret < 0)) { + retval = -EIO; + goto exit; + } - ret = lan78xx_wait_eeprom(dev); - if (ret < 0) - return ret; + retval = lan78xx_wait_eeprom(dev); + if (retval < 0) + goto exit; for (i = 0; i < length; i++) { /* Fill data register */ val = data[i]; ret = lan78xx_write_reg(dev, E2P_DATA, val); - if (ret < 0) - return ret; + if (ret < 0) { + retval = -EIO; + goto exit; + } /* Send "write" command */ val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_; val |= (offset & E2P_CMD_EPC_ADDR_MASK_); ret = lan78xx_write_reg(dev, E2P_CMD, val); - if (ret < 0) - return ret; + if (ret < 0) { + retval = -EIO; + goto exit; + } - ret = lan78xx_wait_eeprom(dev); - if (ret < 0) - return ret; + retval = lan78xx_wait_eeprom(dev); + if (retval < 0) + goto exit; offset++; } - return 0; + retval = 0; +exit: + if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) + ret = lan78xx_write_reg(dev, HW_CFG, saved); + + return retval; } static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset, @@ -904,7 +948,6 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) if (!phydev->link && dev->link_on) { dev->link_on = false; - netif_carrier_off(dev->net); /* reset MAC */ ret = lan78xx_read_reg(dev, MAC_CR, &buf); @@ -914,6 +957,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) ret = lan78xx_write_reg(dev, MAC_CR, buf); if (unlikely(ret < 0)) return -EIO; + + phy_mac_interrupt(phydev, 0); } else if (phydev->link && !dev->link_on) { dev->link_on = true; @@ -953,7 +998,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv); ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv); - netif_carrier_on(dev->net); + phy_mac_interrupt(phydev, 1); } return ret; @@ -1495,7 +1540,6 @@ done: static int lan78xx_mdio_init(struct lan78xx_net *dev) { int ret; - int i; dev->mdiobus = mdiobus_alloc(); if (!dev->mdiobus) { @@ -1511,10 +1555,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev) snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum); - /* handle our own interrupt */ - for (i = 0; i < PHY_MAX_ADDR; i++) - dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT; - switch (dev->devid & ID_REV_CHIP_ID_MASK_) { case 0x78000000: case 0x78500000: @@ -1558,6 +1598,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) return -EIO; } + /* Enable PHY interrupts. + * We handle our own interrupt + */ + ret = phy_read(phydev, LAN88XX_INT_STS); + ret = phy_write(phydev, LAN88XX_INT_MASK, + LAN88XX_INT_MASK_MDINTPIN_EN_ | + LAN88XX_INT_MASK_LINK_CHANGE_); + + phydev->irq = PHY_IGNORE_INTERRUPT; + ret = phy_connect_direct(dev->net, phydev, lan78xx_link_status_change, PHY_INTERFACE_MODE_GMII); @@ -1580,14 +1630,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) SUPPORTED_Pause | SUPPORTED_Asym_Pause); genphy_config_aneg(phydev); - /* Workaround to enable PHY interrupt. - * phy_start_interrupts() is API for requesting and enabling - * PHY interrupt. 
However, USB-to-Ethernet device can't use - * request_irq() called in phy_start_interrupts(). - * Set PHY to PHY_HALTED and call phy_start() - * to make a call to phy_enable_interrupts() - */ - phy_stop(phydev); phy_start(phydev); netif_dbg(dev, ifup, dev->net, "phy initialised successfully"); @@ -2221,7 +2263,9 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net) if (skb2) { skb_queue_tail(&dev->txq_pend, skb2); - if (skb_queue_len(&dev->txq_pend) > 10) + /* throttle TX patch at slower than SUPER SPEED USB */ + if ((dev->udev->speed < USB_SPEED_SUPER) && + (skb_queue_len(&dev->txq_pend) > 10)) netif_stop_queue(net); } else { netif_dbg(dev, tx_err, dev->net, diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 2d88c799d2ac..a31cd954b308 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -73,7 +73,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); static int vxlan_net_id; static struct rtnl_link_ops vxlan_link_ops; -static const u8 all_zeros_mac[ETH_ALEN]; +static const u8 all_zeros_mac[ETH_ALEN + 2]; static int vxlan_sock_add(struct vxlan_dev *vxlan); @@ -1985,11 +1985,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, vxlan->cfg.port_max, true); if (info) { - if (info->key.tun_flags & TUNNEL_CSUM) - flags |= VXLAN_F_UDP_CSUM; - else - flags &= ~VXLAN_F_UDP_CSUM; - ttl = info->key.ttl; tos = info->key.tos; @@ -2004,8 +1999,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto drop; sk = vxlan->vn4_sock->sock->sk; - if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)) - df = htons(IP_DF); + if (info) { + if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) + df = htons(IP_DF); + + if (info->key.tun_flags & TUNNEL_CSUM) + flags |= VXLAN_F_UDP_CSUM; + else + flags &= ~VXLAN_F_UDP_CSUM; + } memset(&fl4, 0, sizeof(fl4)); fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0; @@ -2101,6 +2103,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, return; } + if (info) { + if (info->key.tun_flags & TUNNEL_CSUM) + flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX; + else + flags |= VXLAN_F_UDP_ZERO_CSUM6_TX; + } + ttl = ttl ? 
: ip6_dst_hoplimit(ndst); err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr, 0, ttl, src_port, dst_port, htonl(vni << 8), md, @@ -2358,29 +2367,43 @@ static void vxlan_set_multicast_list(struct net_device *dev) { } -static int vxlan_change_mtu(struct net_device *dev, int new_mtu) +static int __vxlan_change_mtu(struct net_device *dev, + struct net_device *lowerdev, + struct vxlan_rdst *dst, int new_mtu, bool strict) { - struct vxlan_dev *vxlan = netdev_priv(dev); - struct vxlan_rdst *dst = &vxlan->default_dst; - struct net_device *lowerdev; - int max_mtu; + int max_mtu = IP_MAX_MTU; - lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex); - if (lowerdev == NULL) - return eth_change_mtu(dev, new_mtu); + if (lowerdev) + max_mtu = lowerdev->mtu; if (dst->remote_ip.sa.sa_family == AF_INET6) - max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; + max_mtu -= VXLAN6_HEADROOM; else - max_mtu = lowerdev->mtu - VXLAN_HEADROOM; + max_mtu -= VXLAN_HEADROOM; - if (new_mtu < 68 || new_mtu > max_mtu) + if (new_mtu < 68) return -EINVAL; + if (new_mtu > max_mtu) { + if (strict) + return -EINVAL; + + new_mtu = max_mtu; + } + dev->mtu = new_mtu; return 0; } +static int vxlan_change_mtu(struct net_device *dev, int new_mtu) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_rdst *dst = &vxlan->default_dst; + struct net_device *lowerdev = __dev_get_by_index(vxlan->net, + dst->remote_ifindex); + return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true); +} + static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, struct ip_tunnel_info *info, __be16 sport, __be16 dport) @@ -2756,6 +2779,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, int err; bool use_ipv6 = false; __be16 default_port = vxlan->cfg.dst_port; + struct net_device *lowerdev = NULL; vxlan->net = src_net; @@ -2776,9 +2800,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, } if (conf->remote_ifindex) { - struct net_device *lowerdev - = __dev_get_by_index(src_net, conf->remote_ifindex); - + lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex); dst->remote_ifindex = conf->remote_ifindex; if (!lowerdev) { @@ -2802,6 +2824,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, needed_headroom = lowerdev->hard_header_len; } + if (conf->mtu) { + err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); + if (err) + return err; + } + if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) needed_headroom += VXLAN6_HEADROOM; else diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c index a7afdeee698c..73fb4232f9f2 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom.c +++ b/drivers/net/wireless/ath/ath9k/eeprom.c @@ -150,18 +150,18 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size) return -EIO; } - if (magic == AR5416_EEPROM_MAGIC) { - *swap_needed = false; - } else if (swab16(magic) == AR5416_EEPROM_MAGIC) { + *swap_needed = false; + if (swab16(magic) == AR5416_EEPROM_MAGIC) { if (ah->ah_flags & AH_NO_EEP_SWAP) { ath_info(common, "Ignoring endianness difference in EEPROM magic bytes.\n"); - - *swap_needed = false; } else { *swap_needed = true; } - } else { + } else if (magic != AR5416_EEPROM_MAGIC) { + if (ath9k_hw_use_flash(ah)) + return 0; + ath_err(common, "Invalid EEPROM Magic (0x%04x).\n", magic); return -EINVAL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 53637399bb99..b98db8a0a069 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -879,11 +879,24 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn) return 0; } -static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) +void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev) { + struct sdio_func *func; + struct mmc_host *host; + uint max_blocks; uint nents; int err; + func = sdiodev->func[2]; + host = func->card->host; + sdiodev->sg_support = host->max_segs > 1; + max_blocks = min_t(uint, host->max_blk_count, 511u); + sdiodev->max_request_size = min_t(uint, host->max_req_size, + max_blocks * func->cur_blksize); + sdiodev->max_segment_count = min_t(uint, host->max_segs, + SG_MAX_SINGLE_ALLOC); + sdiodev->max_segment_size = host->max_seg_size; + if (!sdiodev->sg_support) return; @@ -1021,9 +1034,6 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host) static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) { - struct sdio_func *func; - struct mmc_host *host; - uint max_blocks; int ret = 0; sdiodev->num_funcs = 2; @@ -1054,26 +1064,6 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) goto out; } - /* - * determine host related variables after brcmf_sdiod_probe() - * as func->cur_blksize is properly set and F2 init has been - * completed successfully. - */ - func = sdiodev->func[2]; - host = func->card->host; - sdiodev->sg_support = host->max_segs > 1; - max_blocks = min_t(uint, host->max_blk_count, 511u); - sdiodev->max_request_size = min_t(uint, host->max_req_size, - max_blocks * func->cur_blksize); - sdiodev->max_segment_count = min_t(uint, host->max_segs, - SG_MAX_SINGLE_ALLOC); - sdiodev->max_segment_size = host->max_seg_size; - - /* allocate scatter-gather table. sg support - * will be disabled upon allocation failure. - */ - brcmf_sdiod_sgtable_alloc(sdiodev); - ret = brcmf_sdiod_freezer_attach(sdiodev); if (ret) goto out; @@ -1084,7 +1074,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev) ret = -ENODEV; goto out; } - brcmf_sdiod_host_fixup(host); + brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host); out: if (ret) brcmf_sdiod_remove(sdiodev); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 4265b50faa98..cfee477a6eb1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -17,6 +17,7 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> +#include <linux/module.h> #include <brcmu_wifi.h> #include <brcmu_utils.h> #include "core.h" diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index dd6614332836..a14d9d9da094 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4114,6 +4114,11 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) goto fail; } + /* allocate scatter-gather table. sg support + * will be disabled upon allocation failure. 
+ */ + brcmf_sdiod_sgtable_alloc(bus->sdiodev); + /* Query the F2 block size, set roundup accordingly */ bus->blocksize = bus->sdiodev->func[2]->cur_blksize; bus->roundup = min(max_roundup, bus->blocksize); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 5ec7a6d87672..23f223150cef 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -342,6 +342,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address, /* Issue an abort to the specified function */ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn); +void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev); void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev, enum brcmf_sdiod_state state); #ifdef CONFIG_PM_SLEEP diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index e60cf141ed79..fa41a5e1c890 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c @@ -74,16 +74,19 @@ #define IWL7260_UCODE_API_MAX 17 #define IWL7265_UCODE_API_MAX 17 #define IWL7265D_UCODE_API_MAX 20 +#define IWL3168_UCODE_API_MAX 20 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 13 #define IWL7265_UCODE_API_OK 13 #define IWL7265D_UCODE_API_OK 13 +#define IWL3168_UCODE_API_OK 20 /* Lowest firmware API version supported */ #define IWL7260_UCODE_API_MIN 13 #define IWL7265_UCODE_API_MIN 13 #define IWL7265D_UCODE_API_MIN 13 +#define IWL3168_UCODE_API_MIN 20 /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d @@ -92,6 +95,8 @@ #define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3165_NVM_VERSION 0x709 #define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ +#define IWL3168_NVM_VERSION 0xd01 +#define IWL3168_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265_NVM_VERSION 0x0a1d #define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265D_NVM_VERSION 0x0c11 @@ -109,6 +114,9 @@ #define IWL3160_FW_PRE "iwlwifi-3160-" #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode" +#define IWL3168_FW_PRE "iwlwifi-3168-" +#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode" + #define IWL7265_FW_PRE "iwlwifi-7265-" #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" @@ -180,6 +188,12 @@ static const struct iwl_ht_params iwl7000_ht_params = { .ucode_api_ok = IWL7265_UCODE_API_OK, \ .ucode_api_min = IWL7265_UCODE_API_MIN +#define IWL_DEVICE_3008 \ + IWL_DEVICE_7000_COMMON, \ + .ucode_api_max = IWL3168_UCODE_API_MAX, \ + .ucode_api_ok = IWL3168_UCODE_API_OK, \ + .ucode_api_min = IWL3168_UCODE_API_MIN + #define IWL_DEVICE_7005D \ IWL_DEVICE_7000_COMMON, \ .ucode_api_max = IWL7265D_UCODE_API_MAX, \ @@ -299,11 +313,11 @@ const struct iwl_cfg iwl3165_2ac_cfg = { const struct iwl_cfg iwl3168_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 3168", - .fw_name_pre = IWL7265D_FW_PRE, - IWL_DEVICE_7000, + .fw_name_pre = IWL3168_FW_PRE, + IWL_DEVICE_3008, .ht_params = &iwl7000_ht_params, - .nvm_ver = IWL3165_NVM_VERSION, - .nvm_calib_ver = IWL3165_TX_POWER_VERSION, + .nvm_ver = IWL3168_NVM_VERSION, + .nvm_calib_ver = IWL3168_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ -376,5 +390,6 @@ const struct iwl_cfg iwl7265d_n_cfg = { MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); 
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK)); +MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK)); MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK)); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index 0036d18334af..ba3f0bbddde8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h @@ -510,6 +510,9 @@ struct iwl_mvm_tx_resp { * @scd_ssn: the index of the last contiguously sent packet * @txed: number of Txed frames in this batch * @txed_2_done: number of Acked frames in this batch + * @reduced_txp: power reduced according to TPC. This is the actual value and + * not a copy from the LQ command. Thus, if not the first rate was used + * for Tx-ing then this value will be set to 0 by FW. */ struct iwl_mvm_ba_notif { __le32 sta_addr_lo32; @@ -524,7 +527,8 @@ struct iwl_mvm_ba_notif { __le16 scd_ssn; u8 txed; u8 txed_2_done; - __le16 reserved1; + u8 reduced_txp; + u8 reserved1; } __packed; /* diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 7bb6fd0e4391..94caa88df442 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -2,6 +2,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH + * Copyright(c) 2016 Intel Deutschland GmbH * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as @@ -724,14 +725,28 @@ static int _rs_collect_tx_data(struct iwl_mvm *mvm, return 0; } -static int rs_collect_tx_data(struct iwl_mvm *mvm, - struct iwl_lq_sta *lq_sta, - struct iwl_scale_tbl_info *tbl, - int scale_index, int attempts, int successes, - u8 reduced_txp) +static int rs_collect_tpc_data(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes, + u8 reduced_txp) +{ + struct iwl_rate_scale_data *window = NULL; + + if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) + return -EINVAL; + + window = &tbl->tpc_win[reduced_txp]; + return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, + window); +} + +static int rs_collect_tlc_data(struct iwl_mvm *mvm, + struct iwl_lq_sta *lq_sta, + struct iwl_scale_tbl_info *tbl, + int scale_index, int attempts, int successes) { struct iwl_rate_scale_data *window = NULL; - int ret; if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) return -EINVAL; @@ -745,16 +760,6 @@ static int rs_collect_tx_data(struct iwl_mvm *mvm, /* Select window for current tx bit rate */ window = &(tbl->win[scale_index]); - - ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, - window); - if (ret) - return ret; - - if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION)) - return -EINVAL; - - window = &tbl->tpc_win[reduced_txp]; return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes, window); } @@ -1301,17 +1306,30 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, * first index into rate scale table. */ if (info->flags & IEEE80211_TX_STAT_AMPDU) { - /* ampdu_ack_len = 0 marks no BA was received. 
In this case - * treat it as a single frame loss as we don't want the success - * ratio to dip too quickly because a BA wasn't received + rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index, + info->status.ampdu_len, + info->status.ampdu_ack_len, + reduced_txp); + + /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat + * it as a single frame loss as we don't want the success ratio + * to dip too quickly because a BA wasn't received. + * For TPC, there's no need for this optimisation since we want + * to recover very quickly from a bad power reduction and, + * therefore we'd like the success ratio to get an immediate hit + * when failing to get a BA, so we'd switch back to a lower or + * zero power reduction. When FW transmits agg with a rate + * different from the initial rate, it will not use reduced txp + * and will send BA notification twice (one empty with reduced + * txp equal to the value from LQ and one with reduced txp 0). + * We need to update counters for each txp level accordingly. */ if (info->status.ampdu_ack_len == 0) info->status.ampdu_len = 1; - rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index, - info->status.ampdu_len, - info->status.ampdu_ack_len, - reduced_txp); + rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index, + info->status.ampdu_len, + info->status.ampdu_ack_len); /* Update success/fail counts if not searching for new mode */ if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) { @@ -1344,9 +1362,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, else continue; - rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index, - 1, i < retries ? 0 : legacy_success, - reduced_txp); + rs_collect_tpc_data(mvm, lq_sta, tmp_tbl, + lq_rate.index, 1, + i < retries ? 0 : legacy_success, + reduced_txp); + rs_collect_tlc_data(mvm, lq_sta, tmp_tbl, + lq_rate.index, 1, + i < retries ? 
0 : legacy_success); } /* Update success/fail counts if not searching for new mode */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 8bf48a7d0f4e..0914ec2fd574 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -1029,7 +1029,6 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); mvmsta->tid_data[tid].rate_n_flags = le32_to_cpu(tx_resp->initial_rate); - mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc; mvmsta->tid_data[tid].tx_time = le16_to_cpu(tx_resp->wireless_media_time); } @@ -1060,7 +1059,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info, /* TODO: not accounted if the whole A-MPDU failed */ info->status.tx_time = tid_data->tx_time; info->status.status_driver_data[0] = - (void *)(uintptr_t)tid_data->reduced_tpc; + (void *)(uintptr_t)ba_notif->reduced_txp; info->status.status_driver_data[1] = (void *)(uintptr_t)tid_data->rate_n_flags; } @@ -1133,6 +1132,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) scd_flow, ba_resp_scd_ssn, ba_notif->txed, ba_notif->txed_2_done); + IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n", + ba_notif->reduced_txp); tid_data->next_reclaimed = ba_resp_scd_ssn; iwl_mvm_check_ratid_empty(mvm, sta, tid); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 6261a68cae90..00335ea6b3eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -378,7 +378,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, /* 3168 Series */ + {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)}, /* 7265 Series */ @@ -475,6 +478,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)}, {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)}, /* 9000 Series */ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)}, diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index c32889a1e39c..a28414c50edf 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -991,7 +991,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; } - if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2)) + if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, + ETH_ALEN, data->addresses[1].addr)) goto nla_put_failure; /* We get the skb->data */ @@ -2736,7 +2737,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { - if (mac80211_hwsim_addr_match(data, addr)) { + if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) { _found = true; break; } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 9a3966cd6fbe..155f343981fe 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -273,8 
+273,10 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); - rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 1a6740b4d396..2553cdd74066 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -274,8 +274,10 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, RXCSR0_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); - rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, RXCSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, RXCSR0_DROP_TODS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(®, RXCSR0_DROP_MCAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index d26018f30b7d..2d64611de300 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -437,8 +437,10 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field16(®, TXRX_CSR2_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); - rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, 1); + rt2x00_set_field16(®, TXRX_CSR2_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field16(®, TXRX_CSR2_DROP_TODS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field16(®, TXRX_CSR2_DROP_VERSION_ERROR, 1); rt2x00_set_field16(®, TXRX_CSR2_DROP_MULTICAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 9733b31a780d..a26afcab03ed 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_PHY_ERROR, !(filter_flags & FIF_PLCPFAIL)); - rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_VER_ERROR, 1); rt2x00_set_field32(®, RX_FILTER_CFG_DROP_MULTICAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 3282ddb766f4..26427140a963 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -669,6 +669,7 @@ enum rt2x00_state_flags { CONFIG_POWERSAVING, CONFIG_HT_DISABLED, CONFIG_QOS_DISABLED, + CONFIG_MONITORING, /* * Mark we currently are sequentially reading TX_STA_FIFO register diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c index 
7e8bb1198ae9..6a1f508d472f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00config.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00config.c @@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, else clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); + if (conf->flags & IEEE80211_CONF_MONITOR) + set_bit(CONFIG_MONITORING, &rt2x00dev->flags); + else + clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); + rt2x00dev->curr_band = conf->chandef.chan->band; rt2x00dev->curr_freq = conf->chandef.chan->center_freq; rt2x00dev->tx_power = conf->power_level; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 3c26ee65a415..13da95a24cf7 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw, *total_flags |= FIF_PSPOLL; } - /* - * Check if there is any work left for us. - */ - if (rt2x00dev->packet_filter == *total_flags) - return; rt2x00dev->packet_filter = *total_flags; rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index c0e730ea1b69..24a3436ef952 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); - rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index 7081e13b4fd6..7bbc86931168 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(®, TXRX_CSR0_DROP_CONTROL, !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); - rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, 1); + rt2x00_set_field32(®, TXRX_CSR0_DROP_NOT_TO_ME, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(®, TXRX_CSR0_DROP_TO_DS, + !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(®, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(®, TXRX_CSR0_DROP_MULTICAST, diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c index a62bf0a65c32..5be34118e0af 100644 --- a/drivers/net/wireless/realtek/rtlwifi/regd.c +++ b/drivers/net/wireless/realtek/rtlwifi/regd.c @@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( case COUNTRY_CODE_SPAIN: case COUNTRY_CODE_FRANCE: case COUNTRY_CODE_ISRAEL: - case COUNTRY_CODE_WORLD_WIDE_13: return &rtl_regdom_12_13; case COUNTRY_CODE_MKK: case COUNTRY_CODE_MKK1: @@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select( return &rtl_regdom_14_60_64; case COUNTRY_CODE_GLOBAL_DOMAIN: return &rtl_regdom_14; + case COUNTRY_CODE_WORLD_WIDE_13: 
case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL: return &rtl_regdom_12_13_5g_all; default: diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index d6abf191122a..96ccd4e943db 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) RING_IDX cons, prod; unsigned short id; struct sk_buff *skb; + bool more_to_do; BUG_ON(!netif_carrier_ok(queue->info->netdev)); @@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) queue->tx.rsp_cons = prod; - /* - * Set a new event, then check for race with update of tx_cons. - * Note that it is essential to schedule a callback, no matter - * how few buffers are pending. Even if there is space in the - * transmit ring, higher layers may be blocked because too much - * data is outstanding: in such cases notification from Xen is - * likely to be the only kick that we'll get. - */ - queue->tx.sring->rsp_event = - prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1; - mb(); /* update shared area */ - } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); + RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do); + } while (more_to_do); xennet_maybe_wake_tx(queue); } diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 8ebfcaae3f5a..9edf7eb7d17c 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1277,10 +1277,12 @@ static ssize_t mode_show(struct device *dev, device_lock(dev); claim = ndns->claim; - if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim))) - mode = "memory"; - else if (claim && is_nd_btt(claim)) + if (claim && is_nd_btt(claim)) mode = "safe"; + else if (claim && is_nd_pfn(claim)) + mode = "memory"; + else if (!claim && pmem_should_map_pages(dev)) + mode = "memory"; else mode = "raw"; rc = sprintf(buf, "%s\n", mode); diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 0cc9048b86e2..ae81a2f1da50 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -301,10 +301,8 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn) switch (le32_to_cpu(pfn_sb->mode)) { case PFN_MODE_RAM: - break; case PFN_MODE_PMEM: - /* TODO: allocate from PMEM support */ - return -ENOTTY; + break; default: return -ENXIO; } diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 86829f8064a6..39c4be41ef83 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c @@ -143,11 +143,32 @@ int of_mdio_parse_addr(struct device *dev, const struct device_node *np) } EXPORT_SYMBOL(of_mdio_parse_addr); +/* The following is a list of PHY compatible strings which appear in + * some DTBs. The compatible string is never matched against a PHY + * driver, so is pointless. We only expect devices which are not PHYs + * to have a compatible string, so they can be matched to an MDIO + * driver. Encourage users to upgrade their DT blobs to remove these. + */ +static const struct of_device_id whitelist_phys[] = { + { .compatible = "brcm,40nm-ephy" }, + { .compatible = "marvell,88E1111", }, + { .compatible = "marvell,88e1116", }, + { .compatible = "marvell,88e1118", }, + { .compatible = "marvell,88e1145", }, + { .compatible = "marvell,88e1149r", }, + { .compatible = "marvell,88e1310", }, + { .compatible = "marvell,88E1510", }, + { .compatible = "marvell,88E1514", }, + { .compatible = "moxa,moxart-rtl8201cp", }, + {} +}; + /* * Return true if the child node is for a phy. 
It must either: * o Compatible string of "ethernet-phy-idX.X" * o Compatible string of "ethernet-phy-ieee802.3-c45" * o Compatible string of "ethernet-phy-ieee802.3-c22" + * o In the white list above (and issue a warning) * o No compatibility string * * A device which is not a phy is expected to have a compatible string @@ -166,6 +187,13 @@ static bool of_mdiobus_child_is_phy(struct device_node *child) if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22")) return true; + if (of_match_node(whitelist_phys, child)) { + pr_warn(FW_WARN + "%s: Whitelisted compatible string. Please remove\n", + child->full_name); + return true; + } + if (!of_find_property(child, "compatible", NULL)) return true; @@ -256,11 +284,19 @@ static int of_phy_match(struct device *dev, void *phy_np) struct phy_device *of_phy_find_device(struct device_node *phy_np) { struct device *d; + struct mdio_device *mdiodev; + if (!phy_np) return NULL; d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match); - return d ? to_phy_device(d) : NULL; + if (d) { + mdiodev = to_mdio_device(d); + if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) + return to_phy_device(d); + } + + return NULL; } EXPORT_SYMBOL(of_phy_find_device); diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 5816bceddb65..a576aeeb22da 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c @@ -64,7 +64,6 @@ #define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) #define MAX_NUM_OB_WINDOWS 2 -#define MAX_NUM_PAXC_PF 4 #define IPROC_PCIE_REG_INVALID 0xffff @@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie, writel(val, pcie->base + offset + (window * 8)); } -static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie, - unsigned int slot, - unsigned int fn) -{ - if (slot > 0) - return false; - - /* PAXC can only support limited number of functions */ - if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF) - return false; - - return true; -} - /** * Note access to the configuration registers are protected at the higher layer * by 'pci_lock' in drivers/pci/access.c @@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, u32 val; u16 offset; - if (!iproc_pcie_device_is_valid(pcie, slot, fn)) - return NULL; - /* root complex access */ if (busno == 0) { + if (slot > 0 || fn > 0) + return NULL; + iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, where & CFG_IND_ADDR_MASK); offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); @@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, return (pcie->base + offset); } + /* + * PAXC is connected to an internally emulated EP within the SoC. It + * allows only one device. 
+ */ + if (pcie->type == IPROC_PCIE_PAXC) + if (slot > 0) + return NULL; + /* EP device access */ val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | (slot << CFG_ADDR_DEV_NUM_SHIFT) | diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 0bf82a20a0fb..48d21e0edd56 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev) rpc->rpd = dev; INIT_WORK(&rpc->dpc_handler, aer_isr); mutex_init(&rpc->rpc_mutex); - init_waitqueue_head(&rpc->wait_release); /* Use PCIe bus function to store rpc into PCIe device */ set_service_data(dev, rpc); @@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev) if (rpc->isr) free_irq(dev->irq, dev); - wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); - + flush_work(&rpc->dpc_handler); aer_disable_rootport(rpc); kfree(rpc); set_service_data(dev, NULL); diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h index 84420b7c9456..945c939a86c5 100644 --- a/drivers/pci/pcie/aer/aerdrv.h +++ b/drivers/pci/pcie/aer/aerdrv.h @@ -72,7 +72,6 @@ struct aer_rpc { * recovery on the same * root port hierarchy */ - wait_queue_head_t wait_release; }; struct aer_broadcast_data { diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index 712392504ed9..521e39c1b66d 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work) while (get_e_source(rpc, &e_src)) aer_isr_one_error(p_device, &e_src); mutex_unlock(&rpc->rpc_mutex); - - wake_up(&rpc->wait_release); } /** diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 20f0ad9bb9f3..e20f23e04c24 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -41,8 +41,7 @@ static const struct key_entry intel_hid_keymap[] = { { KE_KEY, 4, { KEY_HOME } }, { KE_KEY, 5, { KEY_END } }, { KE_KEY, 6, { KEY_PAGEUP } }, - { KE_KEY, 4, { KEY_PAGEDOWN } }, - { KE_KEY, 4, { KEY_HOME } }, + { KE_KEY, 7, { KEY_PAGEDOWN } }, { KE_KEY, 8, { KEY_RFKILL } }, { KE_KEY, 9, { KEY_POWER } }, { KE_KEY, 11, { KEY_SLEEP } }, diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c index 02bc5a6343c3..aa454241489c 100644 --- a/drivers/platform/x86/intel_scu_ipcutil.c +++ b/drivers/platform/x86/intel_scu_ipcutil.c @@ -49,7 +49,7 @@ struct scu_ipc_data { static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) { - int count = data->count; + unsigned int count = data->count; if (count == 0 || count == 3 || count > 4) return -EINVAL; diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c index f700723ca5d6..d28e3ab9479c 100644 --- a/drivers/pnp/quirks.c +++ b/drivers/pnp/quirks.c @@ -342,6 +342,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev) /* Device IDs of parts that have 32KB MCH space */ static const unsigned int mch_quirk_devices[] = { 0x0154, /* Ivy Bridge */ + 0x0a04, /* Haswell-ULT */ 0x0c00, /* Haswell */ 0x1604, /* Broadwell */ }; diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c index 934c139916c6..ee4f183ef9ee 100644 --- a/drivers/ptp/ptp_ixp46x.c +++ b/drivers/ptp/ptp_ixp46x.c @@ -178,7 +178,6 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta) static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) { u64 ns; - u32 remainder; unsigned long flags; struct ixp_clock *ixp_clock = container_of(ptp, struct 
ixp_clock, caps); struct ixp46x_ts_regs *regs = ixp_clock->regs; @@ -189,8 +188,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) spin_unlock_irqrestore(®ister_lock, flags); - ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder); - ts->tv_nsec = remainder; + *ts = ns_to_timespec64(ns); return 0; } @@ -202,8 +200,7 @@ static int ptp_ixp_settime(struct ptp_clock_info *ptp, struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps); struct ixp46x_ts_regs *regs = ixp_clock->regs; - ns = ts->tv_sec * 1000000000ULL; - ns += ts->tv_nsec; + ns = timespec64_to_ns(ts); spin_lock_irqsave(®ister_lock, flags); diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 361358134315..93880ed6291c 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev, /* * Command Lock contention */ - err = SCSI_DH_RETRY; + err = SCSI_DH_IMM_RETRY; break; default: break; @@ -612,6 +612,8 @@ retry: err = mode_select_handle_sense(sdev, h->sense); if (err == SCSI_DH_RETRY && retry_cnt--) goto retry; + if (err == SCSI_DH_IMM_RETRY) + goto retry; } if (err == SCSI_DH_OK) { h->state = RDAC_STATE_ACTIVE; diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig index b67661836c9f..d1dd1616f983 100644 --- a/drivers/scsi/hisi_sas/Kconfig +++ b/drivers/scsi/hisi_sas/Kconfig @@ -1,6 +1,6 @@ config SCSI_HISI_SAS tristate "HiSilicon SAS" - depends on HAS_DMA + depends on HAS_DMA && HAS_IOMEM depends on ARM64 || COMPILE_TEST select SCSI_SAS_LIBSAS select BLK_DEV_INTEGRITY diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 057fdeb720ac..eea24d7531cf 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1289,13 +1289,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, goto out; } - if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) { - if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) || - !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) - ts->stat = SAS_DATA_OVERRUN; - else - slot_err_v1_hw(hisi_hba, task, slot); + if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK && + !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) { + slot_err_v1_hw(hisi_hba, task, slot); goto out; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 52a87657c7dd..692a7570b5e1 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2204,7 +2204,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) /* Clear outstanding commands array. 
*/ for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; - if (!req) + if (!req || !test_bit(que, ha->req_qid_map)) continue; req->out_ptr = (void *)(req->ring + req->length); *req->out_ptr = 0; @@ -2221,7 +2221,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha) for (que = 0; que < ha->max_rsp_queues; que++) { rsp = ha->rsp_q_map[que]; - if (!rsp) + if (!rsp || !test_bit(que, ha->rsp_qid_map)) continue; rsp->in_ptr = (void *)(rsp->ring + rsp->length); *rsp->in_ptr = 0; @@ -4981,7 +4981,7 @@ qla25xx_init_queues(struct qla_hw_data *ha) for (i = 1; i < ha->max_rsp_queues; i++) { rsp = ha->rsp_q_map[i]; - if (rsp) { + if (rsp && test_bit(i, ha->rsp_qid_map)) { rsp->options &= ~BIT_0; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) @@ -4996,8 +4996,8 @@ qla25xx_init_queues(struct qla_hw_data *ha) } for (i = 1; i < ha->max_req_queues; i++) { req = ha->req_q_map[i]; - if (req) { - /* Clear outstanding commands array. */ + if (req && test_bit(i, ha->req_qid_map)) { + /* Clear outstanding commands array. */ req->options &= ~BIT_0; ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index d4d65eb0e9b4..4af95479a9db 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3063,9 +3063,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) "MSI-X: Failed to enable support " "-- %d/%d\n Retry with %d vectors.\n", ha->msix_count, ret, ret); + ha->msix_count = ret; + ha->max_rsp_queues = ha->msix_count - 1; } - ha->msix_count = ret; - ha->max_rsp_queues = ha->msix_count - 1; ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * ha->msix_count, GFP_KERNEL); if (!ha->msix_entries) { diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c5dd594f6c31..cf7ba52bae66 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) /* Delete request queues */ for (cnt = 1; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; - if (req) { + if (req && test_bit(cnt, ha->req_qid_map)) { ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00ea, @@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha) /* Delete response queues */ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { rsp = ha->rsp_q_map[cnt]; - if (rsp) { + if (rsp && test_bit(cnt, ha->rsp_qid_map)) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00eb, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f1788db43195..f6c7ce35b542 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -409,6 +409,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) int cnt; for (cnt = 0; cnt < ha->max_req_queues; cnt++) { + if (!test_bit(cnt, ha->req_qid_map)) + continue; + req = ha->req_q_map[cnt]; qla2x00_free_req_que(ha, req); } @@ -416,6 +419,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) ha->req_q_map = NULL; for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { + if (!test_bit(cnt, ha->rsp_qid_map)) + continue; + rsp = ha->rsp_q_map[cnt]; qla2x00_free_rsp_que(ha, rsp); } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 8075a4cdb45c..ee967becd257 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -105,7 +105,7 @@ static void 
qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt); static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, int fn, void *iocb, int flags); static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd - *cmd, struct atio_from_isp *atio, int ha_locked); + *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort); static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, struct qla_tgt_srr_imm *imm, int ha_lock); static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, @@ -1756,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0); else { - if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) + if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, mcmd->fc_tm_rsp, false); else @@ -2665,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, /* no need to terminate. FW already freed exchange. */ qlt_abort_cmd_on_host_reset(cmd->vha, cmd); else - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); return 0; } @@ -3173,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha, } static void qlt_send_term_exchange(struct scsi_qla_host *vha, - struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) + struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked, + int ul_abort) { unsigned long flags = 0; int rc; @@ -3193,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha, qlt_alloc_qfull_cmd(vha, atio, 0, 0); done: - if (cmd && (!cmd->aborted || - !cmd->cmd_sent_to_fw)) { + if (cmd && !ul_abort && !cmd->aborted) { if (cmd->sg_mapped) qlt_unmap_sg(vha, cmd); vha->hw->tgt.tgt_ops->free_cmd(cmd); @@ -3253,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha) } -void qlt_abort_cmd(struct qla_tgt_cmd *cmd) +int qlt_abort_cmd(struct qla_tgt_cmd *cmd) { struct qla_tgt *tgt = cmd->tgt; struct scsi_qla_host *vha = tgt->vha; struct se_cmd *se_cmd = &cmd->se_cmd; + unsigned long flags; ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, "qla_target(%d): terminating exchange for aborted cmd=%p " "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, se_cmd->tag); + spin_lock_irqsave(&cmd->cmd_lock, flags); + if (cmd->aborted) { + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + /* + * It's normal to see 2 calls in this path: + * 1) XFER Rdy completion + CMD_T_ABORT + * 2) TCM TMR - drain_state_list + */ + ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, + "multiple abort. 
%p transport_state %x, t_state %x," + " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state, + cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags); + return EIO; + } cmd->aborted = 1; cmd->cmd_flags |= BIT_6; + spin_unlock_irqrestore(&cmd->cmd_lock, flags); - qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1); + return 0; } EXPORT_SYMBOL(qlt_abort_cmd); @@ -3282,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd) BUG_ON(cmd->cmd_in_wq); + if (cmd->sg_mapped) + qlt_unmap_sg(cmd->vha, cmd); + if (!cmd->q_full) qlt_decr_num_pend_cmds(cmd->vha); @@ -3399,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, term = 1; if (term) - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); return term; } @@ -3580,12 +3600,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, case CTIO_PORT_LOGGED_OUT: case CTIO_PORT_UNAVAILABLE: { - int logged_out = (status & 0xFFFF); + int logged_out = + (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT; + ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, "qla_target(%d): CTIO with %s status %x " "received (state %x, se_cmd %p)\n", vha->vp_idx, - (logged_out == CTIO_PORT_LOGGED_OUT) ? - "PORT LOGGED OUT" : "PORT UNAVAILABLE", + logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE", status, cmd->state, se_cmd); if (logged_out && cmd->sess) { @@ -3754,6 +3775,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd) goto out_term; } + spin_lock_init(&cmd->cmd_lock); cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; cmd->se_cmd.tag = atio->u.isp24.exchange_addr; cmd->unpacked_lun = scsilun_to_int( @@ -3796,7 +3818,7 @@ out_term: */ cmd->cmd_flags |= BIT_2; spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); + qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0); qlt_decr_num_pend_cmds(vha); percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); @@ -3918,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work) out_term: spin_lock_irqsave(&ha->hardware_lock, flags); - qlt_send_term_exchange(vha, NULL, &op->atio, 1); + qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); kfree(op); @@ -3982,7 +4004,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, cmd->cmd_in_wq = 1; cmd->cmd_flags |= BIT_0; - cmd->se_cmd.cpuid = -1; + cmd->se_cmd.cpuid = ha->msix_count ? 
+ ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND; spin_lock(&vha->cmd_list_lock); list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); @@ -3990,7 +4013,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, INIT_WORK(&cmd->work, qlt_do_work); if (ha->msix_count) { - cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid; if (cmd->atio.u.isp24.fcp_cmnd.rddata) queue_work_on(smp_processor_id(), qla_tgt_wq, &cmd->work); @@ -4771,7 +4793,7 @@ out_reject: dump_stack(); } else { cmd->cmd_flags |= BIT_9; - qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); + qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0); } spin_unlock_irqrestore(&ha->hardware_lock, flags); } @@ -4950,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha, sctio, sctio->srr_id); list_del(&sctio->srr_list_entry); qlt_send_term_exchange(vha, sctio->cmd, - &sctio->cmd->atio, 1); + &sctio->cmd->atio, 1, 0); kfree(sctio); } } @@ -5123,7 +5145,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha, atio->u.isp24.fcp_hdr.s_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); if (!sess) { - qlt_send_term_exchange(vha, NULL, atio, 1); + qlt_send_term_exchange(vha, NULL, atio, 1, 0); return 0; } /* Sending marker isn't necessary, since we called from ISR */ @@ -5406,7 +5428,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ qlt_send_busy(vha, atio, SAM_STAT_BUSY); #else - qlt_send_term_exchange(vha, NULL, atio, 1); + qlt_send_term_exchange(vha, NULL, atio, 1, 0); #endif if (!ha_locked) @@ -5523,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ qlt_send_busy(vha, atio, 0); #else - qlt_send_term_exchange(vha, NULL, atio, 1); + qlt_send_term_exchange(vha, NULL, atio, 1, 0); #endif } else { if (tgt->tgt_stop) { @@ -5532,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) "command to target, sending TERM " "EXCHANGE for rsp\n"); qlt_send_term_exchange(vha, NULL, - atio, 1); + atio, 1, 0); } else { ql_dbg(ql_dbg_tgt, vha, 0xe060, "qla_target(%d): Unable to send " @@ -5960,7 +5982,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, return; out_term: - qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0); + qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); if (sess) ha->tgt.tgt_ops->put_sess(sess); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 71b2865ba3c8..22a6a767fe07 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -943,6 +943,36 @@ struct qla_tgt_sess { qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; }; +typedef enum { + /* + * BIT_0 - Atio Arrival / schedule to work + * BIT_1 - qlt_do_work + * BIT_2 - qlt_do work failed + * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending + * BIT_4 - read respond/tcm_qla2xx_queue_data_in + * BIT_5 - status respond / tcm_qla2xx_queue_status + * BIT_6 - tcm request to abort/Term exchange. 
+ * pre_xmit_response->qlt_send_term_exchange + * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) + * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) + * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) + * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data + + * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd + * BIT_13 - Bad completion - + * qlt_ctio_do_completion --> qlt_term_ctio_exchange + * BIT_14 - Back end data received/sent. + * BIT_15 - SRR prepare ctio + * BIT_16 - complete free + * BIT_17 - flush - qlt_abort_cmd_on_host_reset + * BIT_18 - completion w/abort status + * BIT_19 - completion w/unknown status + * BIT_20 - tcm_qla2xxx_free_cmd + */ + CMD_FLAG_DATA_WORK = BIT_11, + CMD_FLAG_DATA_WORK_FREE = BIT_21, +} cmd_flags_t; + struct qla_tgt_cmd { struct se_cmd se_cmd; struct qla_tgt_sess *sess; @@ -952,6 +982,7 @@ struct qla_tgt_cmd { /* Sense buffer that will be mapped into outgoing status */ unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; + spinlock_t cmd_lock; /* to save extra sess dereferences */ unsigned int conf_compl_supported:1; unsigned int sg_mapped:1; @@ -986,30 +1017,8 @@ struct qla_tgt_cmd { uint64_t jiffies_at_alloc; uint64_t jiffies_at_free; - /* BIT_0 - Atio Arrival / schedule to work - * BIT_1 - qlt_do_work - * BIT_2 - qlt_do work failed - * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending - * BIT_4 - read respond/tcm_qla2xx_queue_data_in - * BIT_5 - status respond / tcm_qla2xx_queue_status - * BIT_6 - tcm request to abort/Term exchange. - * pre_xmit_response->qlt_send_term_exchange - * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response) - * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer) - * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange) - * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data - * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work - * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd - * BIT_13 - Bad completion - - * qlt_ctio_do_completion --> qlt_term_ctio_exchange - * BIT_14 - Back end data received/sent. - * BIT_15 - SRR prepare ctio - * BIT_16 - complete free - * BIT_17 - flush - qlt_abort_cmd_on_host_reset - * BIT_18 - completion w/abort status - * BIT_19 - completion w/unknown status - */ - uint32_t cmd_flags; + + cmd_flags_t cmd_flags; }; struct qla_tgt_sess_work_param { @@ -1148,7 +1157,7 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p) extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); -extern void qlt_abort_cmd(struct qla_tgt_cmd *); +extern int qlt_abort_cmd(struct qla_tgt_cmd *); extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index ddbe2e7ac14d..c3e622524604 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; + + if (!test_bit(i, vha->hw->req_qid_map)) + continue; + if (req || !buf) { length = req ? 
req->length : REQUEST_ENTRY_CNT_24XX; @@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha, } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (!test_bit(i, vha->hw->rsp_qid_map)) + continue; + if (rsp || !buf) { length = rsp ? rsp->length : RESPONSE_ENTRY_CNT_MQ; @@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { for (i = 0; i < vha->hw->max_req_queues; i++) { struct req_que *req = vha->hw->req_q_map[i]; + + if (!test_bit(i, vha->hw->req_qid_map)) + continue; + if (req || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); @@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha, } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { for (i = 0; i < vha->hw->max_rsp_queues; i++) { struct rsp_que *rsp = vha->hw->rsp_q_map[i]; + + if (!test_bit(i, vha->hw->rsp_qid_map)) + continue; + if (rsp || !buf) { qla27xx_insert16(i, buf, len); qla27xx_insert16(1, buf, len); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index faf0a126627f..1808a01cfb7e 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -298,6 +298,10 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd) { cmd->vha->tgt_counters.core_qla_free_cmd++; cmd->cmd_in_wq = 1; + + BUG_ON(cmd->cmd_flags & BIT_20); + cmd->cmd_flags |= BIT_20; + INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); } @@ -374,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + + if (cmd->aborted) { + /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * already kick start the free. + */ + pr_debug("write_pending aborted cmd[%p] refcount %d " + "transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd,cmd->se_cmd.cmd_kref.refcount.counter, + cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + return 0; + } cmd->cmd_flags |= BIT_3; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -405,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd) se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, - 3 * HZ); + 50); return 0; } spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); @@ -444,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, if (bidi) flags |= TARGET_SCF_BIDI_OP; + if (se_cmd->cpuid != WORK_CPU_UNBOUND) + flags |= TARGET_SCF_USE_CPUID; + sess = cmd->sess; if (!sess) { pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); @@ -465,13 +486,25 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, static void tcm_qla2xxx_handle_data_work(struct work_struct *work) { struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); + unsigned long flags; /* * Ensure that the complete FCP WRITE payload has been received. * Otherwise return an exception via CHECK_CONDITION status. 
*/ cmd->cmd_in_wq = 0; - cmd->cmd_flags |= BIT_11; + + spin_lock_irqsave(&cmd->cmd_lock, flags); + cmd->cmd_flags |= CMD_FLAG_DATA_WORK; + if (cmd->aborted) { + cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + + tcm_qla2xxx_free_cmd(cmd); + return; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + cmd->vha->tgt_counters.qla_core_ret_ctio++; if (!cmd->write_data_transferred) { /* @@ -546,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + if (cmd->aborted) { + /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task + * can get ahead of this cmd. tcm_qla2xxx_aborted_task + * already kick start the free. + */ + pr_debug("queue_data_in aborted cmd[%p] refcount %d " + "transport_state %x, t_state %x, se_cmd_flags %x\n", + cmd,cmd->se_cmd.cmd_kref.refcount.counter, + cmd->se_cmd.transport_state, + cmd->se_cmd.t_state, + cmd->se_cmd.se_cmd_flags); + return 0; + } + cmd->cmd_flags |= BIT_4; cmd->bufflen = se_cmd->data_length; cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); @@ -637,11 +684,34 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd) qlt_xmit_tm_rsp(mcmd); } + +#define DATA_WORK_NOT_FREE(_flags) \ + (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \ + CMD_FLAG_DATA_WORK) static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); - qlt_abort_cmd(cmd); + unsigned long flags; + + if (qlt_abort_cmd(cmd)) + return; + + spin_lock_irqsave(&cmd->cmd_lock, flags); + if ((cmd->state == QLA_TGT_STATE_NEW)|| + ((cmd->state == QLA_TGT_STATE_DATA_IN) && + DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) { + + cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE; + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + /* Cmd have not reached firmware. + * Use this trigger to free it. 
*/ + tcm_qla2xxx_free_cmd(cmd); + return; + } + spin_unlock_irqrestore(&cmd->cmd_lock, flags); + return; + } static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 47b9d13f97b8..da2e068ee47d 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -205,6 +205,7 @@ static struct { {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, + {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES}, {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bb669d32ccd0..d749da765df1 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd) break; default: - ret = BLKPREP_KILL; + ret = BLKPREP_INVALID; goto out; } @@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd) int ret; if (sdkp->device->no_write_same) - return BLKPREP_KILL; + return BLKPREP_INVALID; BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 503ab8b46c0b..5e820674432c 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma) } sfp->mmap_called = 1; - vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; vma->vm_private_data = sfp; vma->vm_ops = &sg_mmap_vm_ops; return 0; diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index 55627d097873..292c04eec9ad 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -42,6 +42,7 @@ #include <scsi/scsi_devinfo.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_transport_fc.h> +#include <scsi/scsi_transport.h> /* * All wire protocol details (storage protocol between the guest and the host) @@ -477,19 +478,18 @@ struct hv_host_device { struct storvsc_scan_work { struct work_struct work; struct Scsi_Host *host; - uint lun; + u8 lun; + u8 tgt_id; }; static void storvsc_device_scan(struct work_struct *work) { struct storvsc_scan_work *wrk; - uint lun; struct scsi_device *sdev; wrk = container_of(work, struct storvsc_scan_work, work); - lun = wrk->lun; - sdev = scsi_device_lookup(wrk->host, 0, 0, lun); + sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); if (!sdev) goto done; scsi_rescan_device(&sdev->sdev_gendev); @@ -540,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work) if (!scsi_host_get(wrk->host)) goto done; - sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun); + sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun); if (sdev) { scsi_remove_device(sdev); @@ -940,6 +940,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb, wrk->host = host; wrk->lun = vm_srb->lun; + wrk->tgt_id = vm_srb->target_id; INIT_WORK(&wrk->work, process_err_fn); schedule_work(&wrk->work); } @@ -1770,6 +1771,11 @@ static int __init storvsc_drv_init(void) fc_transport_template = fc_attach_transport(&fc_transport_functions); if (!fc_transport_template) return -ENODEV; + + /* + * Install Hyper-V specific timeout handler. 
+ */ + fc_transport_template->eh_timed_out = storvsc_eh_timed_out; #endif ret = vmbus_driver_register(&storvsc_drv); diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index cde5ff7529eb..d1a750760cf3 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -613,9 +613,10 @@ out: return err; } -static int ssb_bus_register(struct ssb_bus *bus, - ssb_invariants_func_t get_invariants, - unsigned long baseaddr) +static int __maybe_unused +ssb_bus_register(struct ssb_bus *bus, + ssb_invariants_func_t get_invariants, + unsigned long baseaddr) { int err; diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig index 58d4517e1836..b9519be90fda 100644 --- a/drivers/staging/iio/adc/Kconfig +++ b/drivers/staging/iio/adc/Kconfig @@ -6,6 +6,7 @@ menu "Analog to digital converters" config AD7606 tristate "Analog Devices AD7606 ADC driver" depends on GPIOLIB || COMPILE_TEST + depends on HAS_IOMEM select IIO_BUFFER select IIO_TRIGGERED_BUFFER help diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c index f129039bece3..69287108f793 100644 --- a/drivers/staging/iio/meter/ade7753.c +++ b/drivers/staging/iio/meter/ade7753.c @@ -217,8 +217,12 @@ error_ret: static int ade7753_reset(struct device *dev) { u16 val; + int ret; + + ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val); + if (ret) + return ret; - ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val); val |= BIT(6); /* Software Chip Reset */ return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val); @@ -343,8 +347,12 @@ error_ret: static int ade7753_stop_device(struct device *dev) { u16 val; + int ret; + + ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val); + if (ret) + return ret; - ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val); val |= BIT(4); /* AD converters can be turned off */ return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val); diff --git a/drivers/staging/speakup/Kconfig b/drivers/staging/speakup/Kconfig index efd6f4560d3e..7e8037e230b8 100644 --- a/drivers/staging/speakup/Kconfig +++ b/drivers/staging/speakup/Kconfig @@ -1,7 +1,7 @@ menu "Speakup console speech" config SPEAKUP - depends on VT + depends on VT && !MN10300 tristate "Speakup core" ---help--- This is the Speakup screen reader. Think of it as a diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 3327c49674d3..713c63d9681b 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -898,7 +898,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item, da->unmap_zeroes_data = flag; pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", da->da_dev, flag); - return 0; + return count; } /* diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index cacd97a8cbd0..da457e25717a 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -828,6 +828,50 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) return dev; } +/* + * Check if the underlying struct block_device request_queue supports + * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM + * in ATA and we need to set TPE=1 + */ +bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, + struct request_queue *q, int block_size) +{ + if (!blk_queue_discard(q)) + return false; + + attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / + block_size; + /* + * Currently hardcoded to 1 in Linux/SCSI code.. 
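/*
 * Illustrative userspace sketch (not part of the patch): the arithmetic the
 * new target_configure_unmap_from_queue() helper performs, rescaling the block
 * layer's 512-byte-sector discard limits to the backend's logical block size.
 * The queue limit values below are made-up example numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_discard_sectors = 8192;	/* 512-byte sectors (example) */
	unsigned int discard_granularity = 4096;	/* bytes (example) */
	unsigned int discard_alignment = 0;		/* bytes (example) */
	unsigned int block_size = 4096;			/* backend logical block size */

	/* sectors -> bytes (<< 9), then bytes -> backend LBAs */
	unsigned int max_unmap_lba_count = (max_discard_sectors << 9) / block_size;
	unsigned int unmap_granularity = discard_granularity / block_size;
	unsigned int unmap_granularity_alignment = discard_alignment / block_size;

	printf("max_unmap_lba_count=%u granularity=%u alignment=%u\n",
	       max_unmap_lba_count, unmap_granularity, unmap_granularity_alignment);
	return 0;
}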
+ */ + attrib->max_unmap_block_desc_count = 1; + attrib->unmap_granularity = q->limits.discard_granularity / block_size; + attrib->unmap_granularity_alignment = q->limits.discard_alignment / + block_size; + attrib->unmap_zeroes_data = q->limits.discard_zeroes_data; + return true; +} +EXPORT_SYMBOL(target_configure_unmap_from_queue); + +/* + * Convert from blocksize advertised to the initiator to the 512 byte + * units unconditionally used by the Linux block layer. + */ +sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) +{ + switch (dev->dev_attrib.block_size) { + case 4096: + return lb << 3; + case 2048: + return lb << 2; + case 1024: + return lb << 1; + default: + return lb; + } +} +EXPORT_SYMBOL(target_to_linux_sector); + int target_configure_device(struct se_device *dev) { struct se_hba *hba = dev->se_hba; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index e3195700211a..75f0f08b2a34 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev) " block_device blocks: %llu logical_block_size: %d\n", dev_size, div_u64(dev_size, fd_dev->fd_block_size), fd_dev->fd_block_size); - /* - * Check if the underlying struct block_device request_queue supports - * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM - * in ATA and we need to set TPE=1 - */ - if (blk_queue_discard(q)) { - dev->dev_attrib.max_unmap_lba_count = - q->limits.max_discard_sectors; - /* - * Currently hardcoded to 1 in Linux/SCSI code.. - */ - dev->dev_attrib.max_unmap_block_desc_count = 1; - dev->dev_attrib.unmap_granularity = - q->limits.discard_granularity >> 9; - dev->dev_attrib.unmap_granularity_alignment = - q->limits.discard_alignment; + + if (target_configure_unmap_from_queue(&dev->dev_attrib, q, + fd_dev->fd_block_size)) pr_debug("IFILE: BLOCK Discard support available," - " disabled by default\n"); - } + " disabled by default\n"); /* * Enable write same emulation for IBLOCK and use 0xFFFF as * the smaller WRITE_SAME(10) only has a two-byte block count. @@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) if (S_ISBLK(inode->i_mode)) { /* The backend is block device, use discard */ struct block_device *bdev = inode->i_bdev; + struct se_device *dev = cmd->se_dev; - ret = blkdev_issue_discard(bdev, lba, - nolb, GFP_KERNEL, 0); + ret = blkdev_issue_discard(bdev, + target_to_linux_sector(dev, lba), + target_to_linux_sector(dev, nolb), + GFP_KERNEL, 0); if (ret < 0) { pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", ret); diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 5a2899f9f50b..abe4eb997a84 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -121,29 +121,11 @@ static int iblock_configure_device(struct se_device *dev) dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); dev->dev_attrib.hw_queue_depth = q->nr_requests; - /* - * Check if the underlying struct block_device request_queue supports - * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM - * in ATA and we need to set TPE=1 - */ - if (blk_queue_discard(q)) { - dev->dev_attrib.max_unmap_lba_count = - q->limits.max_discard_sectors; - - /* - * Currently hardcoded to 1 in Linux/SCSI code.. 
- */ - dev->dev_attrib.max_unmap_block_desc_count = 1; - dev->dev_attrib.unmap_granularity = - q->limits.discard_granularity >> 9; - dev->dev_attrib.unmap_granularity_alignment = - q->limits.discard_alignment; - dev->dev_attrib.unmap_zeroes_data = - q->limits.discard_zeroes_data; - + if (target_configure_unmap_from_queue(&dev->dev_attrib, q, + dev->dev_attrib.hw_block_size)) pr_debug("IBLOCK: BLOCK Discard support available," - " disabled by default\n"); - } + " disabled by default\n"); + /* * Enable write same emulation for IBLOCK and use 0xFFFF as * the smaller WRITE_SAME(10) only has a two-byte block count. @@ -415,9 +397,13 @@ static sense_reason_t iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) { struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; + struct se_device *dev = cmd->se_dev; int ret; - ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); + ret = blkdev_issue_discard(bdev, + target_to_linux_sector(dev, lba), + target_to_linux_sector(dev, nolb), + GFP_KERNEL, 0); if (ret < 0) { pr_err("blkdev_issue_discard() failed: %d\n", ret); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -433,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd) struct scatterlist *sg; struct bio *bio; struct bio_list list; - sector_t block_lba = cmd->t_task_lba; - sector_t sectors = sbc_get_write_same_sectors(cmd); + struct se_device *dev = cmd->se_dev; + sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); + sector_t sectors = target_to_linux_sector(dev, + sbc_get_write_same_sectors(cmd)); if (cmd->prot_op) { pr_err("WRITE_SAME: Protection information with IBLOCK" @@ -648,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, enum dma_data_direction data_direction) { struct se_device *dev = cmd->se_dev; + sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba); struct iblock_req *ibr; struct bio *bio, *bio_start; struct bio_list list; struct scatterlist *sg; u32 sg_num = sgl_nents; - sector_t block_lba; unsigned bio_cnt; int rw = 0; int i; @@ -679,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, rw = READ; } - /* - * Convert the blocksize advertised to the initiator to the 512 byte - * units unconditionally used by the Linux block layer. 
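/*
 * Illustrative sketch (not part of the patch): the LBA rescaling that both the
 * removed open-coded branches and the new target_to_linux_sector() helper
 * implement -- a shift by log2(block_size / 512). Values are examples only.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t to_linux_sector(uint64_t lba, unsigned int block_size)
{
	switch (block_size) {
	case 4096: return lba << 3;	/* 4096 / 512 == 8 */
	case 2048: return lba << 2;	/* 2048 / 512 == 4 */
	case 1024: return lba << 1;	/* 1024 / 512 == 2 */
	default:   return lba;		/* 512-byte blocks map 1:1 */
	}
}

int main(void)
{
	/* LBA 100 on a 4k-block device is 512-byte sector 800 */
	printf("%llu\n", (unsigned long long)to_linux_sector(100, 4096));
	return 0;
}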
- */ - if (dev->dev_attrib.block_size == 4096) - block_lba = (cmd->t_task_lba << 3); - else if (dev->dev_attrib.block_size == 2048) - block_lba = (cmd->t_task_lba << 2); - else if (dev->dev_attrib.block_size == 1024) - block_lba = (cmd->t_task_lba << 1); - else if (dev->dev_attrib.block_size == 512) - block_lba = cmd->t_task_lba; - else { - pr_err("Unsupported SCSI -> BLOCK LBA conversion:" - " %u\n", dev->dev_attrib.block_size); - return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - } - ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); if (!ibr) goto fail; diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index dae0750c2032..db4412fe6b8a 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -141,7 +141,6 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); -bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags); void transport_clear_lun_ref(struct se_lun *); void transport_send_task_abort(struct se_cmd *); sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index fcdcb117c60d..82a663ba9800 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr) if (dev) { spin_lock_irqsave(&dev->se_tmr_lock, flags); - list_del(&tmr->tmr_list); + list_del_init(&tmr->tmr_list); spin_unlock_irqrestore(&dev->se_tmr_lock, flags); } kfree(tmr); } -static void core_tmr_handle_tas_abort( - struct se_node_acl *tmr_nacl, - struct se_cmd *cmd, - int tas) +static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas) { - bool remove = true; + unsigned long flags; + bool remove = true, send_tas; /* * TASK ABORTED status (TAS) bit support */ - if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { + spin_lock_irqsave(&cmd->t_state_lock, flags); + send_tas = (cmd->transport_state & CMD_T_TAS); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + + if (send_tas) { remove = false; transport_send_task_abort(cmd); } @@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list, return 1; } +static bool __target_check_io_state(struct se_cmd *se_cmd, + struct se_session *tmr_sess, int tas) +{ + struct se_session *sess = se_cmd->se_sess; + + assert_spin_locked(&sess->sess_cmd_lock); + WARN_ON_ONCE(!irqs_disabled()); + /* + * If command already reached CMD_T_COMPLETE state within + * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, + * this se_cmd has been passed to fabric driver and will + * not be aborted. + * + * Otherwise, obtain a local se_cmd->cmd_kref now for TMR + * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as + * long as se_cmd->cmd_kref is still active unless zero. 
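/*
 * Illustrative sketch (not part of the patch): a userspace analogue of the
 * "take a reference only if the object is still live" check that
 * __target_check_io_state() performs via kref_get_unless_zero(). The refcount
 * type and helper below are hypothetical, not kernel code.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_unless_zero(atomic_uint *refcount)
{
	unsigned int old = atomic_load(refcount);

	while (old != 0) {
		/* Only bump the count if it has not already dropped to zero */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_uint ref = 1;

	printf("%d\n", get_unless_zero(&ref));	/* 1: still live, count is now 2 */
	atomic_store(&ref, 0);
	printf("%d\n", get_unless_zero(&ref));	/* 0: already released, not taken */
	return 0;
}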
+ */ + spin_lock(&se_cmd->t_state_lock); + if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) { + pr_debug("Attempted to abort io tag: %llu already complete or" + " fabric stop, skipping\n", se_cmd->tag); + spin_unlock(&se_cmd->t_state_lock); + return false; + } + if (sess->sess_tearing_down || se_cmd->cmd_wait_set) { + pr_debug("Attempted to abort io tag: %llu already shutdown," + " skipping\n", se_cmd->tag); + spin_unlock(&se_cmd->t_state_lock); + return false; + } + se_cmd->transport_state |= CMD_T_ABORTED; + + if ((tmr_sess != se_cmd->se_sess) && tas) + se_cmd->transport_state |= CMD_T_TAS; + + spin_unlock(&se_cmd->t_state_lock); + + return kref_get_unless_zero(&se_cmd->cmd_kref); +} + void core_tmr_abort_task( struct se_device *dev, struct se_tmr_req *tmr, @@ -130,34 +172,22 @@ void core_tmr_abort_task( if (tmr->ref_task_tag != ref_tag) continue; - if (!kref_get_unless_zero(&se_cmd->cmd_kref)) - continue; - printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", se_cmd->se_tfo->get_fabric_name(), ref_tag); - spin_lock(&se_cmd->t_state_lock); - if (se_cmd->transport_state & CMD_T_COMPLETE) { - printk("ABORT_TASK: ref_tag: %llu already complete," - " skipping\n", ref_tag); - spin_unlock(&se_cmd->t_state_lock); + if (!__target_check_io_state(se_cmd, se_sess, 0)) { spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - target_put_sess_cmd(se_cmd); - goto out; } - se_cmd->transport_state |= CMD_T_ABORTED; - spin_unlock(&se_cmd->t_state_lock); - list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); cancel_work_sync(&se_cmd->work); transport_wait_for_tasks(se_cmd); - target_put_sess_cmd(se_cmd); transport_cmd_finish_abort(se_cmd, true); + target_put_sess_cmd(se_cmd); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" " ref_tag: %llu\n", ref_tag); @@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list( struct list_head *preempt_and_abort_list) { LIST_HEAD(drain_tmr_list); + struct se_session *sess; struct se_tmr_req *tmr_p, *tmr_pp; struct se_cmd *cmd; unsigned long flags; + bool rc; /* * Release all pending and outgoing TMRs aside from the received * LUN_RESET tmr.. @@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list( if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) continue; + sess = cmd->se_sess; + if (WARN_ON_ONCE(!sess)) + continue; + + spin_lock(&sess->sess_cmd_lock); spin_lock(&cmd->t_state_lock); - if (!(cmd->transport_state & CMD_T_ACTIVE)) { + if (!(cmd->transport_state & CMD_T_ACTIVE) || + (cmd->transport_state & CMD_T_FABRIC_STOP)) { spin_unlock(&cmd->t_state_lock); + spin_unlock(&sess->sess_cmd_lock); continue; } if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { spin_unlock(&cmd->t_state_lock); + spin_unlock(&sess->sess_cmd_lock); continue; } + if (sess->sess_tearing_down || cmd->cmd_wait_set) { + spin_unlock(&cmd->t_state_lock); + spin_unlock(&sess->sess_cmd_lock); + continue; + } + cmd->transport_state |= CMD_T_ABORTED; spin_unlock(&cmd->t_state_lock); + rc = kref_get_unless_zero(&cmd->cmd_kref); + if (!rc) { + printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n"); + spin_unlock(&sess->sess_cmd_lock); + continue; + } + spin_unlock(&sess->sess_cmd_lock); + list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); } spin_unlock_irqrestore(&dev->se_tmr_lock, flags); @@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list( (preempt_and_abort_list) ? 
"Preempt" : "", tmr_p, tmr_p->function, tmr_p->response, cmd->t_state); + cancel_work_sync(&cmd->work); + transport_wait_for_tasks(cmd); + transport_cmd_finish_abort(cmd, 1); + target_put_sess_cmd(cmd); } } static void core_tmr_drain_state_list( struct se_device *dev, struct se_cmd *prout_cmd, - struct se_node_acl *tmr_nacl, + struct se_session *tmr_sess, int tas, struct list_head *preempt_and_abort_list) { LIST_HEAD(drain_task_list); + struct se_session *sess; struct se_cmd *cmd, *next; unsigned long flags; + int rc; /* * Complete outstanding commands with TASK_ABORTED SAM status. @@ -282,6 +342,16 @@ static void core_tmr_drain_state_list( if (prout_cmd == cmd) continue; + sess = cmd->se_sess; + if (WARN_ON_ONCE(!sess)) + continue; + + spin_lock(&sess->sess_cmd_lock); + rc = __target_check_io_state(cmd, tmr_sess, tas); + spin_unlock(&sess->sess_cmd_lock); + if (!rc) + continue; + list_move_tail(&cmd->state_list, &drain_task_list); cmd->state_active = false; } @@ -289,7 +359,7 @@ static void core_tmr_drain_state_list( while (!list_empty(&drain_task_list)) { cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); - list_del(&cmd->state_list); + list_del_init(&cmd->state_list); pr_debug("LUN_RESET: %s cmd: %p" " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" @@ -313,16 +383,11 @@ static void core_tmr_drain_state_list( * loop above, but we do it down here given that * cancel_work_sync may block. */ - if (cmd->t_state == TRANSPORT_COMPLETE) - cancel_work_sync(&cmd->work); - - spin_lock_irqsave(&cmd->t_state_lock, flags); - target_stop_cmd(cmd, &flags); - - cmd->transport_state |= CMD_T_ABORTED; - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + cancel_work_sync(&cmd->work); + transport_wait_for_tasks(cmd); - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); + core_tmr_handle_tas_abort(cmd, tas); + target_put_sess_cmd(cmd); } } @@ -334,6 +399,7 @@ int core_tmr_lun_reset( { struct se_node_acl *tmr_nacl = NULL; struct se_portal_group *tmr_tpg = NULL; + struct se_session *tmr_sess = NULL; int tas; /* * TASK_ABORTED status bit, this is configurable via ConfigFS @@ -352,8 +418,9 @@ int core_tmr_lun_reset( * or struct se_device passthrough.. */ if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { - tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; - tmr_tpg = tmr->task_cmd->se_sess->se_tpg; + tmr_sess = tmr->task_cmd->se_sess; + tmr_nacl = tmr_sess->se_node_acl; + tmr_tpg = tmr_sess->se_tpg; if (tmr_nacl && tmr_tpg) { pr_debug("LUN_RESET: TMR caller fabric: %s" " initiator port %s\n", @@ -366,7 +433,7 @@ int core_tmr_lun_reset( dev->transport->name, tas); core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); - core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, + core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas, preempt_and_abort_list); /* diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 9f3608e10f25..867bc6d0a68a 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -534,9 +534,6 @@ void transport_deregister_session(struct se_session *se_sess) } EXPORT_SYMBOL(transport_deregister_session); -/* - * Called with cmd->t_state_lock held. 
- */ static void target_remove_from_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -561,10 +558,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, { unsigned long flags; - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (write_pending) - cmd->t_state = TRANSPORT_WRITE_PENDING; - if (remove_from_lists) { target_remove_from_state_list(cmd); @@ -574,6 +567,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists, cmd->se_lun = NULL; } + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (write_pending) + cmd->t_state = TRANSPORT_WRITE_PENDING; + /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. @@ -627,6 +624,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd) void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) { + bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF); + if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) transport_lun_remove_cmd(cmd); /* @@ -638,7 +637,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) if (transport_cmd_check_stop_to_fabric(cmd)) return; - if (remove) + if (remove && ack_kref) transport_put_cmd(cmd); } @@ -694,19 +693,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) } /* - * See if we are waiting to complete for an exception condition. - */ - if (cmd->transport_state & CMD_T_REQUEST_STOP) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - complete(&cmd->task_stop_comp); - return; - } - - /* * Check for case where an explicit ABORT_TASK has been received * and transport_wait_for_tasks() will be waiting for completion.. */ - if (cmd->transport_state & CMD_T_ABORTED && + if (cmd->transport_state & CMD_T_ABORTED || cmd->transport_state & CMD_T_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete_all(&cmd->t_transport_stop_comp); @@ -721,10 +711,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - if (cmd->cpuid == -1) - queue_work(target_completion_wq, &cmd->work); - else + if (cmd->se_cmd_flags & SCF_USE_CPUID) queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); + else + queue_work(target_completion_wq, &cmd->work); } EXPORT_SYMBOL(target_complete_cmd); @@ -1203,7 +1193,6 @@ void transport_init_se_cmd( INIT_LIST_HEAD(&cmd->state_list); init_completion(&cmd->t_transport_stop_comp); init_completion(&cmd->cmd_wait_comp); - init_completion(&cmd->task_stop_comp); spin_lock_init(&cmd->t_state_lock); kref_init(&cmd->cmd_kref); cmd->transport_state = CMD_T_DEV_ACTIVE; @@ -1437,6 +1426,12 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess */ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, data_dir, task_attr, sense); + + if (flags & TARGET_SCF_USE_CPUID) + se_cmd->se_cmd_flags |= SCF_USE_CPUID; + else + se_cmd->cpuid = WORK_CPU_UNBOUND; + if (flags & TARGET_SCF_UNKNOWN_SIZE) se_cmd->unknown_data_length = 1; /* @@ -1635,33 +1630,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, EXPORT_SYMBOL(target_submit_tmr); /* - * If the cmd is active, request it to be stopped and sleep until it - * has completed. 
- */ -bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags) - __releases(&cmd->t_state_lock) - __acquires(&cmd->t_state_lock) -{ - bool was_active = false; - - if (cmd->transport_state & CMD_T_BUSY) { - cmd->transport_state |= CMD_T_REQUEST_STOP; - spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - - pr_debug("cmd %p waiting to complete\n", cmd); - wait_for_completion(&cmd->task_stop_comp); - pr_debug("cmd %p stopped successfully\n", cmd); - - spin_lock_irqsave(&cmd->t_state_lock, *flags); - cmd->transport_state &= ~CMD_T_REQUEST_STOP; - cmd->transport_state &= ~CMD_T_BUSY; - was_active = true; - } - - return was_active; -} - -/* * Handle SAM-esque emulation for generic transport request failures. */ void transport_generic_request_failure(struct se_cmd *cmd, @@ -1859,19 +1827,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd) return true; } +static int __transport_check_aborted_status(struct se_cmd *, int); + void target_execute_cmd(struct se_cmd *cmd) { /* - * If the received CDB has aleady been aborted stop processing it here. - */ - if (transport_check_aborted_status(cmd, 1)) - return; - - /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. + * + * If the received CDB has aleady been aborted stop processing it here. */ spin_lock_irq(&cmd->t_state_lock); + if (__transport_check_aborted_status(cmd, 1)) { + spin_unlock_irq(&cmd->t_state_lock); + return; + } if (cmd->transport_state & CMD_T_STOP) { pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", __func__, __LINE__, cmd->tag); @@ -2222,20 +2192,14 @@ static inline void transport_free_pages(struct se_cmd *cmd) } /** - * transport_release_cmd - free a command - * @cmd: command to free + * transport_put_cmd - release a reference to a command + * @cmd: command to release * - * This routine unconditionally frees a command, and reference counting - * or list removal must be done in the caller. + * This routine releases our reference to the command and frees it if possible. */ -static int transport_release_cmd(struct se_cmd *cmd) +static int transport_put_cmd(struct se_cmd *cmd) { BUG_ON(!cmd->se_tfo); - - if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) - core_tmr_release_req(cmd->se_tmr_req); - if (cmd->t_task_cdb != cmd->__t_task_cdb) - kfree(cmd->t_task_cdb); /* * If this cmd has been setup with target_get_sess_cmd(), drop * the kref and call ->release_cmd() in kref callback. @@ -2243,18 +2207,6 @@ static int transport_release_cmd(struct se_cmd *cmd) return target_put_sess_cmd(cmd); } -/** - * transport_put_cmd - release a reference to a command - * @cmd: command to release - * - * This routine releases our reference to the command and frees it if possible. 
- */ -static int transport_put_cmd(struct se_cmd *cmd) -{ - transport_free_pages(cmd); - return transport_release_cmd(cmd); -} - void *transport_kmap_data_sg(struct se_cmd *cmd) { struct scatterlist *sg = cmd->t_data_sg; @@ -2450,34 +2402,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd) } } -int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) +static bool +__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, + unsigned long *flags); + +static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) { unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); +} + +int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) +{ int ret = 0; + bool aborted = false, tas = false; if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) - transport_wait_for_tasks(cmd); + target_wait_free_cmd(cmd, &aborted, &tas); - ret = transport_release_cmd(cmd); + if (!aborted || tas) + ret = transport_put_cmd(cmd); } else { if (wait_for_tasks) - transport_wait_for_tasks(cmd); + target_wait_free_cmd(cmd, &aborted, &tas); /* * Handle WRITE failure case where transport_generic_new_cmd() * has already added se_cmd to state_list, but fabric has * failed command before I/O submission. */ - if (cmd->state_active) { - spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->state_active) target_remove_from_state_list(cmd); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - } if (cmd->se_lun) transport_lun_remove_cmd(cmd); - ret = transport_put_cmd(cmd); + if (!aborted || tas) + ret = transport_put_cmd(cmd); + } + /* + * If the task has been internally aborted due to TMR ABORT_TASK + * or LUN_RESET, target_core_tmr.c is responsible for performing + * the remaining calls to target_put_sess_cmd(), and not the + * callers of this function. 
+ */ + if (aborted) { + pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); + wait_for_completion(&cmd->cmd_wait_comp); + cmd->se_tfo->release_cmd(cmd); + ret = 1; } return ret; } @@ -2517,26 +2493,46 @@ out: } EXPORT_SYMBOL(target_get_sess_cmd); +static void target_free_cmd_mem(struct se_cmd *cmd) +{ + transport_free_pages(cmd); + + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + core_tmr_release_req(cmd->se_tmr_req); + if (cmd->t_task_cdb != cmd->__t_task_cdb) + kfree(cmd->t_task_cdb); +} + static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; unsigned long flags; + bool fabric_stop; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); return; } - if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { + + spin_lock(&se_cmd->t_state_lock); + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); + spin_unlock(&se_cmd->t_state_lock); + + if (se_cmd->cmd_wait_set || fabric_stop) { + list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + target_free_cmd_mem(se_cmd); complete(&se_cmd->cmd_wait_comp); return; } - list_del(&se_cmd->se_cmd_list); + list_del_init(&se_cmd->se_cmd_list); spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2548,6 +2544,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) struct se_session *se_sess = se_cmd->se_sess; if (!se_sess) { + target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); return 1; } @@ -2564,6 +2561,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) { struct se_cmd *se_cmd; unsigned long flags; + int rc; spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (se_sess->sess_tearing_down) { @@ -2573,8 +2571,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) se_sess->sess_tearing_down = 1; list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); - list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) - se_cmd->cmd_wait_set = 1; + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) { + rc = kref_get_unless_zero(&se_cmd->cmd_kref); + if (rc) { + se_cmd->cmd_wait_set = 1; + spin_lock(&se_cmd->t_state_lock); + se_cmd->transport_state |= CMD_T_FABRIC_STOP; + spin_unlock(&se_cmd->t_state_lock); + } + } spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); } @@ -2587,15 +2592,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess) { struct se_cmd *se_cmd, *tmp_cmd; unsigned long flags; + bool tas; list_for_each_entry_safe(se_cmd, tmp_cmd, &se_sess->sess_wait_list, se_cmd_list) { - list_del(&se_cmd->se_cmd_list); + list_del_init(&se_cmd->se_cmd_list); pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" " %d\n", se_cmd, se_cmd->t_state, se_cmd->se_tfo->get_cmd_state(se_cmd)); + spin_lock_irqsave(&se_cmd->t_state_lock, flags); + tas = (se_cmd->transport_state & CMD_T_TAS); + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); + + if (!target_put_sess_cmd(se_cmd)) { + if (tas) + target_put_sess_cmd(se_cmd); + } + wait_for_completion(&se_cmd->cmd_wait_comp); pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" " fabric state: %d\n", se_cmd, se_cmd->t_state, @@ -2617,53 +2632,75 @@ void transport_clear_lun_ref(struct se_lun *lun) 
wait_for_completion(&lun->lun_ref_comp); } -/** - * transport_wait_for_tasks - wait for completion to occur - * @cmd: command to wait - * - * Called from frontend fabric context to wait for storage engine - * to pause and/or release frontend generated struct se_cmd. - */ -bool transport_wait_for_tasks(struct se_cmd *cmd) +static bool +__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, + bool *aborted, bool *tas, unsigned long *flags) + __releases(&cmd->t_state_lock) + __acquires(&cmd->t_state_lock) { - unsigned long flags; - spin_lock_irqsave(&cmd->t_state_lock, flags); + assert_spin_locked(&cmd->t_state_lock); + WARN_ON_ONCE(!irqs_disabled()); + + if (fabric_stop) + cmd->transport_state |= CMD_T_FABRIC_STOP; + + if (cmd->transport_state & CMD_T_ABORTED) + *aborted = true; + + if (cmd->transport_state & CMD_T_TAS) + *tas = true; + if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) return false; - } if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) return false; - } - if (!(cmd->transport_state & CMD_T_ACTIVE)) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + if (!(cmd->transport_state & CMD_T_ACTIVE)) + return false; + + if (fabric_stop && *aborted) return false; - } cmd->transport_state |= CMD_T_STOP; - pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", - cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); + pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," + " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); - spin_unlock_irqrestore(&cmd->t_state_lock, flags); + spin_unlock_irqrestore(&cmd->t_state_lock, *flags); wait_for_completion(&cmd->t_transport_stop_comp); - spin_lock_irqsave(&cmd->t_state_lock, flags); + spin_lock_irqsave(&cmd->t_state_lock, *flags); cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); - pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", - cmd->tag); + pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" + "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); + return true; +} + +/** + * transport_wait_for_tasks - wait for completion to occur + * @cmd: command to wait + * + * Called from frontend fabric context to wait for storage engine + * to pause and/or release frontend generated struct se_cmd. 
+ */ +bool transport_wait_for_tasks(struct se_cmd *cmd) +{ + unsigned long flags; + bool ret, aborted = false, tas = false; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return true; + return ret; } EXPORT_SYMBOL(transport_wait_for_tasks); @@ -2845,28 +2882,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd, } EXPORT_SYMBOL(transport_send_check_condition_and_sense); -int transport_check_aborted_status(struct se_cmd *cmd, int send_status) +static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status) + __releases(&cmd->t_state_lock) + __acquires(&cmd->t_state_lock) { + assert_spin_locked(&cmd->t_state_lock); + WARN_ON_ONCE(!irqs_disabled()); + if (!(cmd->transport_state & CMD_T_ABORTED)) return 0; - /* * If cmd has been aborted but either no status is to be sent or it has * already been sent, just return */ - if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) + if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) { + if (send_status) + cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; return 1; + } - pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", - cmd->t_task_cdb[0], cmd->tag); + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:" + " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; cmd->scsi_status = SAM_STAT_TASK_ABORTED; trace_target_cmd_complete(cmd); + + spin_unlock_irq(&cmd->t_state_lock); cmd->se_tfo->queue_status(cmd); + spin_lock_irq(&cmd->t_state_lock); return 1; } + +int transport_check_aborted_status(struct se_cmd *cmd, int send_status) +{ + int ret; + + spin_lock_irq(&cmd->t_state_lock); + ret = __transport_check_aborted_status(cmd, send_status); + spin_unlock_irq(&cmd->t_state_lock); + + return ret; +} EXPORT_SYMBOL(transport_check_aborted_status); void transport_send_task_abort(struct se_cmd *cmd) @@ -2888,11 +2946,17 @@ void transport_send_task_abort(struct se_cmd *cmd) */ if (cmd->data_direction == DMA_TO_DEVICE) { if (cmd->se_tfo->write_pending_status(cmd) != 0) { - cmd->transport_state |= CMD_T_ABORTED; + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + goto send_abort; + } cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } } +send_abort: cmd->scsi_status = SAM_STAT_TASK_ABORTED; transport_lun_remove_cmd(cmd); @@ -2909,8 +2973,17 @@ static void target_tmr_work(struct work_struct *work) struct se_cmd *cmd = container_of(work, struct se_cmd, work); struct se_device *dev = cmd->se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; + unsigned long flags; int ret; + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->transport_state & CMD_T_ABORTED) { + tmr->response = TMR_FUNCTION_REJECTED; + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + goto check_stop; + } + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + switch (tmr->function) { case TMR_ABORT_TASK: core_tmr_abort_task(dev, tmr, cmd->se_sess); @@ -2943,9 +3016,17 @@ static void target_tmr_work(struct work_struct *work) break; } + spin_lock_irqsave(&cmd->t_state_lock, flags); + if (cmd->transport_state & CMD_T_ABORTED) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + goto check_stop; + } cmd->t_state = TRANSPORT_ISTATE_PROCESSING; + 
spin_unlock_irqrestore(&cmd->t_state_lock, flags); + cmd->se_tfo->queue_tm_rsp(cmd); +check_stop: transport_cmd_check_stop_to_fabric(cmd); } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index dd600e5ead71..94f5154ac788 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -903,7 +903,7 @@ static int tcmu_configure_device(struct se_device *dev) info->version = __stringify(TCMU_MAILBOX_VERSION); info->mem[0].name = "tcm-user command & data buffer"; - info->mem[0].addr = (phys_addr_t) udev->mb_addr; + info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; info->mem[0].size = TCMU_RING_SIZE; info->mem[0].memtype = UIO_MEM_VIRTUAL; diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 8cc4ac64a91c..7c92c09be213 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -195,7 +195,7 @@ config IMX_THERMAL passive trip is crossed. config SPEAR_THERMAL - bool "SPEAr thermal sensor driver" + tristate "SPEAr thermal sensor driver" depends on PLAT_SPEAR || COMPILE_TEST depends on OF help @@ -237,8 +237,8 @@ config DOVE_THERMAL framework. config DB8500_THERMAL - bool "DB8500 thermal management" - depends on ARCH_U8500 + tristate "DB8500 thermal management" + depends on MFD_DB8500_PRCMU default y help Adds DB8500 thermal management implementation according to the thermal diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index e3fbc5a5d88f..6ceac4f2d4b2 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device, * get_load() - get load for a cpu since last updated * @cpufreq_device: &struct cpufreq_cooling_device for this cpu * @cpu: cpu number + * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus * * Return: The average load of cpu @cpu in percentage since this * function was last called. 
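get_load() reports the non-idle share of wall-clock time since the previous sample, as a percentage; the new cpu_idx parameter exists only so the per-CPU bookkeeping is indexed by the CPU's position in allowed_cpus rather than by its raw CPU number. A standalone plain-C illustration of the arithmetic, with invented sample values (the kernel obtains its deltas from get_cpu_idle_time() and uses div64_u64() for the division):

	#include <stdint.h>
	#include <stdio.h>

	/* Load in percent over one sampling window. */
	static uint32_t load_percent(uint64_t delta_time, uint64_t delta_idle)
	{
		if (delta_time <= delta_idle)
			return 0;
		return (uint32_t)(100 * (delta_time - delta_idle) / delta_time);
	}

	int main(void)
	{
		/* A 100 ms window with 25 ms spent idle gives 75% load. */
		printf("load = %u%%\n", load_percent(100000, 25000));
		return 0;
	}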
*/ -static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) +static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu, + int cpu_idx) { u32 load; u64 now, now_idle, delta_time, delta_idle; now_idle = get_cpu_idle_time(cpu, &now, 0); - delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; - delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; + delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx]; + delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx]; if (delta_time <= delta_idle) load = 0; else load = div64_u64(100 * (delta_time - delta_idle), delta_time); - cpufreq_device->time_in_idle[cpu] = now_idle; - cpufreq_device->time_in_idle_timestamp[cpu] = now; + cpufreq_device->time_in_idle[cpu_idx] = now_idle; + cpufreq_device->time_in_idle_timestamp[cpu_idx] = now; return load; } @@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, u32 load; if (cpu_online(cpu)) - load = get_load(cpufreq_device, cpu); + load = get_load(cpufreq_device, cpu, i); else load = 0; diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index be4eedcb839a..9043f8f91852 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -475,14 +475,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, sensor_np = of_node_get(dev->of_node); - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { struct of_phandle_args sensor_specs; int ret, id; - /* Check whether child is enabled or not */ - if (!of_device_is_available(child)) - continue; - /* For now, thermal framework supports only 1 sensor per zone */ ret = of_parse_phandle_with_args(child, "thermal-sensors", "#thermal-sensor-cells", @@ -881,16 +877,12 @@ int __init of_parse_thermal_zones(void) return 0; /* Run successfully on systems without thermal DT */ } - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { struct thermal_zone_device *zone; struct thermal_zone_params *tzp; int i, mask = 0; u32 prop; - /* Check whether child is enabled or not */ - if (!of_device_is_available(child)) - continue; - tz = thermal_of_build_thermal_zone(child); if (IS_ERR(tz)) { pr_err("failed to build thermal zone %s: %ld\n", @@ -968,13 +960,9 @@ void of_thermal_destroy_zones(void) return; } - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { struct thermal_zone_device *zone; - /* Check whether child is enabled or not */ - if (!of_device_is_available(child)) - continue; - zone = thermal_zone_get_zone_by_name(child->name); if (IS_ERR(zone)) continue; diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 44b9c485157d..0e735acea33a 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -23,6 +23,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reboot.h> @@ -75,8 +76,10 @@ struct rcar_thermal_priv { #define rcar_has_irq_support(priv) ((priv)->common->base) #define rcar_id_to_shift(priv) ((priv)->id * 8) +#define USE_OF_THERMAL 1 static const struct of_device_id rcar_thermal_dt_ids[] = { { .compatible = "renesas,rcar-thermal", }, + { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL }, {}, }; MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); @@ -200,9 +203,9 @@ err_out_unlock: return ret; } -static 
int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) +static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv, + int *temp) { - struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); int tmp; int ret; @@ -226,6 +229,20 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) return 0; } +static int rcar_thermal_of_get_temp(void *data, int *temp) +{ + struct rcar_thermal_priv *priv = data; + + return rcar_thermal_get_current_temp(priv, temp); +} + +static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) +{ + struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone); + + return rcar_thermal_get_current_temp(priv, temp); +} + static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone, int trip, enum thermal_trip_type *type) { @@ -282,6 +299,10 @@ static int rcar_thermal_notify(struct thermal_zone_device *zone, return 0; } +static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = { + .get_temp = rcar_thermal_of_get_temp, +}; + static struct thermal_zone_device_ops rcar_thermal_zone_ops = { .get_temp = rcar_thermal_get_temp, .get_trip_type = rcar_thermal_get_trip_type, @@ -318,14 +339,20 @@ static void rcar_thermal_work(struct work_struct *work) priv = container_of(work, struct rcar_thermal_priv, work.work); - rcar_thermal_get_temp(priv->zone, &cctemp); + ret = rcar_thermal_get_current_temp(priv, &cctemp); + if (ret < 0) + return; + ret = rcar_thermal_update_temp(priv); if (ret < 0) return; rcar_thermal_irq_enable(priv); - rcar_thermal_get_temp(priv->zone, &nctemp); + ret = rcar_thermal_get_current_temp(priv, &nctemp); + if (ret < 0) + return; + if (nctemp != cctemp) thermal_zone_device_update(priv->zone); } @@ -403,6 +430,8 @@ static int rcar_thermal_probe(struct platform_device *pdev) struct rcar_thermal_priv *priv; struct device *dev = &pdev->dev; struct resource *res, *irq; + const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev); + unsigned long of_data = (unsigned long)of_id->data; int mres = 0; int i; int ret = -ENODEV; @@ -463,7 +492,13 @@ static int rcar_thermal_probe(struct platform_device *pdev) if (ret < 0) goto error_unregister; - priv->zone = thermal_zone_device_register("rcar_thermal", + if (of_data == USE_OF_THERMAL) + priv->zone = thermal_zone_of_sensor_register( + dev, i, priv, + &rcar_thermal_zone_of_ops); + else + priv->zone = thermal_zone_device_register( + "rcar_thermal", 1, 0, priv, &rcar_thermal_zone_ops, NULL, 0, idle); diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c index 534dd9136662..81b35aace9de 100644 --- a/drivers/thermal/spear_thermal.c +++ b/drivers/thermal/spear_thermal.c @@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = { .get_temp = thermal_get_temp, }; -#ifdef CONFIG_PM -static int spear_thermal_suspend(struct device *dev) +static int __maybe_unused spear_thermal_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); @@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev) return 0; } -static int spear_thermal_resume(struct device *dev) +static int __maybe_unused spear_thermal_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); @@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev) return 0; } -#endif static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, 
spear_thermal_suspend, spear_thermal_resume); diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c index 39a0fa8a4c0a..e991d55914db 100644 --- a/drivers/usb/dwc2/core.c +++ b/drivers/usb/dwc2/core.c @@ -572,12 +572,6 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host) set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE; clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE; - /* - * If the force mode bit is already set, don't set it. - */ - if ((gusbcfg & set) && !(gusbcfg & clear)) - return false; - gusbcfg &= ~clear; gusbcfg |= set; dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG); @@ -3278,9 +3272,6 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) /** * During device initialization, read various hardware configuration * registers and interpret the contents. - * - * This should be called during driver probe. It will perform a core - * soft reset in order to get the reset values of the parameters. */ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) { @@ -3288,7 +3279,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) unsigned width; u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4; u32 grxfsiz; - int retval; /* * Attempt to ensure this device is really a DWC_otg Controller. @@ -3308,10 +3298,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf, hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid); - retval = dwc2_core_reset(hsotg); - if (retval) - return retval; - hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1); hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2); hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3); diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index 510f787434b3..690b9fd98b55 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -530,7 +530,13 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) return retval; - /* Reset the controller and detect hardware config values */ + /* + * Reset before dwc2_get_hwparams() then it could get power-on real + * reset value form registers. 
+ */ + dwc2_core_reset_and_force_dr_mode(hsotg); + + /* Detect config values from hardware */ retval = dwc2_get_hwparams(hsotg); if (retval) goto error; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index af023a81a0b0..7d1dd82a95ac 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2789,6 +2789,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) dwc->gadget.speed = USB_SPEED_UNKNOWN; dwc->gadget.sg_supported = true; dwc->gadget.name = "dwc3-gadget"; + dwc->gadget.is_otg = dwc->dr_mode == USB_DR_MODE_OTG; /* * FIXME We might be setting max_speed to <SUPER, however versions diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h index 04ce6b156b35..e0244fb3903d 100644 --- a/drivers/usb/host/xhci-ext-caps.h +++ b/drivers/usb/host/xhci-ext-caps.h @@ -112,12 +112,16 @@ static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id) offset = start; if (!start || start == XHCI_HCC_PARAMS_OFFSET) { val = readl(base + XHCI_HCC_PARAMS_OFFSET); + if (val == ~0) + return 0; offset = XHCI_HCC_EXT_CAPS(val) << 2; if (!offset) return 0; }; do { val = readl(base + offset); + if (val == ~0) + return 0; if (XHCI_EXT_CAPS_ID(val) == id && offset != start) return offset; diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index c30de7c39f44..73f763c4f5f5 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -275,8 +275,9 @@ static bool need_bw_sch(struct usb_host_endpoint *ep, return false; /* - * for LS & FS periodic endpoints which its device don't attach - * to TT are also ignored, root-hub will schedule them directly + * for LS & FS periodic endpoints which its device is not behind + * a TT are also ignored, root-hub will schedule them directly, + * but need set @bpkts field of endpoint context to 1. */ if (is_fs_or_ls(speed) && !has_tt) return false; @@ -339,8 +340,17 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)), usb_endpoint_dir_in(&ep->desc), ep); - if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) + if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) { + /* + * set @bpkts to 1 if it is LS or FS periodic endpoint, and its + * device does not connected through an external HS hub + */ + if (usb_endpoint_xfer_int(&ep->desc) + || usb_endpoint_xfer_isoc(&ep->desc)) + ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1)); + return 0; + } bw_index = get_bw_index(xhci, udev, ep); sch_bw = &sch_array[bw_index]; diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index c9ab6a44c34a..9532f5aef71b 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -696,9 +696,24 @@ static int xhci_mtk_remove(struct platform_device *dev) } #ifdef CONFIG_PM_SLEEP +/* + * if ip sleep fails, and all clocks are disabled, access register will hang + * AHB bus, so stop polling roothubs to avoid regs access on bus suspend. + * and no need to check whether ip sleep failed or not; this will cause SPM + * to wake up system immediately after system suspend complete if ip sleep + * fails, it is what we wanted. 
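The xhci-ext-caps hunk above adds a check for readl() returning ~0, which is what an MMIO read of a PCI device yields once the controller has been surprise-removed or has otherwise dropped off the bus; returning 0 early keeps the extended-capability walk from chasing all-ones garbage. A hypothetical helper showing the same guard in isolation (the wrapper and its name are illustrative, not part of the patch):

	#include <linux/io.h>
	#include <linux/types.h>

	/* Read a 32-bit register, treating all-ones as "device is gone".
	 * Returns true and stores the value on success, false otherwise. */
	static bool demo_readl_alive(void __iomem *addr, u32 *val)
	{
		u32 v = readl(addr);

		if (v == ~0)
			return false;	/* surprise removal or dead controller */
		*val = v;
		return true;
	}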
+ */ static int xhci_mtk_suspend(struct device *dev) { struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); + struct usb_hcd *hcd = mtk->hcd; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + + xhci_dbg(xhci, "%s: stop port polling\n", __func__); + clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); + del_timer_sync(&hcd->rh_timer); + clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + del_timer_sync(&xhci->shared_hcd->rh_timer); xhci_mtk_host_disable(mtk); xhci_mtk_phy_power_off(mtk); @@ -710,11 +725,19 @@ static int xhci_mtk_suspend(struct device *dev) static int xhci_mtk_resume(struct device *dev) { struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev); + struct usb_hcd *hcd = mtk->hcd; + struct xhci_hcd *xhci = hcd_to_xhci(hcd); usb_wakeup_disable(mtk); xhci_mtk_clks_enable(mtk); xhci_mtk_phy_power_on(mtk); xhci_mtk_host_enable(mtk); + + xhci_dbg(xhci, "%s: restart port polling\n", __func__); + set_bit(HCD_FLAG_POLL_RH, &hcd->flags); + usb_hcd_poll_rh_status(hcd); + set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); + usb_hcd_poll_rh_status(xhci->shared_hcd); return 0; } diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 58c43ed7ff3b..f0640b7a1c42 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -28,7 +28,9 @@ #include "xhci.h" #include "xhci-trace.h" -#define PORT2_SSIC_CONFIG_REG2 0x883c +#define SSIC_PORT_NUM 2 +#define SSIC_PORT_CFG2 0x880c +#define SSIC_PORT_CFG2_OFFSET 0x30 #define PROG_DONE (1 << 30) #define SSIC_PORT_UNUSED (1 << 31) @@ -45,6 +47,7 @@ #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f +#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 static const char hcd_name[] = "xhci_hcd"; @@ -151,9 +154,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) { xhci->quirks |= XHCI_PME_STUCK_QUIRK; } + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { + xhci->quirks |= XHCI_SSIC_PORT_UNUSED; + } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_EJ168) { xhci->quirks |= XHCI_RESET_ON_RESUME; @@ -312,22 +320,20 @@ static void xhci_pci_remove(struct pci_dev *dev) * SSIC PORT need to be marked as "unused" before putting xHCI * into D3. After D3 exit, the SSIC port need to be marked as "used". * Without this change, xHCI might not enter D3 state. 
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear - * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 */ -static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) +static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); u32 val; void __iomem *reg; + int i; - if (pdev->vendor == PCI_VENDOR_ID_INTEL && - pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { - - reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; + for (i = 0; i < SSIC_PORT_NUM; i++) { + reg = (void __iomem *) xhci->cap_regs + + SSIC_PORT_CFG2 + + i * SSIC_PORT_CFG2_OFFSET; - /* Notify SSIC that SSIC profile programming is not done */ + /* Notify SSIC that SSIC profile programming is not done. */ val = readl(reg) & ~PROG_DONE; writel(val, reg); @@ -344,6 +350,17 @@ static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) writel(val, reg); readl(reg); } +} + +/* + * Make sure PME works on some Intel xHCI controllers by writing 1 to clear + * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 + */ +static void xhci_pme_quirk(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + void __iomem *reg; + u32 val; reg = (void __iomem *) xhci->cap_regs + 0x80a4; val = readl(reg); @@ -355,6 +372,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + int ret; /* * Systems with the TI redriver that loses port status change events @@ -364,9 +382,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) pdev->no_d3cold = true; if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(hcd, true); + xhci_pme_quirk(hcd); + + if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) + xhci_ssic_port_unused_quirk(hcd, true); - return xhci_suspend(xhci, do_wakeup); + ret = xhci_suspend(xhci, do_wakeup); + if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED)) + xhci_ssic_port_unused_quirk(hcd, false); + + return ret; } static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) @@ -396,8 +421,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) if (pdev->vendor == PCI_VENDOR_ID_INTEL) usb_enable_intel_xhci_ports(pdev); + if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) + xhci_ssic_port_unused_quirk(hcd, false); + if (xhci->quirks & XHCI_PME_STUCK_QUIRK) - xhci_pme_quirk(hcd, false); + xhci_pme_quirk(hcd); retval = xhci_resume(xhci, hibernated); return retval; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 770b6b088797..d39d6bf1d090 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -184,7 +184,8 @@ static int xhci_plat_probe(struct platform_device *pdev) struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); /* Just copy data for now */ - *priv = *priv_match; + if (priv_match) + *priv = *priv_match; } if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index f1c21c40b4a6..3915657e6078 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -2193,10 +2193,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, } /* Fast path - was this the last TRB in the TD for this URB? 
*/ } else if (event_trb == td->last_trb) { - if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX) - return finish_td(xhci, td, event_trb, event, ep, - status, false); - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) { td->urb->actual_length = td->urb->transfer_buffer_length - @@ -2248,12 +2244,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td, td->urb->actual_length += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) - EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); - - if (trb_comp_code == COMP_SHORT_TX) { - xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n"); - td->urb_length_set = true; - return 0; - } } return finish_td(xhci, td, event_trb, event, ep, status, false); diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 26a44c0e969e..0c8087d3c313 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -1554,7 +1554,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "HW died, freeing TD."); urb_priv = urb->hcpriv; - for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { + for (i = urb_priv->td_cnt; + i < urb_priv->length && xhci->devs[urb->dev->slot_id]; + i++) { td = urb_priv->td[i]; if (!list_empty(&td->td_list)) list_del_init(&td->td_list); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 9be7348872ba..cc651383ce5a 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1631,6 +1631,7 @@ struct xhci_hcd { #define XHCI_BROKEN_STREAMS (1 << 19) #define XHCI_PME_STUCK_QUIRK (1 << 20) #define XHCI_MTK_HOST (1 << 21) +#define XHCI_SSIC_PORT_UNUSED (1 << 22) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c index b2685e75a683..3eaa4ba6867d 100644 --- a/drivers/usb/musb/ux500.c +++ b/drivers/usb/musb/ux500.c @@ -348,7 +348,9 @@ static int ux500_suspend(struct device *dev) struct ux500_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue_to_musb(glue); - usb_phy_set_suspend(musb->xceiv, 1); + if (musb) + usb_phy_set_suspend(musb->xceiv, 1); + clk_disable_unprepare(glue->clk); return 0; @@ -366,7 +368,8 @@ static int ux500_resume(struct device *dev) return ret; } - usb_phy_set_suspend(musb->xceiv, 0); + if (musb) + usb_phy_set_suspend(musb->xceiv, 0); return 0; } diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c index 0d19a6d61a71..970a30e155cb 100644 --- a/drivers/usb/phy/phy-msm-usb.c +++ b/drivers/usb/phy/phy-msm-usb.c @@ -1599,6 +1599,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg) &motg->id.nb); if (ret < 0) { dev_err(&pdev->dev, "register ID notifier failed\n"); + extcon_unregister_notifier(motg->vbus.extcon, + EXTCON_USB, &motg->vbus.nb); return ret; } @@ -1660,15 +1662,6 @@ static int msm_otg_probe(struct platform_device *pdev) if (!motg) return -ENOMEM; - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - if (!np) - return -ENXIO; - ret = msm_otg_read_dt(pdev, motg); - if (ret) - return ret; - } - motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg), GFP_KERNEL); if (!motg->phy.otg) @@ -1710,6 +1703,15 @@ static int msm_otg_probe(struct platform_device *pdev) if (!motg->regs) return -ENOMEM; + pdata = dev_get_platdata(&pdev->dev); + if (!pdata) { + if (!np) + return -ENXIO; + ret = msm_otg_read_dt(pdev, motg); + if (ret) + return ret; + } + /* * NOTE: The PHYs can be 
multiplexed between the chipidea controller * and the dwc3 controller, using a single bit. It is important that @@ -1717,8 +1719,10 @@ static int msm_otg_probe(struct platform_device *pdev) */ if (motg->phy_number) { phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4); - if (!phy_select) - return -ENOMEM; + if (!phy_select) { + ret = -ENOMEM; + goto unregister_extcon; + } /* Enable second PHY with the OTG port */ writel(0x1, phy_select); } @@ -1728,7 +1732,8 @@ static int msm_otg_probe(struct platform_device *pdev) motg->irq = platform_get_irq(pdev, 0); if (motg->irq < 0) { dev_err(&pdev->dev, "platform_get_irq failed\n"); - return motg->irq; + ret = motg->irq; + goto unregister_extcon; } regs[0].supply = "vddcx"; @@ -1737,7 +1742,7 @@ static int msm_otg_probe(struct platform_device *pdev) ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs); if (ret) - return ret; + goto unregister_extcon; motg->vddcx = regs[0].consumer; motg->v3p3 = regs[1].consumer; @@ -1834,6 +1839,12 @@ disable_clks: clk_disable_unprepare(motg->clk); if (!IS_ERR(motg->core_clk)) clk_disable_unprepare(motg->core_clk); +unregister_extcon: + extcon_unregister_notifier(motg->id.extcon, + EXTCON_USB_HOST, &motg->id.nb); + extcon_unregister_notifier(motg->vbus.extcon, + EXTCON_USB, &motg->vbus.nb); + return ret; } diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index c2936dc48ca7..00bfea01be65 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c @@ -220,7 +220,7 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy) /* Return true if the vbus is there */ static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy) { - unsigned int vbus_value; + unsigned int vbus_value = 0; if (!mxs_phy->regmap_anatop) return false; diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index 0081725c6b5b..6b2a06d09f2b 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -152,7 +152,7 @@ static void lcdc_write(unsigned int val, unsigned int addr) struct da8xx_fb_par { struct device *dev; - resource_size_t p_palette_base; + dma_addr_t p_palette_base; unsigned char *v_palette_base; dma_addr_t vram_phys; unsigned long vram_size; @@ -1428,7 +1428,7 @@ static int fb_probe(struct platform_device *device) par->vram_virt = dma_alloc_coherent(NULL, par->vram_size, - (resource_size_t *) &par->vram_phys, + &par->vram_phys, GFP_KERNEL | GFP_DMA); if (!par->vram_virt) { dev_err(&device->dev, @@ -1448,7 +1448,7 @@ static int fb_probe(struct platform_device *device) /* allocate palette buffer */ par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, - (resource_size_t *)&par->p_palette_base, + &par->p_palette_base, GFP_KERNEL | GFP_DMA); if (!par->v_palette_base) { dev_err(&device->dev, diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c index 95873f26e39c..de2f3e793786 100644 --- a/drivers/video/fbdev/exynos/s6e8ax0.c +++ b/drivers/video/fbdev/exynos/s6e8ax0.c @@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev) return 0; } -#ifdef CONFIG_PM -static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev) +static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev) { struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); @@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev) return 0; } -static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) +static int __maybe_unused 
s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) { struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); @@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) return 0; } -#else -#define s6e8ax0_suspend NULL -#define s6e8ax0_resume NULL -#endif static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { .name = "s6e8ax0", @@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { .power_on = s6e8ax0_power_on, .set_sequence = s6e8ax0_set_sequence, .probe = s6e8ax0_probe, - .suspend = s6e8ax0_suspend, - .resume = s6e8ax0_resume, + .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL, + .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL, }; static int s6e8ax0_init(void) diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c index cee88603efc9..bb2f1e866020 100644 --- a/drivers/video/fbdev/imxfb.c +++ b/drivers/video/fbdev/imxfb.c @@ -902,6 +902,21 @@ static int imxfb_probe(struct platform_device *pdev) goto failed_getclock; } + /* + * The LCDC controller does not have an enable bit. The + * controller starts directly when the clocks are enabled. + * If the clocks are enabled when the controller is not yet + * programmed with proper register values (enabled at the + * bootloader, for example) then it just goes into some undefined + * state. + * To avoid this issue, let's enable and disable LCDC IPG clock + * so that we force some kind of 'reset' to the LCDC block. + */ + ret = clk_prepare_enable(fbi->clk_ipg); + if (ret) + goto failed_getclock; + clk_disable_unprepare(fbi->clk_ipg); + fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(fbi->clk_ahb)) { ret = PTR_ERR(fbi->clk_ahb); diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index de54a4748065..b6f83d5df9fd 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -503,8 +503,7 @@ static int mmphw_probe(struct platform_device *pdev) ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, res->start, resource_size(res)); if (ctrl->reg_base == NULL) { - dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__, - res->start, res->end); + dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res); ret = -ENOMEM; goto failed; } diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c index c9293aea8ec3..a970edc2a6f8 100644 --- a/drivers/video/fbdev/ocfb.c +++ b/drivers/video/fbdev/ocfb.c @@ -123,11 +123,11 @@ static int ocfb_setupfb(struct ocfb_dev *fbdev) /* Horizontal timings */ ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 | - (var->right_margin - 1) << 16 | (var->xres - 1)); + (var->left_margin - 1) << 16 | (var->xres - 1)); /* Vertical timings */ ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 | - (var->lower_margin - 1) << 16 | (var->yres - 1)); + (var->upper_margin - 1) << 16 | (var->yres - 1)); /* Total length of frame */ hlen = var->left_margin + var->right_margin + var->hsync_len + diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 4f0e7be0da34..0f6d8515ba4f 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -145,7 +145,8 @@ config MENF21BMC_WATCHDOG config TANGOX_WATCHDOG tristate "Sigma Designs SMP86xx/SMP87xx watchdog" select WATCHDOG_CORE - depends on ARCH_TANGOX || COMPILE_TEST + depends on ARCH_TANGO || COMPILE_TEST + depends on HAS_IOMEM help Support for the watchdog in Sigma Designs SMP86xx (tango3) and SMP87xx (tango4) family chips. 
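Both the spear_thermal and s6e8ax0 hunks above drop their #ifdef CONFIG_PM blocks in favour of __maybe_unused: the suspend/resume callbacks are always compiled (so they cannot silently bit-rot), and when power management is configured out they are simply never referenced and the linker discards them. A hypothetical driver fragment showing the shape of that pattern together with SIMPLE_DEV_PM_OPS(), which is how spear_thermal wires the callbacks up; s6e8ax0 instead fills its own ops struct with IS_ENABLED(CONFIG_PM) ternaries:

	#include <linux/device.h>
	#include <linux/pm.h>

	static int __maybe_unused demo_suspend(struct device *dev)
	{
		/* quiesce the hardware */
		return 0;
	}

	static int __maybe_unused demo_resume(struct device *dev)
	{
		/* reprogram the hardware */
		return 0;
	}

	/* References the callbacks only when CONFIG_PM_SLEEP is enabled;
	 * __maybe_unused silences "defined but not used" otherwise. */
	static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);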
@@ -618,6 +619,7 @@ config DIGICOLOR_WATCHDOG config LPC18XX_WATCHDOG tristate "LPC18xx/43xx Watchdog" depends on ARCH_LPC18XX || COMPILE_TEST + depends on HAS_IOMEM select WATCHDOG_CORE help Say Y here if to include support for the watchdog timer @@ -1374,6 +1376,7 @@ config BCM_KONA_WDT_DEBUG config BCM7038_WDT tristate "BCM7038 Watchdog" select WATCHDOG_CORE + depends on HAS_IOMEM help Watchdog driver for the built-in hardware in Broadcom 7038 SoCs. @@ -1383,6 +1386,7 @@ config IMGPDC_WDT tristate "Imagination Technologies PDC Watchdog Timer" depends on HAS_IOMEM depends on METAG || MIPS || COMPILE_TEST + select WATCHDOG_CORE help Driver for Imagination Technologies PowerDown Controller Watchdog Timer. diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index f36ca4be0720..ac5840d9689a 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -292,4 +292,4 @@ MODULE_PARM_DESC(nodelay, "Force selection of a timeout setting without initial delay " "(max6373/74 only, default=0)"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 1a11aedc4fe8..68952d9ccf83 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -608,7 +608,7 @@ static int usb_pcwd_probe(struct usb_interface *interface, struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct usb_pcwd_private *usb_pcwd = NULL; - int pipe, maxp; + int pipe; int retval = -ENOMEM; int got_fw_rev; unsigned char fw_rev_major, fw_rev_minor; @@ -641,7 +641,6 @@ static int usb_pcwd_probe(struct usb_interface *interface, /* get a handle to the interrupt data pipe */ pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress); - maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe)); /* allocate memory for our device and initialize it */ usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL); diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 01d816251302..e7a715e82021 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -139,12 +139,11 @@ static int wdt_config(struct watchdog_device *wdd, bool ping) writel_relaxed(UNLOCK, wdt->base + WDTLOCK); writel_relaxed(wdt->load_val, wdt->base + WDTLOAD); + writel_relaxed(INT_MASK, wdt->base + WDTINTCLR); - if (!ping) { - writel_relaxed(INT_MASK, wdt->base + WDTINTCLR); + if (!ping) writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + WDTCONTROL); - } writel_relaxed(LOCK, wdt->base + WDTLOCK); diff --git a/fs/block_dev.c b/fs/block_dev.c index 7b9cd49622b1..39b3a174a425 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -1730,43 +1730,25 @@ static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return __dax_fault(vma, vmf, blkdev_get_block, NULL); } -static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, unsigned int flags) -{ - return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL); -} - -static void blkdev_vm_open(struct vm_area_struct *vma) +static int blkdev_dax_pfn_mkwrite(struct vm_area_struct *vma, + struct vm_fault *vmf) { - struct inode *bd_inode = bdev_file_inode(vma->vm_file); - struct block_device *bdev = I_BDEV(bd_inode); - - inode_lock(bd_inode); - bdev->bd_map_count++; - inode_unlock(bd_inode); + return dax_pfn_mkwrite(vma, vmf); } -static void blkdev_vm_close(struct vm_area_struct *vma) +static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, + pmd_t *pmd, 
unsigned int flags) { - struct inode *bd_inode = bdev_file_inode(vma->vm_file); - struct block_device *bdev = I_BDEV(bd_inode); - - inode_lock(bd_inode); - bdev->bd_map_count--; - inode_unlock(bd_inode); + return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL); } static const struct vm_operations_struct blkdev_dax_vm_ops = { - .open = blkdev_vm_open, - .close = blkdev_vm_close, .fault = blkdev_dax_fault, .pmd_fault = blkdev_dax_pmd_fault, - .pfn_mkwrite = blkdev_dax_fault, + .pfn_mkwrite = blkdev_dax_pfn_mkwrite, }; static const struct vm_operations_struct blkdev_default_vm_ops = { - .open = blkdev_vm_open, - .close = blkdev_vm_close, .fault = filemap_fault, .map_pages = filemap_map_pages, }; @@ -1774,18 +1756,14 @@ static const struct vm_operations_struct blkdev_default_vm_ops = { static int blkdev_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *bd_inode = bdev_file_inode(file); - struct block_device *bdev = I_BDEV(bd_inode); file_accessed(file); - inode_lock(bd_inode); - bdev->bd_map_count++; if (IS_DAX(bd_inode)) { vma->vm_ops = &blkdev_dax_vm_ops; vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; } else { vma->vm_ops = &blkdev_default_vm_ops; } - inode_unlock(bd_inode); return 0; } diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index b90cd3776f8e..f6dac40f87ff 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1406,7 +1406,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, read_extent_buffer(eb, dest + bytes_left, name_off, name_len); if (eb != eb_in) { - btrfs_tree_read_unlock_blocking(eb); + if (!path->skip_locking) + btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); } ret = btrfs_find_item(fs_root, path, parent, 0, @@ -1426,9 +1427,10 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, eb = path->nodes[0]; /* make sure we can use eb after releasing the path */ if (eb != eb_in) { - atomic_inc(&eb->refs); - btrfs_tree_read_lock(eb); - btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); + if (!path->skip_locking) + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); + path->nodes[0] = NULL; + path->locks[0] = 0; } btrfs_release_path(path); iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index c473c42d7d6c..3346cd8f9910 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -637,11 +637,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, faili = nr_pages - 1; cb->nr_pages = nr_pages; - /* In the parent-locked case, we only locked the range we are - * interested in. In all other cases, we can opportunistically - * cache decompressed data that goes beyond the requested range. 
*/ - if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED)) - add_ra_bio_pages(inode, em_start + em_len, cb); + add_ra_bio_pages(inode, em_start + em_len, cb); /* include any pages we added in add_ra-bio_pages */ uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 0be47e4b8136..b57daa895cea 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1689,7 +1689,7 @@ int btrfs_should_delete_dir_index(struct list_head *del_list, * */ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, - struct list_head *ins_list) + struct list_head *ins_list, bool *emitted) { struct btrfs_dir_item *di; struct btrfs_delayed_item *curr, *next; @@ -1733,6 +1733,7 @@ int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, if (over) return 1; + *emitted = true; } return 0; } diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h index f70119f25421..0167853c84ae 100644 --- a/fs/btrfs/delayed-inode.h +++ b/fs/btrfs/delayed-inode.h @@ -144,7 +144,7 @@ void btrfs_put_delayed_items(struct list_head *ins_list, int btrfs_should_delete_dir_index(struct list_head *del_list, u64 index); int btrfs_readdir_delayed_dir_index(struct dir_context *ctx, - struct list_head *ins_list); + struct list_head *ins_list, bool *emitted); /* for init */ int __init btrfs_delayed_inode_init(void); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 2e7c97a3f344..392592dc7010 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2897,12 +2897,11 @@ static int __do_readpage(struct extent_io_tree *tree, struct block_device *bdev; int ret; int nr = 0; - int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED; size_t pg_offset = 0; size_t iosize; size_t disk_io_size; size_t blocksize = inode->i_sb->s_blocksize; - unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED; + unsigned long this_bio_flag = 0; set_page_extent_mapped(page); @@ -2942,18 +2941,16 @@ static int __do_readpage(struct extent_io_tree *tree, kunmap_atomic(userpage); set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); - if (!parent_locked) - unlock_extent_cached(tree, cur, - cur + iosize - 1, - &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, + cur + iosize - 1, + &cached, GFP_NOFS); break; } em = __get_extent_map(inode, page, pg_offset, cur, end - cur + 1, get_extent, em_cached); if (IS_ERR_OR_NULL(em)) { SetPageError(page); - if (!parent_locked) - unlock_extent(tree, cur, end); + unlock_extent(tree, cur, end); break; } extent_offset = cur - em->start; @@ -3038,12 +3035,9 @@ static int __do_readpage(struct extent_io_tree *tree, set_extent_uptodate(tree, cur, cur + iosize - 1, &cached, GFP_NOFS); - if (parent_locked) - free_extent_state(cached); - else - unlock_extent_cached(tree, cur, - cur + iosize - 1, - &cached, GFP_NOFS); + unlock_extent_cached(tree, cur, + cur + iosize - 1, + &cached, GFP_NOFS); cur = cur + iosize; pg_offset += iosize; continue; @@ -3052,8 +3046,7 @@ static int __do_readpage(struct extent_io_tree *tree, if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1, NULL)) { check_page_uptodate(tree, page); - if (!parent_locked) - unlock_extent(tree, cur, cur + iosize - 1); + unlock_extent(tree, cur, cur + iosize - 1); cur = cur + iosize; pg_offset += iosize; continue; @@ -3063,8 +3056,7 @@ static int __do_readpage(struct extent_io_tree *tree, */ if (block_start == EXTENT_MAP_INLINE) { SetPageError(page); - if (!parent_locked) - unlock_extent(tree, cur, cur + iosize - 1); + unlock_extent(tree, cur, cur + 
iosize - 1); cur = cur + iosize; pg_offset += iosize; continue; @@ -3083,8 +3075,7 @@ static int __do_readpage(struct extent_io_tree *tree, *bio_flags = this_bio_flag; } else { SetPageError(page); - if (!parent_locked) - unlock_extent(tree, cur, cur + iosize - 1); + unlock_extent(tree, cur, cur + iosize - 1); } cur = cur + iosize; pg_offset += iosize; @@ -3213,20 +3204,6 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, return ret; } -int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, - get_extent_t *get_extent, int mirror_num) -{ - struct bio *bio = NULL; - unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED; - int ret; - - ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num, - &bio_flags, READ, NULL); - if (bio) - ret = submit_one_bio(READ, bio, mirror_num, bio_flags); - return ret; -} - static noinline void update_nr_written(struct page *page, struct writeback_control *wbc, unsigned long nr_written) diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 0377413bd4b9..880d5292e972 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -29,7 +29,6 @@ */ #define EXTENT_BIO_COMPRESSED 1 #define EXTENT_BIO_TREE_LOG 2 -#define EXTENT_BIO_PARENT_LOCKED 4 #define EXTENT_BIO_FLAG_SHIFT 16 /* these are bit numbers for test/set bit */ @@ -210,8 +209,6 @@ static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end) int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end); int extent_read_full_page(struct extent_io_tree *tree, struct page *page, get_extent_t *get_extent, int mirror_num); -int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page, - get_extent_t *get_extent, int mirror_num); int __init extent_io_init(void); void extent_io_exit(void); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5f06eb1f4384..151b7c71b868 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5717,6 +5717,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) char *name_ptr; int name_len; int is_curr = 0; /* ctx->pos points to the current index? */ + bool emitted; /* FIXME, use a real flag for deciding about the key type */ if (root->fs_info->tree_root == root) @@ -5745,6 +5746,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) if (ret < 0) goto err; + emitted = false; while (1) { leaf = path->nodes[0]; slot = path->slots[0]; @@ -5824,6 +5826,7 @@ skip: if (over) goto nopos; + emitted = true; di_len = btrfs_dir_name_len(leaf, di) + btrfs_dir_data_len(leaf, di) + sizeof(*di); di_cur += di_len; @@ -5836,11 +5839,20 @@ next: if (key_type == BTRFS_DIR_INDEX_KEY) { if (is_curr) ctx->pos++; - ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list); + ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted); if (ret) goto nopos; } + /* + * If we haven't emitted any dir entry, we must not touch ctx->pos as + * it was was set to the termination value in previous call. We assume + * that "." and ".." were emitted if we reach this point and set the + * termination value as well for an empty directory. + */ + if (ctx->pos > 2 && !emitted) + goto nopos; + /* Reached end of directory/root. Bump pos past the last item. 
*/ ctx->pos++; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 952172ca7e45..48aee9846329 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2794,24 +2794,29 @@ out: static struct page *extent_same_get_page(struct inode *inode, pgoff_t index) { struct page *page; - struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; page = grab_cache_page(inode->i_mapping, index); if (!page) - return NULL; + return ERR_PTR(-ENOMEM); if (!PageUptodate(page)) { - if (extent_read_full_page_nolock(tree, page, btrfs_get_extent, - 0)) - return NULL; + int ret; + + ret = btrfs_readpage(NULL, page); + if (ret) + return ERR_PTR(ret); lock_page(page); if (!PageUptodate(page)) { unlock_page(page); page_cache_release(page); - return NULL; + return ERR_PTR(-EIO); + } + if (page->mapping != inode->i_mapping) { + unlock_page(page); + page_cache_release(page); + return ERR_PTR(-EAGAIN); } } - unlock_page(page); return page; } @@ -2823,17 +2828,31 @@ static int gather_extent_pages(struct inode *inode, struct page **pages, pgoff_t index = off >> PAGE_CACHE_SHIFT; for (i = 0; i < num_pages; i++) { +again: pages[i] = extent_same_get_page(inode, index + i); - if (!pages[i]) - return -ENOMEM; + if (IS_ERR(pages[i])) { + int err = PTR_ERR(pages[i]); + + if (err == -EAGAIN) + goto again; + pages[i] = NULL; + return err; + } } return 0; } -static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) +static int lock_extent_range(struct inode *inode, u64 off, u64 len, + bool retry_range_locking) { - /* do any pending delalloc/csum calc on src, one way or - another, and lock file content */ + /* + * Do any pending delalloc/csum calculations on inode, one way or + * another, and lock file content. + * The locking order is: + * + * 1) pages + * 2) range in the inode's io tree + */ while (1) { struct btrfs_ordered_extent *ordered; lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); @@ -2851,8 +2870,11 @@ static inline void lock_extent_range(struct inode *inode, u64 off, u64 len) unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1); if (ordered) btrfs_put_ordered_extent(ordered); + if (!retry_range_locking) + return -EAGAIN; btrfs_wait_ordered_range(inode, off, len); } + return 0; } static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2) @@ -2877,15 +2899,24 @@ static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1); } -static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, - struct inode *inode2, u64 loff2, u64 len) +static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1, + struct inode *inode2, u64 loff2, u64 len, + bool retry_range_locking) { + int ret; + if (inode1 < inode2) { swap(inode1, inode2); swap(loff1, loff2); } - lock_extent_range(inode1, loff1, len); - lock_extent_range(inode2, loff2, len); + ret = lock_extent_range(inode1, loff1, len, retry_range_locking); + if (ret) + return ret; + ret = lock_extent_range(inode2, loff2, len, retry_range_locking); + if (ret) + unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, + loff1 + len - 1); + return ret; } struct cmp_pages { @@ -2901,11 +2932,15 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp) for (i = 0; i < cmp->num_pages; i++) { pg = cmp->src_pages[i]; - if (pg) + if (pg) { + unlock_page(pg); page_cache_release(pg); + } pg = cmp->dst_pages[i]; - if (pg) + if (pg) { + unlock_page(pg); page_cache_release(pg); + } } kfree(cmp->src_pages); kfree(cmp->dst_pages); @@ -2966,6 +3001,8 @@ static int 
btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, src_page = cmp->src_pages[i]; dst_page = cmp->dst_pages[i]; + ASSERT(PageLocked(src_page)); + ASSERT(PageLocked(dst_page)); addr = kmap_atomic(src_page); dst_addr = kmap_atomic(dst_page); @@ -3078,14 +3115,46 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, goto out_unlock; } +again: ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp); if (ret) goto out_unlock; if (same_inode) - lock_extent_range(src, same_lock_start, same_lock_len); + ret = lock_extent_range(src, same_lock_start, same_lock_len, + false); else - btrfs_double_extent_lock(src, loff, dst, dst_loff, len); + ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len, + false); + /* + * If one of the inodes has dirty pages in the respective range or + * ordered extents, we need to flush dellaloc and wait for all ordered + * extents in the range. We must unlock the pages and the ranges in the + * io trees to avoid deadlocks when flushing delalloc (requires locking + * pages) and when waiting for ordered extents to complete (they require + * range locking). + */ + if (ret == -EAGAIN) { + /* + * Ranges in the io trees already unlocked. Now unlock all + * pages before waiting for all IO to complete. + */ + btrfs_cmp_data_free(&cmp); + if (same_inode) { + btrfs_wait_ordered_range(src, same_lock_start, + same_lock_len); + } else { + btrfs_wait_ordered_range(src, loff, len); + btrfs_wait_ordered_range(dst, dst_loff, len); + } + goto again; + } + ASSERT(ret == 0); + if (WARN_ON(ret)) { + /* ranges in the io trees already unlocked */ + btrfs_cmp_data_free(&cmp); + return ret; + } /* pass original length for comparison so we stay within i_size */ ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp); @@ -3795,9 +3864,15 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, u64 lock_start = min_t(u64, off, destoff); u64 lock_len = max_t(u64, off, destoff) + len - lock_start; - lock_extent_range(src, lock_start, lock_len); + ret = lock_extent_range(src, lock_start, lock_len, true); } else { - btrfs_double_extent_lock(src, off, inode, destoff, len); + ret = btrfs_double_extent_lock(src, off, inode, destoff, len, + true); + } + ASSERT(ret == 0); + if (WARN_ON(ret)) { + /* ranges in the io trees already unlocked */ + goto out_unlock; } ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 86a9c383955e..eb9028e8cfc5 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -698,8 +698,8 @@ static void ceph_aio_retry_work(struct work_struct *work) req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2, false, GFP_NOFS); - if (IS_ERR(req)) { - ret = PTR_ERR(req); + if (!req) { + ret = -ENOMEM; req = orig_req; goto out; } @@ -716,7 +716,6 @@ static void ceph_aio_retry_work(struct work_struct *work) ceph_osdc_build_request(req, req->r_ops[0].extent.offset, snapc, CEPH_NOSNAP, &aio_req->mtime); - ceph_put_snap_context(snapc); ceph_osdc_put_request(orig_req); req->r_callback = ceph_aio_complete_req; @@ -731,6 +730,7 @@ out: ceph_aio_complete_req(req, NULL); } + ceph_put_snap_context(snapc); kfree(aio_work); } diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index a5b8eb69a8f4..6402eaf8ab95 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -1261,6 +1261,9 @@ COMPATIBLE_IOCTL(HCIUNBLOCKADDR) COMPATIBLE_IOCTL(HCIINQUIRY) COMPATIBLE_IOCTL(HCIUARTSETPROTO) COMPATIBLE_IOCTL(HCIUARTGETPROTO) +COMPATIBLE_IOCTL(HCIUARTGETDEVICE) +COMPATIBLE_IOCTL(HCIUARTSETFLAGS) 
+COMPATIBLE_IOCTL(HCIUARTGETFLAGS) COMPATIBLE_IOCTL(RFCOMMCREATEDEV) COMPATIBLE_IOCTL(RFCOMMRELEASEDEV) COMPATIBLE_IOCTL(RFCOMMGETDEVLIST) @@ -58,6 +58,26 @@ static void dax_unmap_atomic(struct block_device *bdev, blk_queue_exit(bdev->bd_queue); } +struct page *read_dax_sector(struct block_device *bdev, sector_t n) +{ + struct page *page = alloc_pages(GFP_KERNEL, 0); + struct blk_dax_ctl dax = { + .size = PAGE_SIZE, + .sector = n & ~((((int) PAGE_SIZE) / 512) - 1), + }; + long rc; + + if (!page) + return ERR_PTR(-ENOMEM); + + rc = dax_map_atomic(bdev, &dax); + if (rc < 0) + return ERR_PTR(rc); + memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE); + dax_unmap_atomic(bdev, &dax); + return page; +} + /* * dax_clear_blocks() is called from within transaction context from XFS, * and hence this means the stack from this point must follow GFP_NOFS @@ -338,7 +358,8 @@ static int dax_radix_entry(struct address_space *mapping, pgoff_t index, void *entry; WARN_ON_ONCE(pmd_entry && !dirty); - __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); + if (dirty) + __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); spin_lock_irq(&mapping->tree_lock); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index ae1dbcf47e97..cde60741cad2 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -94,6 +94,11 @@ /* Epoll private bits inside the event mask */ #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE) +#define EPOLLINOUT_BITS (POLLIN | POLLOUT) + +#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \ + EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE) + /* Maximum number of nesting allowed inside epoll sets */ #define EP_MAX_NESTS 4 @@ -1068,7 +1073,22 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k * wait list. */ if (waitqueue_active(&ep->wq)) { - ewake = 1; + if ((epi->event.events & EPOLLEXCLUSIVE) && + !((unsigned long)key & POLLFREE)) { + switch ((unsigned long)key & EPOLLINOUT_BITS) { + case POLLIN: + if (epi->event.events & POLLIN) + ewake = 1; + break; + case POLLOUT: + if (epi->event.events & POLLOUT) + ewake = 1; + break; + case 0: + ewake = 1; + break; + } + } wake_up_locked(&ep->wq); } if (waitqueue_active(&ep->poll_wait)) @@ -1875,9 +1895,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, * so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation. * Also, we do not currently supported nested exclusive wakeups. 
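The eventpoll changes above tighten EPOLLEXCLUSIVE in three ways: only a restricted set of event bits may accompany it on EPOLL_CTL_ADD, it cannot be introduced or modified via EPOLL_CTL_MOD, and an exclusive wakeup is delivered only to waiters whose interest set actually matches the ready POLLIN/POLLOUT bits. From user space the flag is used roughly like this; a hypothetical fragment that assumes libc headers new enough to define EPOLLEXCLUSIVE (Linux 4.5+):

	#include <sys/epoll.h>

	/* Each worker thread registers the same listening socket exclusively,
	 * so one incoming connection wakes a single worker rather than all of
	 * them (the classic thundering-herd case). */
	static int watch_exclusive(int epfd, int listen_fd)
	{
		struct epoll_event ev = {
			.events = EPOLLIN | EPOLLEXCLUSIVE,
			.data.fd = listen_fd,
		};

		/* A later EPOLL_CTL_MOD on this entry is rejected by the kernel. */
		return epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
	}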
*/ - if ((epds.events & EPOLLEXCLUSIVE) && (op == EPOLL_CTL_MOD || - (op == EPOLL_CTL_ADD && is_file_epoll(tf.file)))) - goto error_tgt_fput; + if (epds.events & EPOLLEXCLUSIVE) { + if (op == EPOLL_CTL_MOD) + goto error_tgt_fput; + if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) || + (epds.events & ~EPOLLEXCLUSIVE_OK_BITS))) + goto error_tgt_fput; + } /* * At this point it is safe to assume that the "private_data" contains @@ -1950,8 +1974,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, break; case EPOLL_CTL_MOD: if (epi) { - epds.events |= POLLERR | POLLHUP; - error = ep_modify(ep, epi, &epds); + if (!(epi->event.events & EPOLLEXCLUSIVE)) { + epds.events |= POLLERR | POLLHUP; + error = ep_modify(ep, epi, &epds); + } } else error = -ENOENT; break; diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 5bcd92d50e82..0cb1abd535e3 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1215,7 +1215,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task, hdr->pgio_mirror_idx + 1, &hdr->pgio_mirror_idx)) goto out_eagain; - set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + set_bit(NFS_LAYOUT_RETURN_REQUESTED, &hdr->lseg->pls_layout->plh_flags); pnfs_read_resend_pnfs(hdr); return task->tk_status; diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index 29898a9550fa..eb370460ce20 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -412,7 +412,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, OP_ILLEGAL, GFP_NOIO); if (!fail_return) { if (ff_layout_has_available_ds(lseg)) - set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lseg->pls_layout->plh_flags); else pnfs_error_mark_layout_for_return(ino, lseg); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index a3592cc34a20..482b6e94bb37 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -52,9 +52,7 @@ static DEFINE_SPINLOCK(pnfs_spinlock); */ static LIST_HEAD(pnfs_modules_tbl); -static int -pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid, - enum pnfs_iomode iomode, bool sync); +static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo); /* Return the registered pnfs layout driver module matching given id */ static struct pnfs_layoutdriver_type * @@ -243,6 +241,8 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo) { struct inode *inode = lo->plh_inode; + pnfs_layoutreturn_before_put_layout_hdr(lo); + if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) { if (!list_empty(&lo->plh_segs)) WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n"); @@ -345,58 +345,6 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo, rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq); } -/* Return true if layoutreturn is needed */ -static bool -pnfs_layout_need_return(struct pnfs_layout_hdr *lo, - struct pnfs_layout_segment *lseg) -{ - struct pnfs_layout_segment *s; - - if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) - return false; - - list_for_each_entry(s, &lo->plh_segs, pls_list) - if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) - return false; - - return true; -} - -static bool -pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo) -{ - if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) - return false; - lo->plh_return_iomode = 0; - pnfs_get_layout_hdr(lo); - clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags); - return 
true; -} - -static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, - struct pnfs_layout_hdr *lo, struct inode *inode) -{ - lo = lseg->pls_layout; - inode = lo->plh_inode; - - spin_lock(&inode->i_lock); - if (pnfs_layout_need_return(lo, lseg)) { - nfs4_stateid stateid; - enum pnfs_iomode iomode; - bool send; - - nfs4_stateid_copy(&stateid, &lo->plh_stateid); - iomode = lo->plh_return_iomode; - send = pnfs_prepare_layoutreturn(lo); - spin_unlock(&inode->i_lock); - if (send) { - /* Send an async layoutreturn so we dont deadlock */ - pnfs_send_layoutreturn(lo, &stateid, iomode, false); - } - } else - spin_unlock(&inode->i_lock); -} - void pnfs_put_lseg(struct pnfs_layout_segment *lseg) { @@ -410,15 +358,8 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg) atomic_read(&lseg->pls_refcount), test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); - /* Handle the case where refcount != 1 */ - if (atomic_add_unless(&lseg->pls_refcount, -1, 1)) - return; - lo = lseg->pls_layout; inode = lo->plh_inode; - /* Do we need a layoutreturn? */ - if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) - pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) { @@ -937,6 +878,17 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo) rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq); } +static bool +pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo) +{ + if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) + return false; + lo->plh_return_iomode = 0; + pnfs_get_layout_hdr(lo); + clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags); + return true; +} + static int pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid, enum pnfs_iomode iomode, bool sync) @@ -971,6 +923,48 @@ out: return status; } +/* Return true if layoutreturn is needed */ +static bool +pnfs_layout_need_return(struct pnfs_layout_hdr *lo) +{ + struct pnfs_layout_segment *s; + + if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) + return false; + + /* Defer layoutreturn until all lsegs are done */ + list_for_each_entry(s, &lo->plh_segs, pls_list) { + if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) + return false; + } + + return true; +} + +static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo) +{ + struct inode *inode= lo->plh_inode; + + if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) + return; + spin_lock(&inode->i_lock); + if (pnfs_layout_need_return(lo)) { + nfs4_stateid stateid; + enum pnfs_iomode iomode; + bool send; + + nfs4_stateid_copy(&stateid, &lo->plh_stateid); + iomode = lo->plh_return_iomode; + send = pnfs_prepare_layoutreturn(lo); + spin_unlock(&inode->i_lock); + if (send) { + /* Send an async layoutreturn so we dont deadlock */ + pnfs_send_layoutreturn(lo, &stateid, iomode, false); + } + } else + spin_unlock(&inode->i_lock); +} + /* * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr * when the layout segment list is empty. 
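[Editor's aside on the pnfs hunks above: the "return requested" flag is no longer acted on from every segment put; it is evaluated once, when the layout header is about to drop a reference and no segment still carries the layoutreturn flag, so the return fires at most once and only after the segments are done. A userspace analogue of that deferral, with invented names and no locking.]

#include <stdbool.h>
#include <stdio.h>

struct layout {
    int refcount;
    int busy_segments;          /* segments still flagged for return */
    bool return_requested;
};

static void send_layoutreturn(struct layout *lo)
{
    lo->return_requested = false;
    printf("LAYOUTRETURN sent\n");
}

static void layout_put(struct layout *lo)
{
    /* Deferred check, in the spirit of
     * pnfs_layoutreturn_before_put_layout_hdr() above. */
    if (lo->return_requested && lo->busy_segments == 0)
        send_layoutreturn(lo);

    if (--lo->refcount == 0)
        printf("layout freed\n");
}

int main(void)
{
    struct layout lo = { .refcount = 1, .busy_segments = 0,
                         .return_requested = true };

    layout_put(&lo);            /* prints: LAYOUTRETURN sent, layout freed */
    return 0;
}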
@@ -1091,7 +1085,7 @@ bool pnfs_roc(struct inode *ino) nfs4_stateid_copy(&stateid, &lo->plh_stateid); /* always send layoutreturn if being marked so */ - if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + if (test_and_clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) layoutreturn = pnfs_prepare_layoutreturn(lo); @@ -1772,7 +1766,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, pnfs_set_plh_return_iomode(lo, return_range->iomode); if (!mark_lseg_invalid(lseg, tmp_list)) remaining++; - set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, + set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags); } return remaining; diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 9f4e2a47f4aa..1ac1db5f6dad 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -94,8 +94,8 @@ enum { NFS_LAYOUT_RO_FAILED = 0, /* get ro layout failed stop trying */ NFS_LAYOUT_RW_FAILED, /* get rw layout failed stop trying */ NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ - NFS_LAYOUT_RETURN, /* Return this layout ASAP */ - NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */ + NFS_LAYOUT_RETURN, /* layoutreturn in progress */ + NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */ NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */ NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */ }; diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index a3cc6d2fc896..a76b9ea7722e 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1254,15 +1254,15 @@ static const struct file_operations o2hb_debug_fops = { void o2hb_exit(void) { - kfree(o2hb_db_livenodes); - kfree(o2hb_db_liveregions); - kfree(o2hb_db_quorumregions); - kfree(o2hb_db_failedregions); debugfs_remove(o2hb_debug_failedregions); debugfs_remove(o2hb_debug_quorumregions); debugfs_remove(o2hb_debug_liveregions); debugfs_remove(o2hb_debug_livenodes); debugfs_remove(o2hb_debug_dir); + kfree(o2hb_db_livenodes); + kfree(o2hb_db_liveregions); + kfree(o2hb_db_quorumregions); + kfree(o2hb_db_failedregions); } static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir, @@ -1438,13 +1438,15 @@ static void o2hb_region_release(struct config_item *item) kfree(reg->hr_slots); - kfree(reg->hr_db_regnum); - kfree(reg->hr_db_livenodes); debugfs_remove(reg->hr_debug_livenodes); debugfs_remove(reg->hr_debug_regnum); debugfs_remove(reg->hr_debug_elapsed_time); debugfs_remove(reg->hr_debug_pinned); debugfs_remove(reg->hr_debug_dir); + kfree(reg->hr_db_livenodes); + kfree(reg->hr_db_regnum); + kfree(reg->hr_debug_elapsed_time); + kfree(reg->hr_debug_pinned); spin_lock(&o2hb_live_lock); list_del(®->hr_all_item); diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index c5bdf02c213b..b94a425f0175 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -2367,6 +2367,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) break; } } + dlm_lockres_clear_refmap_bit(dlm, res, + dead_node); spin_unlock(&res->spinlock); continue; } diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 85d16c67c33e..fa95ab2d3674 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -259,23 +259,29 @@ static int do_maps_open(struct inode *inode, struct file *file, sizeof(struct proc_maps_private)); } -static pid_t pid_of_stack(struct proc_maps_private *priv, - struct vm_area_struct *vma, bool is_pid) +/* + * Indicate if the VMA is a stack for the given task; for + * /proc/PID/maps that is the stack of the main task. 
+ */ +static int is_stack(struct proc_maps_private *priv, + struct vm_area_struct *vma, int is_pid) { - struct inode *inode = priv->inode; - struct task_struct *task; - pid_t ret = 0; + int stack = 0; + + if (is_pid) { + stack = vma->vm_start <= vma->vm_mm->start_stack && + vma->vm_end >= vma->vm_mm->start_stack; + } else { + struct inode *inode = priv->inode; + struct task_struct *task; - rcu_read_lock(); - task = pid_task(proc_pid(inode), PIDTYPE_PID); - if (task) { - task = task_of_stack(task, vma, is_pid); + rcu_read_lock(); + task = pid_task(proc_pid(inode), PIDTYPE_PID); if (task) - ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); + stack = vma_is_stack_for_task(vma, task); + rcu_read_unlock(); } - rcu_read_unlock(); - - return ret; + return stack; } static void @@ -335,8 +341,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) name = arch_vma_name(vma); if (!name) { - pid_t tid; - if (!mm) { name = "[vdso]"; goto done; @@ -348,21 +352,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) goto done; } - tid = pid_of_stack(priv, vma, is_pid); - if (tid != 0) { - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. - */ - if (!is_pid || (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack)) { - name = "[stack]"; - } else { - /* Thread stack in /proc/PID/maps */ - seq_pad(m, ' '); - seq_printf(m, "[stack:%d]", tid); - } - } + if (is_stack(priv, vma, is_pid)) + name = "[stack]"; } done: @@ -1552,18 +1543,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long end, struct mm_walk *walk) { + pte_t huge_pte = huge_ptep_get(pte); struct numa_maps *md; struct page *page; - if (!pte_present(*pte)) + if (!pte_present(huge_pte)) return 0; - page = pte_page(*pte); + page = pte_page(huge_pte); if (!page) return 0; md = walk->private; - gather_stats(page, md, pte_dirty(*pte), 1); + gather_stats(page, md, pte_dirty(huge_pte), 1); return 0; } @@ -1617,19 +1609,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) seq_file_path(m, file, "\n\t= "); } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { seq_puts(m, " heap"); - } else { - pid_t tid = pid_of_stack(proc_priv, vma, is_pid); - if (tid != 0) { - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. 
- */ - if (!is_pid || (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack)) - seq_puts(m, " stack"); - else - seq_printf(m, " stack:%d", tid); - } + } else if (is_stack(proc_priv, vma, is_pid)) { + seq_puts(m, " stack"); } if (is_vm_hugetlb_page(vma)) diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index e0d64c92e4f6..faacb0c0d857 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c @@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struct *mm, return size; } -static pid_t pid_of_stack(struct proc_maps_private *priv, - struct vm_area_struct *vma, bool is_pid) +static int is_stack(struct proc_maps_private *priv, + struct vm_area_struct *vma, int is_pid) { - struct inode *inode = priv->inode; - struct task_struct *task; - pid_t ret = 0; - - rcu_read_lock(); - task = pid_task(proc_pid(inode), PIDTYPE_PID); - if (task) { - task = task_of_stack(task, vma, is_pid); + struct mm_struct *mm = vma->vm_mm; + int stack = 0; + + if (is_pid) { + stack = vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack; + } else { + struct inode *inode = priv->inode; + struct task_struct *task; + + rcu_read_lock(); + task = pid_task(proc_pid(inode), PIDTYPE_PID); if (task) - ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info); + stack = vma_is_stack_for_task(vma, task); + rcu_read_unlock(); } - rcu_read_unlock(); - - return ret; + return stack; } /* @@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, if (file) { seq_pad(m, ' '); seq_file_path(m, file, ""); - } else if (mm) { - pid_t tid = pid_of_stack(priv, vma, is_pid); - - if (tid != 0) { - seq_pad(m, ' '); - /* - * Thread stack in /proc/PID/task/TID/maps or - * the main process stack. - */ - if (!is_pid || (vma->vm_start <= mm->start_stack && - vma->vm_end >= mm->start_stack)) - seq_printf(m, "[stack]"); - else - seq_printf(m, "[stack:%d]", tid); - } + } else if (mm && is_stack(priv, vma, is_pid)) { + seq_pad(m, ' '); + seq_printf(m, "[stack]"); } seq_putc(m, '\n'); diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index da37beb76f6e..594f7e63b432 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -4491,7 +4491,7 @@ xlog_recover_process( * know precisely what failed. */ if (pass == XLOG_RECOVER_CRCPASS) { - if (rhead->h_crc && crc != le32_to_cpu(rhead->h_crc)) + if (rhead->h_crc && crc != rhead->h_crc) return -EFSBADCRC; return 0; } @@ -4502,7 +4502,7 @@ xlog_recover_process( * zero CRC check prevents warnings from being emitted when upgrading * the kernel from one that does not add CRCs by default. */ - if (crc != le32_to_cpu(rhead->h_crc)) { + if (crc != rhead->h_crc) { if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) { xfs_alert(log->l_mp, "log record CRC mismatch: found 0x%x, expected 0x%x.", diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 717a29810473..dad8af3ebeb5 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -133,6 +133,5 @@ extern int acpi_get_psd_map(struct cpudata **); /* Methods to interact with the PCC mailbox controller. 
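[Editor's aside on the fs/proc/task_mmu.c and task_nommu.c changes above: pid_of_stack() is replaced by a simple is_stack() test, so /proc/PID/maps labels only the VMA containing mm->start_stack as "[stack]" and the "[stack:TID]" form disappears. A quick userspace check; single-threaded, so exactly one line is expected.]

#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[512];
    FILE *f = fopen("/proc/self/maps", "r");

    if (!f)
        return 1;
    while (fgets(line, sizeof(line), f))
        if (strstr(line, "[stack"))
            fputs(line, stdout);    /* one "[stack]" line, no "[stack:TID]" */
    fclose(f);
    return 0;
}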
*/ extern struct mbox_chan * pcc_mbox_request_channel(struct mbox_client *, unsigned int); -extern int mbox_send_message(struct mbox_chan *chan, void *mssg); #endif /* _CPPC_ACPI_H*/ diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h index 7bfb063029d8..461a0558bca4 100644 --- a/include/drm/drm_cache.h +++ b/include/drm/drm_cache.h @@ -35,4 +35,13 @@ void drm_clflush_pages(struct page *pages[], unsigned long num_pages); +static inline bool drm_arch_can_wc_memory(void) +{ +#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE) + return false; +#else + return true; +#endif +} + #endif diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h index 24ab1787b771..fdb47051d549 100644 --- a/include/drm/drm_dp_mst_helper.h +++ b/include/drm/drm_dp_mst_helper.h @@ -44,8 +44,6 @@ struct drm_dp_vcpi { /** * struct drm_dp_mst_port - MST port * @kref: reference count for this port. - * @guid_valid: for DP 1.2 devices if we have validated the GUID. - * @guid: guid for DP 1.2 device on this port. * @port_num: port number * @input: if this port is an input port. * @mcs: message capability status - DP 1.2 spec. @@ -70,10 +68,6 @@ struct drm_dp_vcpi { struct drm_dp_mst_port { struct kref kref; - /* if dpcd 1.2 device is on this port - its GUID info */ - bool guid_valid; - u8 guid[16]; - u8 port_num; bool input; bool mcs; @@ -110,10 +104,12 @@ struct drm_dp_mst_port { * @tx_slots: transmission slots for this device. * @last_seqno: last sequence number used to talk to this. * @link_address_sent: if a link address message has been sent to this device yet. + * @guid: guid for DP 1.2 branch device. port under this branch can be + * identified by port #. * * This structure represents an MST branch device, there is one - * primary branch device at the root, along with any others connected - * to downstream ports + * primary branch device at the root, along with any other branches connected + * to downstream port of parent branches. */ struct drm_dp_mst_branch { struct kref kref; @@ -132,6 +128,9 @@ struct drm_dp_mst_branch { struct drm_dp_sideband_msg_tx *tx_slots[2]; int last_seqno; bool link_address_sent; + + /* global unique identifier to identify branch devices */ + u8 guid[16]; }; @@ -406,11 +405,9 @@ struct drm_dp_payload { * @conn_base_id: DRM connector ID this mgr is connected to. * @down_rep_recv: msg receiver state for down replies. * @up_req_recv: msg receiver state for up requests. - * @lock: protects mst state, primary, guid, dpcd. + * @lock: protects mst state, primary, dpcd. * @mst_state: if this manager is enabled for an MST capable port. * @mst_primary: pointer to the primary branch device. - * @guid_valid: GUID valid for the primary branch device. - * @guid: GUID for primary port. * @dpcd: cache of DPCD for primary port. * @pbn_div: PBN to slots divisor. 
* @@ -432,13 +429,11 @@ struct drm_dp_mst_topology_mgr { struct drm_dp_sideband_msg_rx up_req_recv; /* pointer to info about the initial MST device */ - struct mutex lock; /* protects mst_state + primary + guid + dpcd */ + struct mutex lock; /* protects mst_state + primary + dpcd */ bool mst_state; struct drm_dp_mst_branch *mst_primary; - /* primary MST device GUID */ - bool guid_valid; - u8 guid[16]; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 sink_count; int pbn_div; diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h index d639049a613d..553210c02ee0 100644 --- a/include/drm/drm_fixed.h +++ b/include/drm/drm_fixed.h @@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B) #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT) #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1) #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK) +#define DRM_FIXED_EPSILON 1LL +#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON) static inline s64 drm_int2fixp(int a) { return ((s64)a) << DRM_FIXED_POINT; } -static inline int drm_fixp2int(int64_t a) +static inline int drm_fixp2int(s64 a) { return ((s64)a) >> DRM_FIXED_POINT; } -static inline unsigned drm_fixp_msbset(int64_t a) +static inline int drm_fixp2int_ceil(s64 a) +{ + if (a > 0) + return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE); + else + return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE); +} + +static inline unsigned drm_fixp_msbset(s64 a) { unsigned shift, sign = (a >> 63) & 1; @@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b) return result; } +static inline s64 drm_fixp_from_fraction(s64 a, s64 b) +{ + s64 res; + bool a_neg = a < 0; + bool b_neg = b < 0; + u64 a_abs = a_neg ? -a : a; + u64 b_abs = b_neg ? -b : b; + u64 rem; + + /* determine integer part */ + u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem); + + /* determine fractional part */ + { + u32 i = DRM_FIXED_POINT; + + do { + rem <<= 1; + res_abs <<= 1; + if (rem >= b_abs) { + res_abs |= 1; + rem -= b_abs; + } + } while (--i != 0); + } + + /* round up LSB */ + { + u64 summand = (rem << 1) >= b_abs; + + res_abs += summand; + } + + res = (s64) res_abs; + if (a_neg ^ b_neg) + res = -res; + return res; +} + static inline s64 drm_fixp_exp(s64 x) { s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 29189aeace19..4571ef1a12a9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -682,9 +682,12 @@ static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b) /* * q->prep_rq_fn return values */ -#define BLKPREP_OK 0 /* serve it */ -#define BLKPREP_KILL 1 /* fatal error, kill */ -#define BLKPREP_DEFER 2 /* leave on queue */ +enum { + BLKPREP_OK, /* serve it */ + BLKPREP_KILL, /* fatal error, kill, return -EIO */ + BLKPREP_DEFER, /* leave on queue */ + BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */ +}; extern unsigned long blk_max_low_pfn, blk_max_pfn; diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index f89b31d45cc8..c1ef6f14e7be 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -63,6 +63,18 @@ #define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49) // duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY #define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49) /* overlap w/ above */ +#define CEPH_FEATURE_MON_METADATA (1ULL<<50) +#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */ +#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES 
(1ULL<<52) +#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53) +#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54) +#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55) +#define CEPH_FEATURE_NEW_OSDOP_ENCODING (1ULL<<56) /* New, v7 encoding */ +#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */ +#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */ +#define CEPH_FEATURE_CRUSH_TUNABLES5 (1ULL<<58) /* chooseleaf stable mode */ +// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5 +#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING (1ULL<<58) /* New, v7 encoding */ /* * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature @@ -108,7 +120,9 @@ static inline u64 ceph_sanitize_features(u64 features) CEPH_FEATURE_CRUSH_TUNABLES3 | \ CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \ CEPH_FEATURE_MSGR_KEEPALIVE2 | \ - CEPH_FEATURE_CRUSH_V4) + CEPH_FEATURE_CRUSH_V4 | \ + CEPH_FEATURE_CRUSH_TUNABLES5 | \ + CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING) #define CEPH_FEATURES_REQUIRED_DEFAULT \ (CEPH_FEATURE_NOSRCADDR | \ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 7f540f7f588d..789471dba6fb 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -127,6 +127,12 @@ struct cgroup_subsys_state { */ u64 serial_nr; + /* + * Incremented by online self and children. Used to guarantee that + * parents are not offlined before their children. + */ + atomic_t online_cnt; + /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; struct work_struct destroy_work; diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 85a868ccb493..fea160ee5803 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask) task_unlock(current); } +extern void cpuset_post_attach_flush(void); + #else /* !CONFIG_CPUSETS */ static inline bool cpusets_enabled(void) { return false; } @@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq) return false; } +static inline void cpuset_post_attach_flush(void) +{ +} + #endif /* !CONFIG_CPUSETS */ #endif /* _LINUX_CPUSET_H */ diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 48b49305716b..be8f12b8f195 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -59,7 +59,8 @@ enum { CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */ CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10, CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11, - CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12 + CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12, + CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13 }; /* @@ -205,6 +206,11 @@ struct crush_map { * mappings line up a bit better with previous mappings. */ __u8 chooseleaf_vary_r; + /* if true, it makes chooseleaf firstn to return stable results (if + * no local retry) so that data migrations would be optimal when some + * device fails. */ + __u8 chooseleaf_stable; + #ifndef __KERNEL__ /* * version 0 (original) of straw_calc has various flaws. 
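[Editor's aside, returning to the include/drm/drm_fixed.h hunk a little earlier: drm_fixp_from_fraction() builds a signed 32.32 value by long division with round-to-nearest on the final bit. A standalone userspace re-implementation of the same arithmetic; plain C division replaces div64_u64_rem(), and overflow of very large quotients is ignored in this sketch.]

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FIXED_POINT 32
#define FIXED_ONE (1ULL << FIXED_POINT)

static int64_t fixp_from_fraction(int64_t a, int64_t b)
{
    bool neg = (a < 0) ^ (b < 0);
    uint64_t a_abs = a < 0 ? -(uint64_t)a : (uint64_t)a;
    uint64_t b_abs = b < 0 ? -(uint64_t)b : (uint64_t)b;
    uint64_t res = a_abs / b_abs;       /* integer part */
    uint64_t rem = a_abs % b_abs;
    int i;

    for (i = 0; i < FIXED_POINT; i++) { /* 32 fractional bits, long division */
        rem <<= 1;
        res <<= 1;
        if (rem >= b_abs) {
            res |= 1;
            rem -= b_abs;
        }
    }
    res += (rem << 1) >= b_abs;         /* round to nearest on the last bit */
    return neg ? -(int64_t)res : (int64_t)res;
}

int main(void)
{
    int64_t third = fixp_from_fraction(1, 3);

    printf("1/3 as 32.32: 0x%llx (~%.9f)\n",
           (unsigned long long)third, (double)third / FIXED_ONE);
    return 0;
}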
version 1 diff --git a/include/linux/dax.h b/include/linux/dax.h index 8204c3dc3800..818e45078929 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, dax_iodone_t); int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t, dax_iodone_t); + +#ifdef CONFIG_FS_DAX +struct page *read_dax_sector(struct block_device *bdev, sector_t n); +#else +static inline struct page *read_dax_sector(struct block_device *bdev, + sector_t n) +{ + return ERR_PTR(-ENXIO); +} +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, unsigned int flags, get_block_t, dax_iodone_t); diff --git a/include/linux/fs.h b/include/linux/fs.h index 1a2046275cdf..ae681002100a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -484,9 +484,6 @@ struct block_device { int bd_fsfreeze_count; /* Mutex for freeze */ struct mutex bd_fsfreeze_mutex; -#ifdef CONFIG_FS_DAX - int bd_map_count; -#endif }; /* @@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options); static inline bool io_is_direct(struct file *filp) { - return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp)); + return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host); } static inline int iocb_flags(struct file *file) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 28ad5f6494b0..af1f2b24bbe4 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void) } #endif /* CONFIG_PM_SLEEP */ -#ifdef CONFIG_CMA - +#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) /* The below functions must be run on a range from a single zone. 
*/ extern int alloc_contig_range(unsigned long start, unsigned long end, unsigned migratetype); extern void free_contig_range(unsigned long pfn, unsigned nr_pages); +#endif +#ifdef CONFIG_CMA /* CMA stuff */ extern void init_cma_reserved_pageblock(struct page *page); - #endif #endif /* __LINUX_GFP_H */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 851821bfd553..bec2abbd7ab2 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -526,6 +526,7 @@ enum ata_lpm_policy { enum ata_lpm_hints { ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ + ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */ }; /* forward declarations */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index c57e424d914b..4dca42fd32f5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -66,7 +66,7 @@ struct lock_class { /* * class-hash: */ - struct list_head hash_entry; + struct hlist_node hash_entry; /* * global list of all lock-classes: @@ -199,7 +199,7 @@ struct lock_chain { u8 irq_context; u8 depth; u16 base; - struct list_head entry; + struct hlist_node entry; u64 chain_key; }; diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 9ae48d4aeb5e..792c8981e633 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -51,7 +51,7 @@ enum mem_cgroup_stat_index { MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */ MEM_CGROUP_STAT_NSTATS, /* default hierarchy stats */ - MEMCG_SOCK, + MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS, MEMCG_NR_STAT, }; diff --git a/include/linux/mm.h b/include/linux/mm.h index f1cd22f2df1a..516e14944339 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp); #endif #ifdef CONFIG_STACK_GROWSUP -#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) +#define VM_STACK VM_GROWSUP #else -#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) +#define VM_STACK VM_GROWSDOWN #endif +#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT) + /* * Special vmas that are non-mergable, non-mlock()able. * Note: mm/huge_memory.c VM_NO_THP depends on this definition. 
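[Editor's aside on the include/linux/memcontrol.h change above: it pins MEMCG_SOCK to MEM_CGROUP_STAT_NSTATS. Because NSTATS is a count sentinel, an uninitialised next enumerator would land one slot past it, leaving an unused index and making MEMCG_NR_STAT one too large. A tiny illustration with invented names.]

#include <stdio.h>

enum stats {
    STAT_CACHE,
    STAT_RSS,
    STAT_NSTATS,              /* number of "legacy" counters above */
    STAT_SOCK = STAT_NSTATS,  /* first extra counter: index equals the
                                 legacy count, so no hole is left */
    STAT_NR,                  /* array size: 3 here, not 4 */
};

int main(void)
{
    printf("NSTATS=%d SOCK=%d NR=%d\n", STAT_NSTATS, STAT_SOCK, STAT_NR);
    return 0;
}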
@@ -1341,8 +1343,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma, !vma_growsup(vma->vm_next, addr); } -extern struct task_struct *task_of_stack(struct task_struct *task, - struct vm_area_struct *vma, bool in_group); +int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index d3ebb9d21a53..624b78b848b8 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -424,9 +424,9 @@ struct mm_struct { unsigned long total_vm; /* Total pages mapped */ unsigned long locked_vm; /* Pages that have PG_mlocked set */ unsigned long pinned_vm; /* Refcount permanently increased */ - unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */ - unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */ - unsigned long stack_vm; /* VM_GROWSUP/DOWN */ + unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ + unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ + unsigned long stack_vm; /* VM_STACK */ unsigned long def_flags; unsigned long start_code, end_code, start_data, end_data; unsigned long start_brk, brk, start_stack; diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 33bb1b19273e..7b6c2cfee390 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -682,6 +682,12 @@ typedef struct pglist_data { */ unsigned long first_deferred_pfn; #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + spinlock_t split_queue_lock; + struct list_head split_queue; + unsigned long split_queue_len; +#endif } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) diff --git a/include/linux/module.h b/include/linux/module.h index 4560d8f1545d..2bb0c3085706 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -324,6 +324,12 @@ struct module_layout { #define __module_layout_align #endif +struct mod_kallsyms { + Elf_Sym *symtab; + unsigned int num_symtab; + char *strtab; +}; + struct module { enum module_state state; @@ -405,15 +411,10 @@ struct module { #endif #ifdef CONFIG_KALLSYMS - /* - * We keep the symbol and string tables for kallsyms. - * The core_* fields below are temporary, loader-only (they - * could really be discarded after module init). 
- */ - Elf_Sym *symtab, *core_symtab; - unsigned int num_symtab, core_num_syms; - char *strtab, *core_strtab; - + /* Protected by RCU and/or module_mutex: use rcu_dereference() */ + struct mod_kallsyms *kallsyms; + struct mod_kallsyms core_kallsyms; + /* Section attributes */ struct module_sect_attrs *sect_attrs; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5ac140dcb789..289c2314d766 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n) clear_bit(NAPI_STATE_NPSVC, &n->state); } -#ifdef CONFIG_SMP /** * napi_synchronize - wait until NAPI is not running * @n: napi context @@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n) */ static inline void napi_synchronize(const struct napi_struct *n) { - while (test_bit(NAPI_STATE_SCHED, &n->state)) - msleep(1); + if (IS_ENABLED(CONFIG_SMP)) + while (test_bit(NAPI_STATE_SCHED, &n->state)) + msleep(1); + else + barrier(); } -#else -# define napi_synchronize(n) barrier() -#endif enum netdev_queue_state_t { __QUEUE_STATE_DRV_XOFF, diff --git a/include/linux/of.h b/include/linux/of.h index dd10626a615f..dc6e39696b64 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np) return num; } -#ifdef CONFIG_OF +#if defined(CONFIG_OF) && !defined(MODULE) #define _OF_DECLARE(table, name, compat, fn, fn_type) \ static const struct of_device_id __of_table_##name \ __used __section(__##table##_of_table) \ diff --git a/include/linux/pfn.h b/include/linux/pfn.h index 2d8e49711b63..1132953235c0 100644 --- a/include/linux/pfn.h +++ b/include/linux/pfn.h @@ -10,7 +10,7 @@ * backing is indicated by flags in the high bits of the value. 
*/ typedef struct { - unsigned long val; + u64 val; } pfn_t; #endif diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 0703b5360d31..94994810c7c0 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h @@ -9,14 +9,13 @@ * PFN_DEV - pfn is not covered by system memmap by default * PFN_MAP - pfn has a dynamic page mapping established by a device driver */ -#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \ - << (BITS_PER_LONG - PAGE_SHIFT)) -#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1)) -#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2)) -#define PFN_DEV (1UL << (BITS_PER_LONG - 3)) -#define PFN_MAP (1UL << (BITS_PER_LONG - 4)) - -static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags) +#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) +#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) +#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) +#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) +#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) + +static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) { pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; @@ -29,7 +28,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn) return __pfn_to_pfn_t(pfn, 0); } -extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags); +extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags); static inline bool pfn_t_has_page(pfn_t pfn) { @@ -48,7 +47,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn) return NULL; } -static inline dma_addr_t pfn_t_to_phys(pfn_t pfn) +static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) { return PFN_PHYS(pfn_t_to_pfn(pfn)); } @@ -87,7 +86,7 @@ static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) #ifdef __HAVE_ARCH_PTE_DEVMAP static inline bool pfn_t_devmap(pfn_t pfn) { - const unsigned long flags = PFN_DEV|PFN_MAP; + const u64 flags = PFN_DEV|PFN_MAP; return (pfn.val & flags) == flags; } diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 7c88ad156a29..f54be7082207 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -379,12 +379,28 @@ void **radix_tree_next_chunk(struct radix_tree_root *root, struct radix_tree_iter *iter, unsigned flags); /** + * radix_tree_iter_retry - retry this chunk of the iteration + * @iter: iterator state + * + * If we iterate over a tree protected only by the RCU lock, a race + * against deletion or creation may result in seeing a slot for which + * radix_tree_deref_retry() returns true. If so, call this function + * and continue the iteration. 
+ */ +static inline __must_check +void **radix_tree_iter_retry(struct radix_tree_iter *iter) +{ + iter->next_index = iter->index; + return NULL; +} + +/** * radix_tree_chunk_size - get current chunk size * * @iter: pointer to radix tree iterator * Returns: current chunk size */ -static __always_inline unsigned +static __always_inline long radix_tree_chunk_size(struct radix_tree_iter *iter) { return iter->next_index - iter->index; @@ -418,9 +434,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) return slot + offset + 1; } } else { - unsigned size = radix_tree_chunk_size(iter) - 1; + long size = radix_tree_chunk_size(iter); - while (size--) { + while (--size > 0) { slot++; iter->index++; if (likely(*slot)) diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index a7a06d1dcf9c..a0118d5929a9 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -152,6 +152,8 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, # define jiffies raid6_jiffies() # define printk printf +# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__) +# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__) # define GFP_KERNEL 0 # define __get_free_pages(x, y) ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \ PROT_READ|PROT_WRITE, \ diff --git a/include/linux/rmap.h b/include/linux/rmap.h index bdf597c4f0be..a07f42bedda3 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma) __put_anon_vma(anon_vma); } -static inline void vma_lock_anon_vma(struct vm_area_struct *vma) -{ - struct anon_vma *anon_vma = vma->anon_vma; - if (anon_vma) - down_write(&anon_vma->root->rwsem); -} - -static inline void vma_unlock_anon_vma(struct vm_area_struct *vma) -{ - struct anon_vma *anon_vma = vma->anon_vma; - if (anon_vma) - up_write(&anon_vma->root->rwsem); -} - static inline void anon_vma_lock_write(struct anon_vma *anon_vma) { down_write(&anon_vma->root->rwsem); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 11f935c1a090..4ce9ff7086f4 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -299,6 +299,7 @@ struct sk_buff; #else #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1) #endif +extern int sysctl_max_skb_frags; typedef struct skb_frag_struct skb_frag_t; diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 0e32bc71245e..ca73c503b92a 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -311,6 +311,7 @@ enum { __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ __WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */ + __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */ WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? 
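[Editor's aside on the radix-tree changes above: radix_tree_iter_retry() together with the signed chunk-size arithmetic in radix_tree_next_slot() lets an RCU-only walker restart the current chunk when radix_tree_deref_retry() fires. A kernel-context usage sketch of that pattern with a placeholder payload (an item count); it mirrors how the page-cache iterators use the helper but is not copied from any one caller.]

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static unsigned count_items(struct radix_tree_root *root)
{
    struct radix_tree_iter iter;
    void **slot;
    unsigned nr = 0;

    rcu_read_lock();
    radix_tree_for_each_slot(slot, root, &iter, 0) {
        void *item = radix_tree_deref_slot(slot);

        if (unlikely(radix_tree_deref_retry(item))) {
            /* Raced with a concurrent modification: redo this chunk. */
            slot = radix_tree_iter_retry(&iter);
            continue;
        }
        nr++;
    }
    rcu_read_unlock();
    return nr;
}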
*/ WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ @@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args) #define create_workqueue(name) \ - alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name)) + alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name)) #define create_freezable_workqueue(name) \ - alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \ - 1, (name)) + alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \ + WQ_MEM_RECLAIM, 1, (name)) #define create_singlethread_workqueue(name) \ - alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) + alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name) extern void destroy_workqueue(struct workqueue_struct *wq); diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index ef03ae56b1c1..8a0f55b6c2ba 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -533,7 +533,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, const unsigned int requested_sizes[]); int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb); -int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking); +int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, + bool nonblocking); int vb2_core_streamon(struct vb2_queue *q, unsigned int type); int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); diff --git a/include/net/af_unix.h b/include/net/af_unix.h index 2a91a0561a47..9b4c418bebd8 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -6,8 +6,8 @@ #include <linux/mutex.h> #include <net/sock.h> -void unix_inflight(struct file *fp); -void unix_notinflight(struct file *fp); +void unix_inflight(struct user_struct *user, struct file *fp); +void unix_notinflight(struct user_struct *user, struct file *fp); void unix_gc(void); void wait_for_unix_gc(void); struct sock *unix_get_socket(struct file *filp); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 52899291f401..5ee3c689c863 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -252,6 +252,12 @@ struct l2cap_conn_rsp { #define L2CAP_PSM_3DSP 0x0021 #define L2CAP_PSM_IPSP 0x0023 /* 6LoWPAN */ +#define L2CAP_PSM_DYN_START 0x1001 +#define L2CAP_PSM_DYN_END 0xffff +#define L2CAP_PSM_AUTO_END 0x10ff +#define L2CAP_PSM_LE_DYN_START 0x0080 +#define L2CAP_PSM_LE_DYN_END 0x00ff + /* channel identifier */ #define L2CAP_CID_SIGNALING 0x0001 #define L2CAP_CID_CONN_LESS 0x0002 diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h index 6816f0fa5693..30a56ab2ccfb 100644 --- a/include/net/dst_metadata.h +++ b/include/net/dst_metadata.h @@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb) return dst && !(dst->flags & DST_METADATA); } +static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a, + const struct sk_buff *skb_b) +{ + const struct metadata_dst *a, *b; + + if (!(skb_a->_skb_refdst | skb_b->_skb_refdst)) + return 0; + + a = (const struct metadata_dst *) skb_dst(skb_a); + b = (const struct metadata_dst *) skb_dst(skb_b); + + if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len) + return 1; + + return memcmp(&a->u.tun_info, &b->u.tun_info, + sizeof(a->u.tun_info) + a->u.tun_info.options_len); +} + struct metadata_dst *metadata_dst_alloc(u8 optslen, 
gfp_t flags); struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags); diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index 877f682989b8..295d291269e2 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr) void ip6_route_input(struct sk_buff *skb); -struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, - struct flowi6 *fl6); +struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, + struct flowi6 *fl6, int flags); + +static inline struct dst_entry *ip6_route_output(struct net *net, + const struct sock *sk, + struct flowi6 *fl6) +{ + return ip6_route_output_flags(net, sk, fl6, 0); +} + struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, int flags); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 6db96ea0144f..dda9abf6b89c 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, u8 *protocol, struct flowi4 *fl4); +int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu); struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev, diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h index 788ef58a66b9..62e17d1319ff 100644 --- a/include/net/netfilter/nf_conntrack_core.h +++ b/include/net/netfilter/nf_conntrack_core.h @@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple, const struct nf_conntrack_l3proto *l3proto, const struct nf_conntrack_l4proto *proto); -#ifdef CONFIG_LOCKDEP -# define CONNTRACK_LOCKS 8 -#else -# define CONNTRACK_LOCKS 1024 -#endif +#define CONNTRACK_LOCKS 1024 + extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS]; +void nf_conntrack_lock(spinlock_t *lock); extern spinlock_t nf_conntrack_expect_lock; diff --git a/include/net/scm.h b/include/net/scm.h index 262532d111f5..59fa93c01d2a 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -21,6 +21,7 @@ struct scm_creds { struct scm_fp_list { short count; short max; + struct user_struct *user; struct file *fp[SCM_MAX_FD]; }; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 20e72129be1c..205630bb5010 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -756,7 +756,6 @@ struct sctp_transport { /* Reference counting. */ atomic_t refcnt; - __u32 dead:1, /* RTO-Pending : A flag used to track if one of the DATA * chunks sent to this address is currently being * used to compute a RTT. If this flag is 0, @@ -766,7 +765,7 @@ struct sctp_transport { * calculation completes (i.e. the DATA chunk * is SACK'd) clear this flag. 
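[Editor's aside, looking back at the include/net/dst_metadata.h hunk: skb_metadata_dst_cmp() compares the fixed tunnel-info header and its trailing options area with a single memcmp() sized sizeof(header) + options_len. A standalone sketch of comparing such "struct plus trailing payload" objects; the types are invented, and real code must also keep any padding deterministic.]

#include <stdio.h>
#include <string.h>

struct tun_info {
    unsigned int id;
    unsigned int options_len;
    unsigned char options[];    /* options_len bytes follow the header */
};

static int tun_info_cmp(const struct tun_info *a, const struct tun_info *b)
{
    if (a->options_len != b->options_len)
        return 1;
    /* One compare spanning the fixed header and the trailing options. */
    return memcmp(a, b, sizeof(*a) + a->options_len) ? 1 : 0;
}

int main(void)
{
    struct tun_info x = { .id = 1, .options_len = 0 };
    struct tun_info y = { .id = 1, .options_len = 0 };

    printf("differ=%d\n", tun_info_cmp(&x, &y));   /* prints differ=0 */
    return 0;
}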
*/ - rto_pending:1, + __u32 rto_pending:1, /* * hb_sent : a flag that signals that we have a pending @@ -955,7 +954,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *, void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk); void sctp_transport_free(struct sctp_transport *); void sctp_transport_reset_timers(struct sctp_transport *); -void sctp_transport_hold(struct sctp_transport *); +int sctp_transport_hold(struct sctp_transport *); void sctp_transport_put(struct sctp_transport *); void sctp_transport_update_rto(struct sctp_transport *, __u32); void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32); diff --git a/include/net/sock.h b/include/net/sock.h index b9e7b3d863a0..f5ea148853e2 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1036,18 +1036,6 @@ struct proto { #ifdef SOCK_REFCNT_DEBUG atomic_t socks; #endif -#ifdef CONFIG_MEMCG_KMEM - /* - * cgroup specific init/deinit functions. Called once for all - * protocols that implement it, from cgroups populate function. - * This function has to setup any files the protocol want to - * appear in the kmem cgroup filesystem. - */ - int (*init_cgroup)(struct mem_cgroup *memcg, - struct cgroup_subsys *ss); - void (*destroy_cgroup)(struct mem_cgroup *memcg); - struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); -#endif int (*diag_destroy)(struct sock *sk, int err); }; diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h index 7dda3d7adba8..aecd30308d50 100644 --- a/include/net/sock_reuseport.h +++ b/include/net/sock_reuseport.h @@ -16,7 +16,7 @@ struct sock_reuseport { }; extern int reuseport_alloc(struct sock *sk); -extern int reuseport_add_sock(struct sock *sk, const struct sock *sk2); +extern int reuseport_add_sock(struct sock *sk, struct sock *sk2); extern void reuseport_detach_sock(struct sock *sk); extern struct sock *reuseport_select_sock(struct sock *sk, u32 hash, diff --git a/include/net/tcp.h b/include/net/tcp.h index 8ea19977ea53..ae6468f5c9f3 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -216,7 +216,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo); /* TCP thin-stream limits */ #define TCP_THIN_LINEAR_RETRIES 6 /* After 6 linear retries, do exp. 
backoff */ -/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */ +/* TCP initial congestion window as per rfc6928 */ #define TCP_INIT_CWND 10 /* Bit Flags for sysctl_tcp_fastopen */ @@ -447,7 +447,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); -void tcp_req_err(struct sock *sk, u32 seq); +void tcp_req_err(struct sock *sk, u32 seq, bool abort); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(const struct sock *sk, struct request_sock *req, diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h index fdabbb4ddba9..f730b91e472f 100644 --- a/include/sound/rawmidi.h +++ b/include/sound/rawmidi.h @@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count); int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream, unsigned char *buffer, int count); +int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, + unsigned char *buffer, int count); +int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, + int count); /* main midi functions */ diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 56cf8e485ef2..28ee5c2e6bcd 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); bool target_sense_desc_format(struct se_device *dev); +sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); +bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, + struct request_queue *q, int block_size); #endif /* TARGET_CORE_BACKEND_H */ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 5d82816cc4e3..e8c8c08bf575 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -140,6 +140,8 @@ enum se_cmd_flags_table { SCF_COMPARE_AND_WRITE = 0x00080000, SCF_COMPARE_AND_WRITE_POST = 0x00100000, SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, + SCF_ACK_KREF = 0x00400000, + SCF_USE_CPUID = 0x00800000, }; /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ @@ -187,6 +189,7 @@ enum target_sc_flags_table { TARGET_SCF_BIDI_OP = 0x01, TARGET_SCF_ACK_KREF = 0x02, TARGET_SCF_UNKNOWN_SIZE = 0x04, + TARGET_SCF_USE_CPUID = 0x08, }; /* fabric independent task management function values */ @@ -490,8 +493,9 @@ struct se_cmd { #define CMD_T_SENT (1 << 4) #define CMD_T_STOP (1 << 5) #define CMD_T_DEV_ACTIVE (1 << 7) -#define CMD_T_REQUEST_STOP (1 << 8) #define CMD_T_BUSY (1 << 9) +#define CMD_T_TAS (1 << 10) +#define CMD_T_FABRIC_STOP (1 << 11) spinlock_t t_state_lock; struct kref cmd_kref; struct completion t_transport_stop_comp; @@ -511,9 +515,6 @@ struct se_cmd { struct list_head state_list; - /* old task stop completion, consider merging with some of the above */ - struct completion task_stop_comp; - /* backend private data */ void *priv; diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 41e0433b4a83..149bec83a907 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -222,7 +222,6 @@ struct fsxattr { #define BLKSECDISCARD _IO(0x12,125) #define BLKROTATIONAL _IO(0x12,126) #define BLKZEROOUT _IO(0x12,127) -#define BLKDAXSET _IO(0x12,128) #define 
BLKDAXGET _IO(0x12,129) #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d1d3e8f57de9..2e7f7ab739e4 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta) /* adjust offset of jmps if necessary */ if (i < pos && i + insn->off + 1 > pos) insn->off += delta; - else if (i > pos && i + insn->off + 1 < pos) + else if (i > pos + delta && i + insn->off + 1 <= pos + delta) insn->off -= delta; } } diff --git a/kernel/cgroup.c b/kernel/cgroup.c index c03a640ef6da..d27904c193da 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -58,6 +58,7 @@ #include <linux/kthread.h> #include <linux/delay.h> #include <linux/atomic.h> +#include <linux/cpuset.h> #include <net/sock.h> /* @@ -2739,6 +2740,7 @@ out_unlock_rcu: out_unlock_threadgroup: percpu_up_write(&cgroup_threadgroup_rwsem); cgroup_kn_unlock(of->kn); + cpuset_post_attach_flush(); return ret ?: nbytes; } @@ -4655,14 +4657,15 @@ static void css_free_work_fn(struct work_struct *work) if (ss) { /* css free path */ + struct cgroup_subsys_state *parent = css->parent; int id = css->id; - if (css->parent) - css_put(css->parent); - ss->css_free(css); cgroup_idr_remove(&ss->css_idr, id); cgroup_put(cgrp); + + if (parent) + css_put(parent); } else { /* cgroup free path */ atomic_dec(&cgrp->root->nr_cgrps); @@ -4758,6 +4761,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css, INIT_LIST_HEAD(&css->sibling); INIT_LIST_HEAD(&css->children); css->serial_nr = css_serial_nr_next++; + atomic_set(&css->online_cnt, 0); if (cgroup_parent(cgrp)) { css->parent = cgroup_css(cgroup_parent(cgrp), ss); @@ -4780,6 +4784,10 @@ static int online_css(struct cgroup_subsys_state *css) if (!ret) { css->flags |= CSS_ONLINE; rcu_assign_pointer(css->cgroup->subsys[ss->id], css); + + atomic_inc(&css->online_cnt); + if (css->parent) + atomic_inc(&css->parent->online_cnt); } return ret; } @@ -5017,10 +5025,15 @@ static void css_killed_work_fn(struct work_struct *work) container_of(work, struct cgroup_subsys_state, destroy_work); mutex_lock(&cgroup_mutex); - offline_css(css); - mutex_unlock(&cgroup_mutex); - css_put(css); + do { + offline_css(css); + css_put(css); + /* @css can't go away while we're holding cgroup_mutex */ + css = css->parent; + } while (css && atomic_dec_and_test(&css->online_cnt)); + + mutex_unlock(&cgroup_mutex); } /* css kill confirmation processing requires process context, bounce */ @@ -5029,8 +5042,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); - INIT_WORK(&css->destroy_work, css_killed_work_fn); - queue_work(cgroup_destroy_wq, &css->destroy_work); + if (atomic_dec_and_test(&css->online_cnt)) { + INIT_WORK(&css->destroy_work, css_killed_work_fn); + queue_work(cgroup_destroy_wq, &css->destroy_work); + } } /** diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 3e945fcd8179..41989ab4db57 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -287,6 +287,8 @@ static struct cpuset top_cpuset = { static DEFINE_MUTEX(cpuset_mutex); static DEFINE_SPINLOCK(callback_lock); +static struct workqueue_struct *cpuset_migrate_mm_wq; + /* * CPU / memory hotplug is handled asynchronously. */ @@ -972,31 +974,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, } /* - * cpuset_migrate_mm - * - * Migrate memory region from one set of nodes to another. 
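[Editor's aside on the kernel/cgroup.c hunks above: each css now carries an online_cnt counting itself plus its online children, and css_killed_work_fn() walks up the hierarchy, offlining an ancestor only when its count reaches zero, so a parent is never offlined before its children. A single-threaded userspace analogue of that walk, with invented types and no locking.]

#include <stdio.h>

struct node {
    const char *name;
    struct node *parent;
    int online_cnt;             /* self + number of online children */
};

static void offline(struct node *n)
{
    printf("offlining %s\n", n->name);
}

/* One kill drops one count; the upward walk continues only while counts
 * hit zero, so an ancestor waits for all of its children. */
static void kill_node(struct node *n)
{
    while (n && --n->online_cnt == 0) {
        offline(n);
        n = n->parent;
    }
}

int main(void)
{
    struct node root  = { "root",  NULL,  2 };   /* self + one child */
    struct node child = { "child", &root, 1 };   /* self, no children */

    kill_node(&root);    /* child still online: root stays online */
    kill_node(&child);   /* offlines child, then root */
    return 0;
}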
- * - * Temporarilly set tasks mems_allowed to target nodes of migration, - * so that the migration code can allocate pages on these nodes. - * - * While the mm_struct we are migrating is typically from some - * other task, the task_struct mems_allowed that we are hacking - * is for our current task, which must allocate new pages for that - * migrating memory region. + * Migrate memory region from one set of nodes to another. This is + * performed asynchronously as it can be called from process migration path + * holding locks involved in process management. All mm migrations are + * performed in the queued order and can be waited for by flushing + * cpuset_migrate_mm_wq. */ +struct cpuset_migrate_mm_work { + struct work_struct work; + struct mm_struct *mm; + nodemask_t from; + nodemask_t to; +}; + +static void cpuset_migrate_mm_workfn(struct work_struct *work) +{ + struct cpuset_migrate_mm_work *mwork = + container_of(work, struct cpuset_migrate_mm_work, work); + + /* on a wq worker, no need to worry about %current's mems_allowed */ + do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL); + mmput(mwork->mm); + kfree(mwork); +} + static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, const nodemask_t *to) { - struct task_struct *tsk = current; - - tsk->mems_allowed = *to; + struct cpuset_migrate_mm_work *mwork; - do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); + mwork = kzalloc(sizeof(*mwork), GFP_KERNEL); + if (mwork) { + mwork->mm = mm; + mwork->from = *from; + mwork->to = *to; + INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn); + queue_work(cpuset_migrate_mm_wq, &mwork->work); + } else { + mmput(mm); + } +} - rcu_read_lock(); - guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed); - rcu_read_unlock(); +void cpuset_post_attach_flush(void) +{ + flush_workqueue(cpuset_migrate_mm_wq); } /* @@ -1097,7 +1119,8 @@ static void update_tasks_nodemask(struct cpuset *cs) mpol_rebind_mm(mm, &cs->mems_allowed); if (migrate) cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems); - mmput(mm); + else + mmput(mm); } css_task_iter_end(&it); @@ -1545,11 +1568,11 @@ static void cpuset_attach(struct cgroup_taskset *tset) * @old_mems_allowed is the right nodesets that we * migrate mm from. 
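[Editor's aside on the kernel/cpuset.c changes above: the actual page migration is pushed onto a dedicated ordered workqueue, and paths that must observe completion flush it via cpuset_post_attach_flush(). A minimal kernel-style sketch of that pattern, with illustrative names and error handling reduced to the allocation check; it is not the cpuset code itself.]

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct migrate_work {
    struct work_struct work;
    int from, to;                    /* stand-ins for the nodemasks */
};

static struct workqueue_struct *migrate_wq;

static void migrate_workfn(struct work_struct *work)
{
    struct migrate_work *mw = container_of(work, struct migrate_work, work);

    /* the expensive migration runs here, with no caller locks held */
    kfree(mw);
}

static void queue_migration(int from, int to)
{
    struct migrate_work *mw = kzalloc(sizeof(*mw), GFP_KERNEL);

    if (!mw)
        return;                      /* real code must pick a fallback */
    mw->from = from;
    mw->to = to;
    INIT_WORK(&mw->work, migrate_workfn);
    queue_work(migrate_wq, &mw->work);
}

static void wait_for_migrations(void)
{
    flush_workqueue(migrate_wq);     /* everything queued so far is done */
}

static int __init migrate_wq_init(void)
{
    migrate_wq = alloc_ordered_workqueue("migrate_wq", 0);
    return migrate_wq ? 0 : -ENOMEM;
}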
*/ - if (is_memory_migrate(cs)) { + if (is_memory_migrate(cs)) cpuset_migrate_mm(mm, &oldcs->old_mems_allowed, &cpuset_attach_nodemask_to); - } - mmput(mm); + else + mmput(mm); } } @@ -1714,6 +1737,7 @@ out_unlock: mutex_unlock(&cpuset_mutex); kernfs_unbreak_active_protection(of->kn); css_put(&cs->css); + flush_workqueue(cpuset_migrate_mm_wq); return retval ?: nbytes; } @@ -2359,6 +2383,9 @@ void __init cpuset_init_smp(void) top_cpuset.effective_mems = node_states[N_MEMORY]; register_hotmemory_notifier(&cpuset_track_online_nodes_nb); + + cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0); + BUG_ON(!cpuset_migrate_mm_wq); } /** diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 60ace56618f6..7537e568dabe 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -292,7 +292,7 @@ LIST_HEAD(all_lock_classes); #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) #define classhashentry(key) (classhash_table + __classhashfn((key))) -static struct list_head classhash_table[CLASSHASH_SIZE]; +static struct hlist_head classhash_table[CLASSHASH_SIZE]; /* * We put the lock dependency chains into a hash-table as well, to cache @@ -303,7 +303,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE]; #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) -static struct list_head chainhash_table[CHAINHASH_SIZE]; +static struct hlist_head chainhash_table[CHAINHASH_SIZE]; /* * The hash key of the lock dependency chains is a hash itself too: @@ -666,7 +666,7 @@ static inline struct lock_class * look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) { struct lockdep_subclass_key *key; - struct list_head *hash_head; + struct hlist_head *hash_head; struct lock_class *class; #ifdef CONFIG_DEBUG_LOCKDEP @@ -719,7 +719,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return NULL; - list_for_each_entry_rcu(class, hash_head, hash_entry) { + hlist_for_each_entry_rcu(class, hash_head, hash_entry) { if (class->key == key) { /* * Huh! same key, different name? 
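
The lockdep conversion that starts here is purely a memory saving: a hash bucket head only needs one pointer, so switching the class and chain hash tables from list_head to hlist_head halves their static footprint. A userspace illustration of the difference (the struct shapes mirror the kernel's list types; the bucket counts are made-up round numbers):

#include <stdio.h>

struct list_head  { struct list_head *next, *prev; };	/* doubly linked: 2 pointers */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };	/* bucket head: 1 pointer */

#define CLASSHASH_SIZE	4096	/* illustrative bucket counts */
#define CHAINHASH_SIZE	16384

int main(void)
{
	printf("list_head  tables: %zu bytes\n",
	       (CLASSHASH_SIZE + CHAINHASH_SIZE) * sizeof(struct list_head));
	printf("hlist_head tables: %zu bytes\n",
	       (CLASSHASH_SIZE + CHAINHASH_SIZE) * sizeof(struct hlist_head));
	return 0;
}
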
Did someone trample @@ -742,7 +742,7 @@ static inline struct lock_class * register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) { struct lockdep_subclass_key *key; - struct list_head *hash_head; + struct hlist_head *hash_head; struct lock_class *class; DEBUG_LOCKS_WARN_ON(!irqs_disabled()); @@ -774,7 +774,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) * We have to do the hash-walk again, to avoid races * with another CPU: */ - list_for_each_entry_rcu(class, hash_head, hash_entry) { + hlist_for_each_entry_rcu(class, hash_head, hash_entry) { if (class->key == key) goto out_unlock_set; } @@ -805,7 +805,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) * We use RCU's safe list-add method to make * parallel walking of the hash-list safe: */ - list_add_tail_rcu(&class->hash_entry, hash_head); + hlist_add_head_rcu(&class->hash_entry, hash_head); /* * Add it to the global list of classes: */ @@ -2017,7 +2017,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, u64 chain_key) { struct lock_class *class = hlock_class(hlock); - struct list_head *hash_head = chainhashentry(chain_key); + struct hlist_head *hash_head = chainhashentry(chain_key); struct lock_chain *chain; struct held_lock *hlock_curr; int i, j; @@ -2033,7 +2033,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, * We can walk it lock-free, because entries only get added * to the hash: */ - list_for_each_entry_rcu(chain, hash_head, entry) { + hlist_for_each_entry_rcu(chain, hash_head, entry) { if (chain->chain_key == chain_key) { cache_hit: debug_atomic_inc(chain_lookup_hits); @@ -2057,7 +2057,7 @@ cache_hit: /* * We have to walk the chain again locked - to avoid duplicates: */ - list_for_each_entry(chain, hash_head, entry) { + hlist_for_each_entry(chain, hash_head, entry) { if (chain->chain_key == chain_key) { graph_unlock(); goto cache_hit; @@ -2091,7 +2091,7 @@ cache_hit: } chain_hlocks[chain->base + j] = class - lock_classes; } - list_add_tail_rcu(&chain->entry, hash_head); + hlist_add_head_rcu(&chain->entry, hash_head); debug_atomic_inc(chain_lookup_misses); inc_chains(); @@ -3875,7 +3875,7 @@ void lockdep_reset(void) nr_process_chains = 0; debug_locks = 1; for (i = 0; i < CHAINHASH_SIZE; i++) - INIT_LIST_HEAD(chainhash_table + i); + INIT_HLIST_HEAD(chainhash_table + i); raw_local_irq_restore(flags); } @@ -3894,7 +3894,7 @@ static void zap_class(struct lock_class *class) /* * Unhash the class and remove it from the all_lock_classes list: */ - list_del_rcu(&class->hash_entry); + hlist_del_rcu(&class->hash_entry); list_del_rcu(&class->lock_entry); RCU_INIT_POINTER(class->key, NULL); @@ -3917,7 +3917,7 @@ static inline int within(const void *addr, void *start, unsigned long size) void lockdep_free_key_range(void *start, unsigned long size) { struct lock_class *class; - struct list_head *head; + struct hlist_head *head; unsigned long flags; int i; int locked; @@ -3930,9 +3930,7 @@ void lockdep_free_key_range(void *start, unsigned long size) */ for (i = 0; i < CLASSHASH_SIZE; i++) { head = classhash_table + i; - if (list_empty(head)) - continue; - list_for_each_entry_rcu(class, head, hash_entry) { + hlist_for_each_entry_rcu(class, head, hash_entry) { if (within(class->key, start, size)) zap_class(class); else if (within(class->name, start, size)) @@ -3962,7 +3960,7 @@ void lockdep_free_key_range(void *start, unsigned long size) void lockdep_reset_lock(struct lockdep_map *lock) { struct lock_class *class; - 
struct list_head *head; + struct hlist_head *head; unsigned long flags; int i, j; int locked; @@ -3987,9 +3985,7 @@ void lockdep_reset_lock(struct lockdep_map *lock) locked = graph_lock(); for (i = 0; i < CLASSHASH_SIZE; i++) { head = classhash_table + i; - if (list_empty(head)) - continue; - list_for_each_entry_rcu(class, head, hash_entry) { + hlist_for_each_entry_rcu(class, head, hash_entry) { int match = 0; for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) @@ -4027,10 +4023,10 @@ void lockdep_init(void) return; for (i = 0; i < CLASSHASH_SIZE; i++) - INIT_LIST_HEAD(classhash_table + i); + INIT_HLIST_HEAD(classhash_table + i); for (i = 0; i < CHAINHASH_SIZE; i++) - INIT_LIST_HEAD(chainhash_table + i); + INIT_HLIST_HEAD(chainhash_table + i); lockdep_initialized = 1; } diff --git a/kernel/memremap.c b/kernel/memremap.c index e517a16cb426..2c468dea60bc 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c @@ -150,7 +150,7 @@ void devm_memunmap(struct device *dev, void *addr) } EXPORT_SYMBOL(devm_memunmap); -pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags) +pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags) { return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags); } @@ -183,7 +183,11 @@ EXPORT_SYMBOL(put_zone_device_page); static void pgmap_radix_release(struct resource *res) { - resource_size_t key; + resource_size_t key, align_start, align_size, align_end; + + align_start = res->start & ~(SECTION_SIZE - 1); + align_size = ALIGN(resource_size(res), SECTION_SIZE); + align_end = align_start + align_size - 1; mutex_lock(&pgmap_lock); for (key = res->start; key <= res->end; key += SECTION_SIZE) @@ -226,12 +230,11 @@ static void devm_memremap_pages_release(struct device *dev, void *data) percpu_ref_put(pgmap->ref); } - pgmap_radix_release(res); - /* pages are dead and unused, undo the arch mapping */ align_start = res->start & ~(SECTION_SIZE - 1); align_size = ALIGN(resource_size(res), SECTION_SIZE); arch_remove_memory(align_start, align_size); + pgmap_radix_release(res); dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc, "%s: failed to free all reserved pages\n", __func__); } @@ -267,7 +270,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, { int is_ram = region_intersects(res->start, resource_size(res), "System RAM"); - resource_size_t key, align_start, align_size; + resource_size_t key, align_start, align_size, align_end; struct dev_pagemap *pgmap; struct page_map *page_map; unsigned long pfn; @@ -309,7 +312,10 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, mutex_lock(&pgmap_lock); error = 0; - for (key = res->start; key <= res->end; key += SECTION_SIZE) { + align_start = res->start & ~(SECTION_SIZE - 1); + align_size = ALIGN(resource_size(res), SECTION_SIZE); + align_end = align_start + align_size - 1; + for (key = align_start; key <= align_end; key += SECTION_SIZE) { struct dev_pagemap *dup; rcu_read_lock(); @@ -336,8 +342,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, if (nid < 0) nid = numa_mem_id(); - align_start = res->start & ~(SECTION_SIZE - 1); - align_size = ALIGN(resource_size(res), SECTION_SIZE); error = arch_add_memory(nid, align_start, align_size, true); if (error) goto err_add_memory; diff --git a/kernel/module.c b/kernel/module.c index 8358f4697c0c..9537da37ce87 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -303,6 +303,9 @@ struct load_info { struct _ddebug *debug; unsigned int num_debug; bool sig_ok; +#ifdef CONFIG_KALLSYMS + unsigned long mod_kallsyms_init_off; +#endif struct { unsigned 
int sym, str, mod, vers, info, pcpu; } index; @@ -2480,10 +2483,21 @@ static void layout_symtab(struct module *mod, struct load_info *info) strsect->sh_flags |= SHF_ALLOC; strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect, info->index.str) | INIT_OFFSET_MASK; - mod->init_layout.size = debug_align(mod->init_layout.size); pr_debug("\t%s\n", info->secstrings + strsect->sh_name); + + /* We'll tack temporary mod_kallsyms on the end. */ + mod->init_layout.size = ALIGN(mod->init_layout.size, + __alignof__(struct mod_kallsyms)); + info->mod_kallsyms_init_off = mod->init_layout.size; + mod->init_layout.size += sizeof(struct mod_kallsyms); + mod->init_layout.size = debug_align(mod->init_layout.size); } +/* + * We use the full symtab and strtab which layout_symtab arranged to + * be appended to the init section. Later we switch to the cut-down + * core-only ones. + */ static void add_kallsyms(struct module *mod, const struct load_info *info) { unsigned int i, ndst; @@ -2492,29 +2506,34 @@ static void add_kallsyms(struct module *mod, const struct load_info *info) char *s; Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; - mod->symtab = (void *)symsec->sh_addr; - mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym); + /* Set up to point into init section. */ + mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off; + + mod->kallsyms->symtab = (void *)symsec->sh_addr; + mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym); /* Make sure we get permanent strtab: don't use info->strtab. */ - mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr; + mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr; /* Set types up while we still have access to sections. */ - for (i = 0; i < mod->num_symtab; i++) - mod->symtab[i].st_info = elf_type(&mod->symtab[i], info); - - mod->core_symtab = dst = mod->core_layout.base + info->symoffs; - mod->core_strtab = s = mod->core_layout.base + info->stroffs; - src = mod->symtab; - for (ndst = i = 0; i < mod->num_symtab; i++) { + for (i = 0; i < mod->kallsyms->num_symtab; i++) + mod->kallsyms->symtab[i].st_info + = elf_type(&mod->kallsyms->symtab[i], info); + + /* Now populate the cut down core kallsyms for after init. */ + mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs; + mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs; + src = mod->kallsyms->symtab; + for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) { if (i == 0 || is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum, info->index.pcpu)) { dst[ndst] = src[i]; - dst[ndst++].st_name = s - mod->core_strtab; - s += strlcpy(s, &mod->strtab[src[i].st_name], + dst[ndst++].st_name = s - mod->core_kallsyms.strtab; + s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name], KSYM_NAME_LEN) + 1; } } - mod->core_num_syms = ndst; + mod->core_kallsyms.num_symtab = ndst; } #else static inline void layout_symtab(struct module *mod, struct load_info *info) @@ -3263,9 +3282,8 @@ static noinline int do_init_module(struct module *mod) module_put(mod); trim_init_extable(mod); #ifdef CONFIG_KALLSYMS - mod->num_symtab = mod->core_num_syms; - mod->symtab = mod->core_symtab; - mod->strtab = mod->core_strtab; + /* Switch to core kallsyms now init is done: kallsyms may be walking! 
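
The module rework gathers symtab, strtab and num_symtab into one struct mod_kallsyms so that the hand-over from the full init-section copy to the trimmed core copy is a single RCU pointer assignment, with readers pinning the current table under preempt-disable. A hedged sketch of that reader/writer pairing; the field names come from the patch, the demo_* wrappers are hypothetical, and this leans on kernel headers rather than being compilable on its own:

/* writer: runs once init has finished, with module_mutex held */
static void demo_switch_to_core(struct module *mod)
{
	/* readers may still be walking the init-section table */
	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
	/* the old table lives in init memory and is freed with it */
}

/* reader: pin whichever table is current for the duration of one lookup */
static unsigned long demo_find_symbol(struct module *mod, const char *name)
{
	struct mod_kallsyms *kallsyms;
	unsigned long addr = 0;
	unsigned int i;

	preempt_disable();
	kallsyms = rcu_dereference_sched(mod->kallsyms);
	for (i = 0; i < kallsyms->num_symtab; i++) {
		if (!strcmp(name, kallsyms->strtab + kallsyms->symtab[i].st_name)) {
			addr = kallsyms->symtab[i].st_value;
			break;
		}
	}
	preempt_enable();
	return addr;
}
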
*/ + rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms); #endif mod_tree_remove_init(mod); disable_ro_nx(&mod->init_layout); @@ -3496,7 +3514,7 @@ static int load_module(struct load_info *info, const char __user *uargs, /* Module is ready to execute: parsing args may do that. */ after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, - -32768, 32767, NULL, + -32768, 32767, mod, unknown_module_param_cb); if (IS_ERR(after_dashes)) { err = PTR_ERR(after_dashes); @@ -3627,6 +3645,11 @@ static inline int is_arm_mapping_symbol(const char *str) && (str[2] == '\0' || str[2] == '.'); } +static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum) +{ + return kallsyms->strtab + kallsyms->symtab[symnum].st_name; +} + static const char *get_ksymbol(struct module *mod, unsigned long addr, unsigned long *size, @@ -3634,6 +3657,7 @@ static const char *get_ksymbol(struct module *mod, { unsigned int i, best = 0; unsigned long nextval; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); /* At worse, next value is at end of module */ if (within_module_init(addr, mod)) @@ -3643,32 +3667,32 @@ static const char *get_ksymbol(struct module *mod, /* Scan for closest preceding symbol, and next symbol. (ELF starts real symbols at 1). */ - for (i = 1; i < mod->num_symtab; i++) { - if (mod->symtab[i].st_shndx == SHN_UNDEF) + for (i = 1; i < kallsyms->num_symtab; i++) { + if (kallsyms->symtab[i].st_shndx == SHN_UNDEF) continue; /* We ignore unnamed symbols: they're uninformative * and inserted at a whim. */ - if (mod->symtab[i].st_value <= addr - && mod->symtab[i].st_value > mod->symtab[best].st_value - && *(mod->strtab + mod->symtab[i].st_name) != '\0' - && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) + if (*symname(kallsyms, i) == '\0' + || is_arm_mapping_symbol(symname(kallsyms, i))) + continue; + + if (kallsyms->symtab[i].st_value <= addr + && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value) best = i; - if (mod->symtab[i].st_value > addr - && mod->symtab[i].st_value < nextval - && *(mod->strtab + mod->symtab[i].st_name) != '\0' - && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name)) - nextval = mod->symtab[i].st_value; + if (kallsyms->symtab[i].st_value > addr + && kallsyms->symtab[i].st_value < nextval) + nextval = kallsyms->symtab[i].st_value; } if (!best) return NULL; if (size) - *size = nextval - mod->symtab[best].st_value; + *size = nextval - kallsyms->symtab[best].st_value; if (offset) - *offset = addr - mod->symtab[best].st_value; - return mod->strtab + mod->symtab[best].st_name; + *offset = addr - kallsyms->symtab[best].st_value; + return symname(kallsyms, best); } /* For kallsyms to ask for address resolution. NULL means not found. 
Careful @@ -3758,19 +3782,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { + struct mod_kallsyms *kallsyms; + if (mod->state == MODULE_STATE_UNFORMED) continue; - if (symnum < mod->num_symtab) { - *value = mod->symtab[symnum].st_value; - *type = mod->symtab[symnum].st_info; - strlcpy(name, mod->strtab + mod->symtab[symnum].st_name, - KSYM_NAME_LEN); + kallsyms = rcu_dereference_sched(mod->kallsyms); + if (symnum < kallsyms->num_symtab) { + *value = kallsyms->symtab[symnum].st_value; + *type = kallsyms->symtab[symnum].st_info; + strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN); strlcpy(module_name, mod->name, MODULE_NAME_LEN); *exported = is_exported(name, *value, mod); preempt_enable(); return 0; } - symnum -= mod->num_symtab; + symnum -= kallsyms->num_symtab; } preempt_enable(); return -ERANGE; @@ -3779,11 +3805,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type, static unsigned long mod_find_symname(struct module *mod, const char *name) { unsigned int i; + struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms); - for (i = 0; i < mod->num_symtab; i++) - if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 && - mod->symtab[i].st_info != 'U') - return mod->symtab[i].st_value; + for (i = 0; i < kallsyms->num_symtab; i++) + if (strcmp(name, symname(kallsyms, i)) == 0 && + kallsyms->symtab[i].st_info != 'U') + return kallsyms->symtab[i].st_value; return 0; } @@ -3822,11 +3849,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *, module_assert_mutex(); list_for_each_entry(mod, &modules, list) { + /* We hold module_mutex: no need for rcu_dereference_sched */ + struct mod_kallsyms *kallsyms = mod->kallsyms; + if (mod->state == MODULE_STATE_UNFORMED) continue; - for (i = 0; i < mod->num_symtab; i++) { - ret = fn(data, mod->strtab + mod->symtab[i].st_name, - mod, mod->symtab[i].st_value); + for (i = 0; i < kallsyms->num_symtab; i++) { + ret = fn(data, symname(kallsyms, i), + mod, kallsyms->symtab[i].st_value); if (ret != 0) return ret; } diff --git a/kernel/signal.c b/kernel/signal.c index f3f1f7a972fd..0508544c8ced 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -3508,8 +3508,10 @@ static int sigsuspend(sigset_t *set) current->saved_sigmask = current->blocked; set_current_blocked(set); - __set_current_state(TASK_INTERRUPTIBLE); - schedule(); + while (!signal_pending(current)) { + __set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } set_restore_sigmask(); return -ERESTARTNOHAND; } diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index dda9e6742950..202df6cffcca 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -126,6 +126,13 @@ check_stack(unsigned long ip, unsigned long *stack) } /* + * Some archs may not have the passed in ip in the dump. + * If that happens, we need to show everything. + */ + if (i == stack_trace_max.nr_entries) + i = 0; + + /* * Now find where in the stack these are. */ x = 0; diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 61a0264e28f9..7ff5dc7d2ac5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -301,7 +301,23 @@ static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ static LIST_HEAD(workqueues); /* PR: list of all workqueues */ static bool workqueue_freezing; /* PL: have wqs started freezing? 
*/ -static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */ +/* PL: allowable cpus for unbound wqs and work items */ +static cpumask_var_t wq_unbound_cpumask; + +/* CPU where unbound work was last round robin scheduled from this CPU */ +static DEFINE_PER_CPU(int, wq_rr_cpu_last); + +/* + * Local execution of unbound work items is no longer guaranteed. The + * following always forces round-robin CPU selection on unbound work items + * to uncover usages which depend on it. + */ +#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU +static bool wq_debug_force_rr_cpu = true; +#else +static bool wq_debug_force_rr_cpu = false; +#endif +module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644); /* the per-cpu worker pools */ static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], @@ -570,6 +586,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, int node) { assert_rcu_or_wq_mutex_or_pool_mutex(wq); + + /* + * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a + * delayed item is pending. The plan is to keep CPU -> NODE + * mapping valid and stable across CPU on/offlines. Once that + * happens, this workaround can be removed. + */ + if (unlikely(node == NUMA_NO_NODE)) + return wq->dfl_pwq; + return rcu_dereference_raw(wq->numa_pwq_tbl[node]); } @@ -1298,6 +1324,39 @@ static bool is_chained_work(struct workqueue_struct *wq) return worker && worker->current_pwq->wq == wq; } +/* + * When queueing an unbound work item to a wq, prefer local CPU if allowed + * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to + * avoid perturbing sensitive tasks. + */ +static int wq_select_unbound_cpu(int cpu) +{ + static bool printed_dbg_warning; + int new_cpu; + + if (likely(!wq_debug_force_rr_cpu)) { + if (cpumask_test_cpu(cpu, wq_unbound_cpumask)) + return cpu; + } else if (!printed_dbg_warning) { + pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n"); + printed_dbg_warning = true; + } + + if (cpumask_empty(wq_unbound_cpumask)) + return cpu; + + new_cpu = __this_cpu_read(wq_rr_cpu_last); + new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask); + if (unlikely(new_cpu >= nr_cpu_ids)) { + new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask); + if (unlikely(new_cpu >= nr_cpu_ids)) + return cpu; + } + __this_cpu_write(wq_rr_cpu_last, new_cpu); + + return new_cpu; +} + static void __queue_work(int cpu, struct workqueue_struct *wq, struct work_struct *work) { @@ -1323,7 +1382,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, return; retry: if (req_cpu == WORK_CPU_UNBOUND) - cpu = raw_smp_processor_id(); + cpu = wq_select_unbound_cpu(raw_smp_processor_id()); /* pwq which will be used unless @work is executing elsewhere */ if (!(wq->flags & WQ_UNBOUND)) @@ -1464,13 +1523,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, timer_stats_timer_set_start_info(&dwork->timer); dwork->wq = wq; - /* timer isn't guaranteed to run in this cpu, record earlier */ - if (cpu == WORK_CPU_UNBOUND) - cpu = raw_smp_processor_id(); dwork->cpu = cpu; timer->expires = jiffies + delay; - add_timer_on(timer, cpu); + if (unlikely(cpu != WORK_CPU_UNBOUND)) + add_timer_on(timer, cpu); + else + add_timer(timer); } /** @@ -2355,7 +2414,8 @@ static void check_flush_dependency(struct workqueue_struct *target_wq, WARN_ONCE(current->flags & PF_MEMALLOC, "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf", current->pid, 
current->comm, target_wq->name, target_func); - WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM), + WARN_ONCE(worker && ((worker->current_pwq->wq->flags & + (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM), "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf", worker->current_pwq->wq->name, worker->current_func, target_wq->name, target_func); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index ecb9e75614bf..8bfd1aca7a3d 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1400,6 +1400,21 @@ config RCU_EQS_DEBUG endmenu # "RCU Debugging" +config DEBUG_WQ_FORCE_RR_CPU + bool "Force round-robin CPU selection for unbound work items" + depends on DEBUG_KERNEL + default n + help + Workqueue used to implicitly guarantee that work items queued + without explicit CPU specified are put on the local CPU. This + guarantee is no longer true and while local CPU is still + preferred work items may be put on foreign CPUs. Kernel + parameter "workqueue.debug_force_rr_cpu" is added to force + round-robin CPU selection to flush out usages which depend on the + now broken guarantee. This config option enables the debug + feature by default. When enabled, memory and cache locality will + be impacted. + config DEBUG_BLOCK_EXT_DEVT bool "Force extended block device numbers and spread them" depends on DEBUG_KERNEL diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 49518fb48cab..e07c1ba9ba13 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -18,6 +18,8 @@ config UBSAN_SANITIZE_ALL This option activates instrumentation for the entire kernel. If you don't enable this option, you have to explicitly specify UBSAN_SANITIZE := y for the files/directories you want to check for UB. + Enabling this option will get kernel image size increased + significantly. config UBSAN_ALIGNMENT bool "Enable checking of pointers alignment" @@ -25,5 +27,5 @@ config UBSAN_ALIGNMENT default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS help This option enables detection of unaligned memory accesses. - Enabling this option on architectures that support unalligned + Enabling this option on architectures that support unaligned accesses may produce a lot of false positives. 
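
For reference, the selection policy introduced by wq_select_unbound_cpu() above boils down to: keep the local CPU when wq_unbound_cpumask allows it, otherwise rotate through the allowed-and-online CPUs starting after the previous pick. A standalone sketch with plain bitmasks instead of struct cpumask (masks and CPU count are illustrative):

#include <stdio.h>

#define NR_CPUS 8

static unsigned int allowed_mask = 0x0c;	/* CPUs 2 and 3 allowed for unbound work */
static unsigned int online_mask  = 0xff;
static int last_pick = -1;			/* per-CPU in the kernel; one slot here */

static int select_unbound_cpu(int local_cpu)
{
	int cpu, i;

	if (allowed_mask & (1u << local_cpu))
		return local_cpu;		/* prefer locality when permitted */
	if (!(allowed_mask & online_mask))
		return local_cpu;		/* nothing usable, fall back */

	/* round robin: first allowed+online CPU strictly after the last pick */
	for (i = 1; i <= NR_CPUS; i++) {
		cpu = (last_pick + i) % NR_CPUS;
		if ((allowed_mask & online_mask) & (1u << cpu)) {
			last_pick = cpu;
			return cpu;
		}
	}
	return local_cpu;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("pick %d: cpu %d\n", i, select_unbound_cpu(0));	/* 2, 3, 2, 3 */
	return 0;
}
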
diff --git a/lib/dump_stack.c b/lib/dump_stack.c index 6745c6230db3..c30d07e99dba 100644 --- a/lib/dump_stack.c +++ b/lib/dump_stack.c @@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1); asmlinkage __visible void dump_stack(void) { + unsigned long flags; int was_locked; int old; int cpu; @@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void) * Permit this cpu to perform nested stack dumps while serialising * against other CPUs */ - preempt_disable(); - retry: + local_irq_save(flags); cpu = smp_processor_id(); old = atomic_cmpxchg(&dump_lock, -1, cpu); if (old == -1) { @@ -43,6 +43,7 @@ retry: } else if (old == cpu) { was_locked = 1; } else { + local_irq_restore(flags); cpu_relax(); goto retry; } @@ -52,7 +53,7 @@ retry: if (!was_locked) atomic_set(&dump_lock, -1); - preempt_enable(); + local_irq_restore(flags); } #else asmlinkage __visible void dump_stack(void) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index fcf5d98574ce..6b79e9026e24 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1019,9 +1019,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, return 0; radix_tree_for_each_slot(slot, root, &iter, first_index) { - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); + results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; + if (radix_tree_is_indirect_ptr(results[ret])) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (++ret == max_items) break; } @@ -1098,9 +1102,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, return 0; radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) { - results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot)); + results[ret] = rcu_dereference_raw(*slot); if (!results[ret]) continue; + if (radix_tree_is_indirect_ptr(results[ret])) { + slot = radix_tree_iter_retry(&iter); + continue; + } if (++ret == max_items) break; } diff --git a/lib/scatterlist.c b/lib/scatterlist.c index bafa9933fa76..004fc70fc56a 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -598,9 +598,9 @@ EXPORT_SYMBOL(sg_miter_next); * * Description: * Stops mapping iterator @miter. @miter should have been started - * started using sg_miter_start(). A stopped iteration can be - * resumed by calling sg_miter_next() on it. This is useful when - * resources (kmap) need to be released during iteration. + * using sg_miter_start(). A stopped iteration can be resumed by + * calling sg_miter_next() on it. This is useful when resources (kmap) + * need to be released during iteration. * * Context: * Preemption disabled if the SG_MITER_ATOMIC is set. 
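
The dump_stack() change above replaces preempt_disable() with local_irq_save() so the cmpxchg-based "one dumper at a time, but the owner may nest" lock cannot deadlock against an interrupt arriving on the owning CPU. The locking idea itself, reduced to C11 atomics with thread ids standing in for CPU ids; note the real code re-enables interrupts and does cpu_relax() between retries:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int dump_lock = ATOMIC_VAR_INIT(-1);	/* -1: free, otherwise owner id */

/* true if we took the lock, false if we are nesting inside our own dump */
static bool dump_lock_acquire(int self)
{
	int expected;

	for (;;) {
		expected = -1;
		if (atomic_compare_exchange_strong(&dump_lock, &expected, self))
			return true;	/* we own it now */
		if (expected == self)
			return false;	/* recursive dump by the same owner */
		/* someone else is dumping: spin and try again */
	}
}

static void dump_lock_release(bool was_owner)
{
	if (was_owner)
		atomic_store(&dump_lock, -1);
}
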
Don't care diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c index 98866a770770..25b5cbfb7615 100644 --- a/lib/test-string_helpers.c +++ b/lib/test-string_helpers.c @@ -327,36 +327,67 @@ out: } #define string_get_size_maxbuf 16 -#define test_string_get_size_one(size, blk_size, units, exp_result) \ +#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \ do { \ - BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf); \ - __test_string_get_size((size), (blk_size), (units), \ - (exp_result)); \ + BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \ + BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \ + __test_string_get_size((size), (blk_size), (exp_result10), \ + (exp_result2)); \ } while (0) -static __init void __test_string_get_size(const u64 size, const u64 blk_size, - const enum string_size_units units, - const char *exp_result) +static __init void test_string_get_size_check(const char *units, + const char *exp, + char *res, + const u64 size, + const u64 blk_size) { - char buf[string_get_size_maxbuf]; - - string_get_size(size, blk_size, units, buf, sizeof(buf)); - if (!memcmp(buf, exp_result, strlen(exp_result) + 1)) + if (!memcmp(res, exp, strlen(exp) + 1)) return; - buf[sizeof(buf) - 1] = '\0'; - pr_warn("Test 'test_string_get_size_one' failed!\n"); - pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n", + res[string_get_size_maxbuf - 1] = '\0'; + + pr_warn("Test 'test_string_get_size' failed!\n"); + pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n", size, blk_size, units); - pr_warn("expected: '%s', got '%s'\n", exp_result, buf); + pr_warn("expected: '%s', got '%s'\n", exp, res); +} + +static __init void __test_string_get_size(const u64 size, const u64 blk_size, + const char *exp_result10, + const char *exp_result2) +{ + char buf10[string_get_size_maxbuf]; + char buf2[string_get_size_maxbuf]; + + string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10)); + string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2)); + + test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10, + size, blk_size); + + test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2, + size, blk_size); } static __init void test_string_get_size(void) { - test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB"); - test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB"); - test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B"); + /* small values */ + test_string_get_size_one(0, 512, "0 B", "0 B"); + test_string_get_size_one(1, 512, "512 B", "512 B"); + test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB"); + + /* normal values */ + test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB"); + test_string_get_size_one(500118192, 512, "256 GB", "238 GiB"); + test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB"); + + /* weird block sizes */ + test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB"); + + /* huge values */ + test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB"); + test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB"); } static int __init test_string_helpers_init(void) diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 48ff9c36644d..f44e178e6ede 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -1590,22 +1590,23 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, return buf; } case 'K': - /* - * %pK cannot be used in IRQ context because its test - * for CAP_SYSLOG would be meaningless. 
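
The reworked string_get_size() tests above check both unit systems for every input, which makes the rounding visible: 16384 blocks of 512 bytes is 8,388,608 bytes, i.e. "8.39 MB" in decimal units and "8.00 MiB" in binary units. A quick userspace check of those two figures (plain division, not the kernel's formatting code):

#include <stdio.h>

int main(void)
{
	unsigned long long bytes = 16384ULL * 512;	/* 8,388,608 */

	printf("%.2f MB\n",  bytes / 1e6);		 /* 8.39 MB  (STRING_UNITS_10) */
	printf("%.2f MiB\n", bytes / (double)(1 << 20)); /* 8.00 MiB (STRING_UNITS_2)  */
	return 0;
}
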
- */ - if (kptr_restrict && (in_irq() || in_serving_softirq() || - in_nmi())) { - if (spec.field_width == -1) - spec.field_width = default_width; - return string(buf, end, "pK-error", spec); - } - switch (kptr_restrict) { case 0: /* Always print %pK values */ break; case 1: { + const struct cred *cred; + + /* + * kptr_restrict==1 cannot be used in IRQ context + * because its test for CAP_SYSLOG would be meaningless. + */ + if (in_irq() || in_serving_softirq() || in_nmi()) { + if (spec.field_width == -1) + spec.field_width = default_width; + return string(buf, end, "pK-error", spec); + } + /* * Only print the real pointer value if the current * process has CAP_SYSLOG and is running with the @@ -1615,8 +1616,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, * leak pointer values if a binary opens a file using * %pK and then elevates privileges before reading it. */ - const struct cred *cred = current_cred(); - + cred = current_cred(); if (!has_capability_noaudit(current, CAP_SYSLOG) || !uid_eq(cred->euid, cred->uid) || !gid_eq(cred->egid, cred->gid)) diff --git a/mm/Kconfig b/mm/Kconfig index 97a4e06b15c0..03cbfa072f42 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -624,7 +624,7 @@ config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT bool config DEFERRED_STRUCT_PAGE_INIT - bool "Defer initialisation of struct pages to kswapd" + bool "Defer initialisation of struct pages to kthreads" default n depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT depends on MEMORY_HOTPLUG @@ -633,9 +633,10 @@ config DEFERRED_STRUCT_PAGE_INIT single thread. On very large machines this can take a considerable amount of time. If this option is set, large machines will bring up a subset of memmap at boot and then initialise the rest in parallel - when kswapd starts. This has a potential performance impact on - processes running early in the lifetime of the systemm until kswapd - finishes the initialisation. + by starting one-off "pgdatinitX" kernel thread for each node X. This + has a potential performance impact on processes running early in the + lifetime of the system until these kthreads finish the + initialisation. config IDLE_PAGE_TRACKING bool "Enable idle page tracking" diff --git a/mm/backing-dev.c b/mm/backing-dev.c index cc5d29d2da9b..c554d173a65f 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -328,7 +328,7 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, return 0; out_destroy_stat: - while (--i) + while (i--) percpu_counter_destroy(&wb->stat[i]); fprop_local_destroy_percpu(&wb->completions); out_put_cong: @@ -989,7 +989,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout) * here rather than calling cond_resched(). */ if (current->flags & PF_WQ_WORKER) - schedule_timeout(1); + schedule_timeout_uninterruptible(1); else cond_resched(); diff --git a/mm/filemap.c b/mm/filemap.c index bc943867d68c..23edccecadb0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1890,6 +1890,7 @@ EXPORT_SYMBOL(generic_file_read_iter); * page_cache_read - adds requested page to the page cache if not already there * @file: file to read * @offset: page index + * @gfp_mask: memory allocation flags * * This adds the requested page to the page cache if it isn't already there, * and schedules an I/O to read in its contents from disk. @@ -430,10 +430,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) * Anon pages in shared mappings are surprising: now * just reject it. 
*/ - if (!is_cow_mapping(vm_flags)) { - WARN_ON_ONCE(vm_flags & VM_MAYWRITE); + if (!is_cow_mapping(vm_flags)) return -EFAULT; - } } } else if (!(vm_flags & VM_READ)) { if (!(gup_flags & FOLL_FORCE)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index fd3a07b3e6f4..08fc0ba2207e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -138,9 +138,6 @@ static struct khugepaged_scan khugepaged_scan = { .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head), }; -static DEFINE_SPINLOCK(split_queue_lock); -static LIST_HEAD(split_queue); -static unsigned long split_queue_len; static struct shrinker deferred_split_shrinker; static void set_recommended_min_free_kbytes(void) @@ -861,7 +858,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, return false; entry = mk_pmd(zero_page, vma->vm_page_prot); entry = pmd_mkhuge(entry); - pgtable_trans_huge_deposit(mm, pmd, pgtable); + if (pgtable) + pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, haddr, pmd, entry); atomic_long_inc(&mm->nr_ptes); return true; @@ -1039,13 +1037,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, spinlock_t *dst_ptl, *src_ptl; struct page *src_page; pmd_t pmd; - pgtable_t pgtable; + pgtable_t pgtable = NULL; int ret; - ret = -ENOMEM; - pgtable = pte_alloc_one(dst_mm, addr); - if (unlikely(!pgtable)) - goto out; + if (!vma_is_dax(vma)) { + ret = -ENOMEM; + pgtable = pte_alloc_one(dst_mm, addr); + if (unlikely(!pgtable)) + goto out; + } dst_ptl = pmd_lock(dst_mm, dst_pmd); src_ptl = pmd_lockptr(src_mm, src_pmd); @@ -1076,7 +1076,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, goto out_unlock; } - if (pmd_trans_huge(pmd)) { + if (!vma_is_dax(vma)) { /* thp accounting separate from pmd_devmap accounting */ src_page = pmd_page(pmd); VM_BUG_ON_PAGE(!PageHead(src_page), src_page); @@ -3358,6 +3358,7 @@ int total_mapcount(struct page *page) int split_huge_page_to_list(struct page *page, struct list_head *list) { struct page *head = compound_head(page); + struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); struct anon_vma *anon_vma; int count, mapcount, ret; bool mlocked; @@ -3401,19 +3402,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) lru_add_drain(); /* Prevent deferred_split_scan() touching ->_count */ - spin_lock_irqsave(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); count = page_count(head); mapcount = total_mapcount(head); if (!mapcount && count == 1) { if (!list_empty(page_deferred_list(head))) { - split_queue_len--; + pgdata->split_queue_len--; list_del(page_deferred_list(head)); } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); __split_huge_page(page, list); ret = 0; } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) { - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); pr_alert("total_mapcount: %u, page_count(): %u\n", mapcount, count); if (PageTail(page)) @@ -3421,7 +3422,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list) dump_page(page, "total_mapcount(head) > 0"); BUG(); } else { - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); unfreeze_page(anon_vma, head); ret = -EBUSY; } @@ -3436,64 +3437,65 @@ out: void free_transhuge_page(struct page *page) { + struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); unsigned long flags; - 
spin_lock_irqsave(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); if (!list_empty(page_deferred_list(page))) { - split_queue_len--; + pgdata->split_queue_len--; list_del(page_deferred_list(page)); } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); free_compound_page(page); } void deferred_split_huge_page(struct page *page) { + struct pglist_data *pgdata = NODE_DATA(page_to_nid(page)); unsigned long flags; VM_BUG_ON_PAGE(!PageTransHuge(page), page); - spin_lock_irqsave(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); if (list_empty(page_deferred_list(page))) { - list_add_tail(page_deferred_list(page), &split_queue); - split_queue_len++; + list_add_tail(page_deferred_list(page), &pgdata->split_queue); + pgdata->split_queue_len++; } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); } static unsigned long deferred_split_count(struct shrinker *shrink, struct shrink_control *sc) { - /* - * Split a page from split_queue will free up at least one page, - * at most HPAGE_PMD_NR - 1. We don't track exact number. - * Let's use HPAGE_PMD_NR / 2 as ballpark. - */ - return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2; + struct pglist_data *pgdata = NODE_DATA(sc->nid); + return ACCESS_ONCE(pgdata->split_queue_len); } static unsigned long deferred_split_scan(struct shrinker *shrink, struct shrink_control *sc) { + struct pglist_data *pgdata = NODE_DATA(sc->nid); unsigned long flags; LIST_HEAD(list), *pos, *next; struct page *page; int split = 0; - spin_lock_irqsave(&split_queue_lock, flags); - list_splice_init(&split_queue, &list); - + spin_lock_irqsave(&pgdata->split_queue_lock, flags); /* Take pin on all head pages to avoid freeing them under us */ - list_for_each_safe(pos, next, &list) { + list_for_each_safe(pos, next, &pgdata->split_queue) { page = list_entry((void *)pos, struct page, mapping); page = compound_head(page); - /* race with put_compound_page() */ - if (!get_page_unless_zero(page)) { + if (get_page_unless_zero(page)) { + list_move(page_deferred_list(page), &list); + } else { + /* We lost race with put_compound_page() */ list_del_init(page_deferred_list(page)); - split_queue_len--; + pgdata->split_queue_len--; } + if (!--sc->nr_to_scan) + break; } - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); list_for_each_safe(pos, next, &list) { page = list_entry((void *)pos, struct page, mapping); @@ -3505,17 +3507,24 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, put_page(page); } - spin_lock_irqsave(&split_queue_lock, flags); - list_splice_tail(&list, &split_queue); - spin_unlock_irqrestore(&split_queue_lock, flags); + spin_lock_irqsave(&pgdata->split_queue_lock, flags); + list_splice_tail(&list, &pgdata->split_queue); + spin_unlock_irqrestore(&pgdata->split_queue_lock, flags); - return split * HPAGE_PMD_NR / 2; + /* + * Stop shrinker if we didn't split any page, but the queue is empty. + * This can happen if pages were freed under us. 
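
Moving the deferred-split queue into struct pglist_data gives every NUMA node its own lock, list and length, and the shrinker (now SHRINKER_NUMA_AWARE) only touches the node it was asked about. The sharding itself, as a small userspace analogue with one mutex-protected list per node (node count and types are illustrative):

#include <pthread.h>
#include <stddef.h>

#define MAX_NUMNODES 4		/* illustrative */

struct item { struct item *next; };

/* one queue per node, so queueing on node A never contends with node B */
struct node_queue {
	pthread_mutex_t lock;
	struct item *head;
	unsigned long len;
};

static struct node_queue node_queues[MAX_NUMNODES];

static void queues_init(void)
{
	for (int i = 0; i < MAX_NUMNODES; i++)
		pthread_mutex_init(&node_queues[i].lock, NULL);
}

static void queue_on_node(int nid, struct item *it)
{
	struct node_queue *q = &node_queues[nid];

	pthread_mutex_lock(&q->lock);
	it->next = q->head;
	q->head = it;
	q->len++;
	pthread_mutex_unlock(&q->lock);
}

/* what a NUMA-aware shrinker's count_objects() would report for one node */
static unsigned long count_on_node(int nid)
{
	return node_queues[nid].len;	/* racy read is fine for a heuristic */
}
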
+ */ + if (!split && list_empty(&pgdata->split_queue)) + return SHRINK_STOP; + return split; } static struct shrinker deferred_split_shrinker = { .count_objects = deferred_split_count, .scan_objects = deferred_split_scan, .seeks = DEFAULT_SEEKS, + .flags = SHRINKER_NUMA_AWARE, }; #ifdef CONFIG_DEBUG_FS diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 12908dcf5831..06ae13e869d0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1001,7 +1001,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed) ((node = hstate_next_node_to_free(hs, mask)) || 1); \ nr_nodes--) -#if defined(CONFIG_CMA) && defined(CONFIG_X86_64) +#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)) static void destroy_compound_gigantic_page(struct page *page, unsigned int order) { @@ -1214,8 +1214,8 @@ void free_huge_page(struct page *page) set_page_private(page, 0); page->mapping = NULL; - BUG_ON(page_count(page)); - BUG_ON(page_mapcount(page)); + VM_BUG_ON_PAGE(page_count(page), page); + VM_BUG_ON_PAGE(page_mapcount(page), page); restore_reserve = PagePrivate(page); ClearPagePrivate(page); @@ -1286,6 +1286,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order) set_page_count(p, 0); set_compound_head(p, page); } + atomic_set(compound_mapcount_ptr(page), -1); } /* diff --git a/mm/internal.h b/mm/internal.h index ed8b5ffcf9b1..a38a21ebddb4 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -216,6 +216,37 @@ static inline bool is_cow_mapping(vm_flags_t flags) return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; } +/* + * These three helpers classifies VMAs for virtual memory accounting. + */ + +/* + * Executable code area - executable, not writable, not stack + */ +static inline bool is_exec_mapping(vm_flags_t flags) +{ + return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC; +} + +/* + * Stack area - atomatically grows in one direction + * + * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous: + * do_mmap() forbids all other combinations. + */ +static inline bool is_stack_mapping(vm_flags_t flags) +{ + return (flags & VM_STACK) == VM_STACK; +} + +/* + * Data area - private, writable, not stack + */ +static inline bool is_data_mapping(vm_flags_t flags) +{ + return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; +} + /* mm/util.c */ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node *rb_parent); diff --git a/mm/memblock.c b/mm/memblock.c index d2ed81e59a94..dd7989929f13 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1448,7 +1448,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) * Remaining API functions */ -phys_addr_t __init memblock_phys_mem_size(void) +phys_addr_t __init_memblock memblock_phys_mem_size(void) { return memblock.memory.total_size; } diff --git a/mm/memory.c b/mm/memory.c index 30991f83d0bf..635451abc8f7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1591,10 +1591,15 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP * without pte special, it would there be refcounted as a normal page. 
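
The three helpers added to mm/internal.h above give vm_stat_account() and the RLIMIT_DATA check one shared definition of "exec", "stack" and "data" mappings. Their behaviour on a few flag combinations, with toy bit values standing in for the real VM_* constants (only the bit tests match the patch):

#include <assert.h>

typedef unsigned long vm_flags_t;

/* toy values; the real VM_* constants live in <linux/mm.h> */
#define VM_READ    0x01UL
#define VM_WRITE   0x02UL
#define VM_EXEC    0x04UL
#define VM_SHARED  0x08UL
#define VM_STACK   0x10UL	/* VM_GROWSDOWN or VM_GROWSUP, depending on arch */

static int is_exec_mapping(vm_flags_t f)  { return (f & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC; }
static int is_stack_mapping(vm_flags_t f) { return (f & VM_STACK) == VM_STACK; }
static int is_data_mapping(vm_flags_t f)  { return (f & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE; }

int main(void)
{
	assert(is_exec_mapping(VM_READ | VM_EXEC));		  /* text segment */
	assert(!is_exec_mapping(VM_READ | VM_WRITE | VM_EXEC));  /* writable code is not "exec" */
	assert(is_stack_mapping(VM_READ | VM_WRITE | VM_STACK));
	assert(is_data_mapping(VM_READ | VM_WRITE));		  /* private writable data */
	assert(!is_data_mapping(VM_READ | VM_WRITE | VM_SHARED));/* shared mappings excluded */
	return 0;
}
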
*/ - if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) { + if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) { struct page *page; - page = pfn_t_to_page(pfn); + /* + * At this point we are committed to insert_page() + * regardless of whether the caller specified flags that + * result in pfn_t_has_page() == false. + */ + page = pfn_to_page(pfn_t_to_pfn(pfn)); return insert_page(vma, addr, page, vma->vm_page_prot); } return insert_pfn(vma, addr, pfn, vma->vm_page_prot); @@ -2232,11 +2237,6 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, page_cache_get(old_page); - /* - * Only catch write-faults on shared writable pages, - * read-only shared pages can get COWed by - * get_user_pages(.write=1, .force=1). - */ if (vma->vm_ops && vma->vm_ops->page_mkwrite) { int tmp; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 27d135408a22..4c4187c0e1de 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -548,8 +548,7 @@ retry: goto retry; } - if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) - migrate_page_add(page, qp->pagelist, flags); + migrate_page_add(page, qp->pagelist, flags); } pte_unmap_unlock(pte - 1, ptl); cond_resched(); @@ -625,7 +624,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, unsigned long endvma = vma->vm_end; unsigned long flags = qp->flags; - if (vma->vm_flags & VM_PFNMAP) + if (!vma_migratable(vma)) return 1; if (endvma > end) @@ -644,16 +643,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end, if (flags & MPOL_MF_LAZY) { /* Similar to task_numa_work, skip inaccessible VMAs */ - if (vma_migratable(vma) && - vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) + if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) change_prot_numa(vma, start, endvma); return 1; } - if ((flags & MPOL_MF_STRICT) || - ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) && - vma_migratable(vma))) - /* queue pages from current vma */ + /* queue pages from current vma */ + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) return 0; return 1; } diff --git a/mm/mmap.c b/mm/mmap.c index 84b12624ceb0..2f2415a7a688 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -42,6 +42,7 @@ #include <linux/memory.h> #include <linux/printk.h> #include <linux/userfaultfd_k.h> +#include <linux/moduleparam.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> @@ -69,6 +70,8 @@ const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX; int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS; #endif +static bool ignore_rlimit_data = true; +core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644); static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, @@ -387,8 +390,9 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma) } #ifdef CONFIG_DEBUG_VM_RB -static int browse_rb(struct rb_root *root) +static int browse_rb(struct mm_struct *mm) { + struct rb_root *root = &mm->mm_rb; int i = 0, j, bug = 0; struct rb_node *nd, *pn = NULL; unsigned long prev = 0, pend = 0; @@ -411,12 +415,14 @@ static int browse_rb(struct rb_root *root) vma->vm_start, vma->vm_end); bug = 1; } + spin_lock(&mm->page_table_lock); if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { pr_emerg("free gap %lx, correct %lx\n", vma->rb_subtree_gap, vma_compute_subtree_gap(vma)); bug = 1; } + spin_unlock(&mm->page_table_lock); i++; pn = nd; prev = vma->vm_start; @@ -453,12 +459,16 @@ static void validate_mm(struct mm_struct *mm) struct vm_area_struct *vma = mm->mmap; while (vma) { + struct 
anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; - vma_lock_anon_vma(vma); - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - anon_vma_interval_tree_verify(avc); - vma_unlock_anon_vma(vma); + if (anon_vma) { + anon_vma_lock_read(anon_vma); + list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) + anon_vma_interval_tree_verify(avc); + anon_vma_unlock_read(anon_vma); + } + highest_address = vma->vm_end; vma = vma->vm_next; i++; @@ -472,7 +482,7 @@ static void validate_mm(struct mm_struct *mm) mm->highest_vm_end, highest_address); bug = 1; } - i = browse_rb(&mm->mm_rb); + i = browse_rb(mm); if (i != mm->map_count) { if (i != -1) pr_emerg("map_count %d rb %d\n", mm->map_count, i); @@ -2139,32 +2149,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns int expand_upwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; - int error; + int error = 0; if (!(vma->vm_flags & VM_GROWSUP)) return -EFAULT; - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ + /* Guard against wrapping around to address 0. */ + if (address < PAGE_ALIGN(address+4)) + address = PAGE_ALIGN(address+4); + else + return -ENOMEM; + + /* We must make sure the anon_vma is allocated. */ if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; - vma_lock_anon_vma(vma); /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. - * Also guard against wrapping around to address 0. */ - if (address < PAGE_ALIGN(address+4)) - address = PAGE_ALIGN(address+4); - else { - vma_unlock_anon_vma(vma); - return -ENOMEM; - } - error = 0; + anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address > vma->vm_end) { @@ -2182,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. - * vma_lock_anon_vma() doesn't help here, as + * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard @@ -2205,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) } } } - vma_unlock_anon_vma(vma); + anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; @@ -2221,25 +2226,21 @@ int expand_downwards(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; int error; - /* - * We must make sure the anon_vma is allocated - * so that the anon_vma locking is not a noop. - */ - if (unlikely(anon_vma_prepare(vma))) - return -ENOMEM; - address &= PAGE_MASK; error = security_mmap_addr(address); if (error) return error; - vma_lock_anon_vma(vma); + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; /* * vma->vm_start/vm_end cannot change under us because the caller * is required to hold the mmap_sem in read mode. We need the * anon_vma lock to serialize against concurrent expand_stacks. 
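
One detail worth noting in the reshuffled expand_upwards() above: the wrap check now runs before any allocation or locking, and it is pure unsigned arithmetic: if address + 4 wraps past the top of the address space, PAGE_ALIGN(address + 4) comes out smaller than address and the growth is refused. A tiny userspace demonstration of that comparison, with PAGE_ALIGN reimplemented for the sketch (assumes 64-bit unsigned long):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int grow_ok(unsigned long address)
{
	/* mirrors: if (address < PAGE_ALIGN(address + 4)) ... else return -ENOMEM; */
	return address < PAGE_ALIGN(address + 4);
}

int main(void)
{
	printf("0x00007fffffffe000 -> %s\n", grow_ok(0x00007fffffffe000UL) ? "grow" : "refuse");
	printf("0xfffffffffffff000 -> %s\n", grow_ok(0xfffffffffffff000UL) ? "grow" : "refuse");
	return 0;
}
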
*/ + anon_vma_lock_write(vma->anon_vma); /* Somebody else might have raced and expanded it already */ if (address < vma->vm_start) { @@ -2257,7 +2258,7 @@ int expand_downwards(struct vm_area_struct *vma, * updates, but we only hold a shared mmap_sem * lock here, so we need to protect against * concurrent vma expansions. - * vma_lock_anon_vma() doesn't help here, as + * anon_vma_lock_write() doesn't help here, as * we don't guarantee that all growable vmas * in a mm share the same root anon vma. * So, we reuse mm->page_table_lock to guard @@ -2278,7 +2279,7 @@ int expand_downwards(struct vm_area_struct *vma, } } } - vma_unlock_anon_vma(vma); + anon_vma_unlock_write(vma->anon_vma); khugepaged_enter_vma_merge(vma, vma->vm_flags); validate_mm(mm); return error; @@ -2982,9 +2983,17 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages) if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) return false; - if ((flags & (VM_WRITE | VM_SHARED | (VM_STACK_FLAGS & - (VM_GROWSUP | VM_GROWSDOWN)))) == VM_WRITE) - return mm->data_vm + npages <= rlimit(RLIMIT_DATA); + if (is_data_mapping(flags) && + mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { + if (ignore_rlimit_data) + pr_warn_once("%s (%d): VmData %lu exceed data ulimit " + "%lu. Will be forbidden soon.\n", + current->comm, current->pid, + (mm->data_vm + npages) << PAGE_SHIFT, + rlimit(RLIMIT_DATA)); + else + return false; + } return true; } @@ -2993,11 +3002,11 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages) { mm->total_vm += npages; - if ((flags & (VM_EXEC | VM_WRITE)) == VM_EXEC) + if (is_exec_mapping(flags)) mm->exec_vm += npages; - else if (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN))) + else if (is_stack_mapping(flags)) mm->stack_vm += npages; - else if ((flags & (VM_WRITE | VM_SHARED)) == VM_WRITE) + else if (is_data_mapping(flags)) mm->data_vm += npages; } diff --git a/mm/mprotect.c b/mm/mprotect.c index 8eb7bb40dc40..f7cb3d4d9c2e 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -160,9 +160,11 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma, } if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { - if (next - addr != HPAGE_PMD_SIZE) + if (next - addr != HPAGE_PMD_SIZE) { split_huge_pmd(vma, pmd, addr); - else { + if (pmd_none(*pmd)) + continue; + } else { int nr_ptes = change_huge_pmd(vma, pmd, addr, newprot, prot_numa); diff --git a/mm/mremap.c b/mm/mremap.c index d77946a997f7..8eeba02fc991 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -210,6 +210,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma, } } split_huge_pmd(vma, old_pmd, old_addr); + if (pmd_none(*old_pmd)) + continue; VM_BUG_ON(pmd_trans_huge(*old_pmd)); } if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 63358d9f9aa9..838ca8bb64f7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5210,6 +5210,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat) pgdat->numabalancing_migrate_nr_pages = 0; pgdat->numabalancing_migrate_next_window = jiffies; #endif +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + spin_lock_init(&pgdat->split_queue_lock); + INIT_LIST_HEAD(&pgdat->split_queue); + pgdat->split_queue_len = 0; +#endif init_waitqueue_head(&pgdat->kswapd_wait); init_waitqueue_head(&pgdat->pfmemalloc_wait); pgdat_page_ext_init(pgdat); @@ -6615,7 +6620,7 @@ bool is_pageblock_removable_nolock(struct page *page) return !has_unmovable_pages(zone, page, 0, true); } -#ifdef CONFIG_CMA +#if 
(defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA) static unsigned long pfn_max_align_down(unsigned long pfn) { diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index 9d4767698a1c..06a005b979a7 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -90,9 +90,9 @@ pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, * ARCHes with special requirements for evicting THP backing TLB entries can * implement this. Otherwise also, it can help optimize normal TLB flush in * THP regime. stock flush_tlb_range() typically has optimization to nuke the - * entire TLB TLB if flush span is greater than a threshhold, which will + * entire TLB if flush span is greater than a threshold, which will * likely be true for a single huge page. Thus a single thp flush will - * invalidate the entire TLB which is not desitable. + * invalidate the entire TLB which is not desirable. * e.g. see arch/arc: flush_pmd_tlb_range */ #define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) @@ -195,7 +195,9 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, VM_BUG_ON(address & ~HPAGE_PMD_MASK); VM_BUG_ON(pmd_trans_huge(*pmdp)); pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp); - flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + + /* collapse entails shooting down ptes not pmd */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); return pmd; } #endif diff --git a/mm/util.c b/mm/util.c index c108a6542d05..4fb14ca5a419 100644 --- a/mm/util.c +++ b/mm/util.c @@ -230,36 +230,11 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, } /* Check if the vma is being used as a stack by this task */ -static int vm_is_stack_for_task(struct task_struct *t, - struct vm_area_struct *vma) +int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t) { return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); } -/* - * Check if the vma is being used as a stack. - * If is_group is non-zero, check in the entire thread group or else - * just check in the current task. Returns the task_struct of the task - * that the vma is stack for. Must be called under rcu_read_lock(). 
- */ -struct task_struct *task_of_stack(struct task_struct *task, - struct vm_area_struct *vma, bool in_group) -{ - if (vm_is_stack_for_task(task, vma)) - return task; - - if (in_group) { - struct task_struct *t; - - for_each_thread(task, t) { - if (vm_is_stack_for_task(t, vma)) - return t; - } - } - - return NULL; -} - #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm) { diff --git a/mm/vmpressure.c b/mm/vmpressure.c index 9a6c0704211c..149fdf6c5c56 100644 --- a/mm/vmpressure.c +++ b/mm/vmpressure.c @@ -248,9 +248,8 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, if (tree) { spin_lock(&vmpr->sr_lock); - vmpr->tree_scanned += scanned; + scanned = vmpr->tree_scanned += scanned; vmpr->tree_reclaimed += reclaimed; - scanned = vmpr->scanned; spin_unlock(&vmpr->sr_lock); if (scanned < vmpressure_win) diff --git a/mm/vmscan.c b/mm/vmscan.c index eb3dd37ccd7c..71b1c29948db 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1443,7 +1443,7 @@ int isolate_lru_page(struct page *page) int ret = -EBUSY; VM_BUG_ON_PAGE(!page_count(page), page); - VM_BUG_ON_PAGE(PageTail(page), page); + WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"); if (PageLRU(page)) { struct zone *zone = page_zone(page); diff --git a/mm/vmstat.c b/mm/vmstat.c index 40b2c74ddf16..084c6725b373 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1396,10 +1396,15 @@ static void vmstat_update(struct work_struct *w) * Counters were updated so we expect more updates * to occur in the future. Keep on running the * update worker thread. + * If we were marked on cpu_stat_off clear the flag + * so that vmstat_shepherd doesn't schedule us again. */ - queue_delayed_work_on(smp_processor_id(), vmstat_wq, - this_cpu_ptr(&vmstat_work), - round_jiffies_relative(sysctl_stat_interval)); + if (!cpumask_test_and_clear_cpu(smp_processor_id(), + cpu_stat_off)) { + queue_delayed_work_on(smp_processor_id(), vmstat_wq, + this_cpu_ptr(&vmstat_work), + round_jiffies_relative(sysctl_stat_interval)); + } } else { /* * We did not update any counters so the app may be in @@ -1417,18 +1422,6 @@ static void vmstat_update(struct work_struct *w) * until the diffs stay at zero. The function is used by NOHZ and can only be * invoked when tick processing is not active. */ -void quiet_vmstat(void) -{ - if (system_state != SYSTEM_RUNNING) - return; - - do { - if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off)) - cancel_delayed_work(this_cpu_ptr(&vmstat_work)); - - } while (refresh_cpu_vm_stats(false)); -} - /* * Check if the diffs for a certain cpu indicate that * an update is needed. @@ -1452,6 +1445,30 @@ static bool need_update(int cpu) return false; } +void quiet_vmstat(void) +{ + if (system_state != SYSTEM_RUNNING) + return; + + /* + * If we are already in hands of the shepherd then there + * is nothing for us to do here. + */ + if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off)) + return; + + if (!need_update(smp_processor_id())) + return; + + /* + * Just refresh counters and do not care about the pending delayed + * vmstat_update. It doesn't fire that often to matter and canceling + * it would be too expensive from this path. + * vmstat_shepherd will take care about that for us. 
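
The vmstat rework above hands a CPU over to the shepherd through its bit in cpu_stat_off: quiet_vmstat() sets the bit (and bails if it was already set), vmstat_update() clears it instead of re-arming itself, and the shepherd re-arms only CPUs that both have the bit set and need an update. That test-and-set handshake, reduced to a single atomic mask; the helpers here are simplified stand-ins for the kernel's cpumask_test_and_{set,clear}_cpu():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong cpu_stat_off;	/* one bit per CPU: set = parked, shepherd owns re-arming */

static bool test_and_set_off(int cpu)
{
	unsigned long bit = 1UL << cpu;

	return atomic_fetch_or(&cpu_stat_off, bit) & bit;
}

static bool test_and_clear_off(int cpu)
{
	unsigned long bit = 1UL << cpu;

	return atomic_fetch_and(&cpu_stat_off, ~bit) & bit;
}

/* idle entry: park this CPU unless the shepherd already owns it */
static void quiet_cpu(int cpu)
{
	if (test_and_set_off(cpu))
		return;		/* already parked, nothing to do */
	/* one final counter fold would happen here (refresh_cpu_vm_stats) */
}

/* periodic worker noticing it was parked: clear the bit instead of re-arming */
static bool worker_should_rearm(int cpu)
{
	return !test_and_clear_off(cpu);
}
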
+ */ + refresh_cpu_vm_stats(false); +} + /* * Shepherd worker thread that checks the @@ -1469,18 +1486,25 @@ static void vmstat_shepherd(struct work_struct *w) get_online_cpus(); /* Check processors whose vmstat worker threads have been disabled */ - for_each_cpu(cpu, cpu_stat_off) - if (need_update(cpu) && - cpumask_test_and_clear_cpu(cpu, cpu_stat_off)) - - queue_delayed_work_on(cpu, vmstat_wq, - &per_cpu(vmstat_work, cpu), 0); + for_each_cpu(cpu, cpu_stat_off) { + struct delayed_work *dw = &per_cpu(vmstat_work, cpu); + if (need_update(cpu)) { + if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off)) + queue_delayed_work_on(cpu, vmstat_wq, dw, 0); + } else { + /* + * Cancel the work if quiet_vmstat has put this + * cpu on cpu_stat_off because the work item might + * be still scheduled + */ + cancel_delayed_work(dw); + } + } put_online_cpus(); schedule_delayed_work(&shepherd, round_jiffies_relative(sysctl_stat_interval)); - } static void __init start_shepherd_timer(void) @@ -1488,7 +1512,7 @@ static void __init start_shepherd_timer(void) int cpu; for_each_possible_cpu(cpu) - INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu), + INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu), vmstat_update); if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL)) diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index d040365ba98e..8a4cc2f7f0db 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c @@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, /* check that it's our buffer */ if (lowpan_is_ipv6(*skb_network_header(skb))) { + /* Pull off the 1-byte of 6lowpan header. */ + skb_pull(skb, 1); + /* Copy the packet so that the IPv6 header is * properly aligned. */ @@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, local_skb->protocol = htons(ETH_P_IPV6); local_skb->pkt_type = PACKET_HOST; + local_skb->dev = dev; skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); @@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, if (!local_skb) goto drop; + local_skb->dev = dev; + ret = iphc_decompress(local_skb, dev, chan); if (ret < 0) { kfree_skb(local_skb); @@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev, local_skb->protocol = htons(ETH_P_IPV6); local_skb->pkt_type = PACKET_HOST; - local_skb->dev = dev; if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) { diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index 41b5f3813f02..c78ee2dc9323 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -688,21 +688,29 @@ static u8 update_white_list(struct hci_request *req) * command to remove it from the controller. */ list_for_each_entry(b, &hdev->le_white_list, list) { - struct hci_cp_le_del_from_white_list cp; + /* If the device is neither in pend_le_conns nor + * pend_le_reports then remove it from the whitelist. 
+ */ + if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, + &b->bdaddr, b->bdaddr_type) && + !hci_pend_le_action_lookup(&hdev->pend_le_reports, + &b->bdaddr, b->bdaddr_type)) { + struct hci_cp_le_del_from_white_list cp; + + cp.bdaddr_type = b->bdaddr_type; + bacpy(&cp.bdaddr, &b->bdaddr); - if (hci_pend_le_action_lookup(&hdev->pend_le_conns, - &b->bdaddr, b->bdaddr_type) || - hci_pend_le_action_lookup(&hdev->pend_le_reports, - &b->bdaddr, b->bdaddr_type)) { - white_list_entries++; + hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, + sizeof(cp), &cp); continue; } - cp.bdaddr_type = b->bdaddr_type; - bacpy(&cp.bdaddr, &b->bdaddr); + if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { + /* White list can not be used with RPAs */ + return 0x00; + } - hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, - sizeof(cp), &cp); + white_list_entries++; } /* Since all no longer valid white list entries have been diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 39a5149f3010..eb4f5f24cbe3 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -197,10 +197,20 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) chan->sport = psm; err = 0; } else { - u16 p; + u16 p, start, end, incr; + + if (chan->src_type == BDADDR_BREDR) { + start = L2CAP_PSM_DYN_START; + end = L2CAP_PSM_AUTO_END; + incr = 2; + } else { + start = L2CAP_PSM_LE_DYN_START; + end = L2CAP_PSM_LE_DYN_END; + incr = 1; + } err = -EINVAL; - for (p = 0x1001; p < 0x1100; p += 2) + for (p = start; p <= end; p += incr) if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) { chan->psm = cpu_to_le16(p); chan->sport = cpu_to_le16(p); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 1bb551527044..e4cae72895a7 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -58,7 +58,7 @@ static int l2cap_validate_bredr_psm(u16 psm) return -EINVAL; /* Restrict usage of well-known PSMs */ - if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) + if (psm < L2CAP_PSM_DYN_START && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; return 0; @@ -67,11 +67,11 @@ static int l2cap_validate_bredr_psm(u16 psm) static int l2cap_validate_le_psm(u16 psm) { /* Valid LE_PSM ranges are defined only until 0x00ff */ - if (psm > 0x00ff) + if (psm > L2CAP_PSM_LE_DYN_END) return -EINVAL; /* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */ - if (psm <= 0x007f && !capable(CAP_NET_BIND_SERVICE)) + if (psm < L2CAP_PSM_LE_DYN_START && !capable(CAP_NET_BIND_SERVICE)) return -EACCES; return 0; @@ -125,6 +125,9 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) goto done; } + bacpy(&chan->src, &la.l2_bdaddr); + chan->src_type = la.l2_bdaddr_type; + if (la.l2_cid) err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid)); else @@ -156,9 +159,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) break; } - bacpy(&chan->src, &la.l2_bdaddr); - chan->src_type = la.l2_bdaddr_type; - if (chan->psm && bdaddr_type_is_le(chan->src_type)) chan->mode = L2CAP_MODE_LE_FLOWCTL; diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index ffed8a1d4f27..4b175df35184 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn) hcon->dst_type = smp->remote_irk->addr_type; queue_work(hdev->workqueue, &conn->id_addr_update_work); } - - /* When receiving an indentity resolving key for - * a remote device that does not use a resolvable - * private 
address, just remove the key so that - * it is possible to use the controller white - * list for scanning. - * - * Userspace will have been told to not store - * this key at this point. So it is safe to - * just remove it. - */ - if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) { - list_del_rcu(&smp->remote_irk->list); - kfree_rcu(smp->remote_irk, rcu); - smp->remote_irk = NULL; - } } if (smp->csrk) { diff --git a/net/bridge/br.c b/net/bridge/br.c index a1abe4936fe1..3addc05b9a16 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = { .notifier_call = br_device_event }; +/* called with RTNL */ static int br_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) { @@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused, struct switchdev_notifier_fdb_info *fdb_info; int err = NOTIFY_DONE; - rtnl_lock(); p = br_port_get_rtnl(dev); if (!p) goto out; @@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused, } out: - rtnl_unlock(); return err; } diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 393bfb22d5bb..5fcfb98f309e 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -403,6 +403,7 @@ static int is_out(const struct crush_map *map, * @local_retries: localized retries * @local_fallback_retries: localized fallback retries * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose) + * @stable: stable mode starts rep=0 in the recursive call for all replicas * @vary_r: pass r to recursive calls * @out2: second output vector for leaf items (if @recurse_to_leaf) * @parent_r: r value passed from the parent @@ -419,6 +420,7 @@ static int crush_choose_firstn(const struct crush_map *map, unsigned int local_fallback_retries, int recurse_to_leaf, unsigned int vary_r, + unsigned int stable, int *out2, int parent_r) { @@ -433,13 +435,13 @@ static int crush_choose_firstn(const struct crush_map *map, int collide, reject; int count = out_size; - dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n", + dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n", recurse_to_leaf ? "_LEAF" : "", bucket->id, x, outpos, numrep, tries, recurse_tries, local_retries, local_fallback_retries, - parent_r); + parent_r, stable); - for (rep = outpos; rep < numrep && count > 0 ; rep++) { + for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) { /* keep trying until we get a non-out, non-colliding item */ ftotal = 0; skip_rep = 0; @@ -512,13 +514,14 @@ static int crush_choose_firstn(const struct crush_map *map, if (crush_choose_firstn(map, map->buckets[-1-item], weight, weight_max, - x, outpos+1, 0, + x, stable ? 
1 : outpos+1, 0, out2, outpos, count, recurse_tries, 0, local_retries, local_fallback_retries, 0, vary_r, + stable, NULL, sub_r) <= outpos) /* didn't get leaf */ @@ -816,6 +819,7 @@ int crush_do_rule(const struct crush_map *map, int choose_local_fallback_retries = map->choose_local_fallback_tries; int vary_r = map->chooseleaf_vary_r; + int stable = map->chooseleaf_stable; if ((__u32)ruleno >= map->max_rules) { dprintk(" bad ruleno %d\n", ruleno); @@ -835,7 +839,8 @@ int crush_do_rule(const struct crush_map *map, case CRUSH_RULE_TAKE: if ((curstep->arg1 >= 0 && curstep->arg1 < map->max_devices) || - (-1-curstep->arg1 < map->max_buckets && + (-1-curstep->arg1 >= 0 && + -1-curstep->arg1 < map->max_buckets && map->buckets[-1-curstep->arg1])) { w[0] = curstep->arg1; wsize = 1; @@ -869,6 +874,11 @@ int crush_do_rule(const struct crush_map *map, vary_r = curstep->arg1; break; + case CRUSH_RULE_SET_CHOOSELEAF_STABLE: + if (curstep->arg1 >= 0) + stable = curstep->arg1; + break; + case CRUSH_RULE_CHOOSELEAF_FIRSTN: case CRUSH_RULE_CHOOSE_FIRSTN: firstn = 1; @@ -888,6 +898,7 @@ int crush_do_rule(const struct crush_map *map, osize = 0; for (i = 0; i < wsize; i++) { + int bno; /* * see CRUSH_N, CRUSH_N_MINUS macros. * basically, numrep <= 0 means relative to @@ -900,6 +911,13 @@ int crush_do_rule(const struct crush_map *map, continue; } j = 0; + /* make sure bucket id is valid */ + bno = -1 - w[i]; + if (bno < 0 || bno >= map->max_buckets) { + /* w[i] is probably CRUSH_ITEM_NONE */ + dprintk(" bad w[i] %d\n", w[i]); + continue; + } if (firstn) { int recurse_tries; if (choose_leaf_tries) @@ -911,7 +929,7 @@ int crush_do_rule(const struct crush_map *map, recurse_tries = choose_tries; osize += crush_choose_firstn( map, - map->buckets[-1-w[i]], + map->buckets[bno], weight, weight_max, x, numrep, curstep->arg2, @@ -923,6 +941,7 @@ int crush_do_rule(const struct crush_map *map, choose_local_fallback_retries, recurse_to_leaf, vary_r, + stable, c+osize, 0); } else { @@ -930,7 +949,7 @@ int crush_do_rule(const struct crush_map *map, numrep : (result_max-osize)); crush_choose_indep( map, - map->buckets[-1-w[i]], + map->buckets[bno], weight, weight_max, x, out_size, numrep, curstep->arg2, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f8f235930d88..3534e12683d3 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1770,6 +1770,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg) u32 osdmap_epoch; int already_completed; u32 bytes; + u8 decode_redir; unsigned int i; tid = le64_to_cpu(msg->hdr.tid); @@ -1841,6 +1842,15 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg) p += 8 + 4; /* skip replay_version */ p += 8; /* skip user_version */ + if (le16_to_cpu(msg->hdr.version) >= 7) + ceph_decode_8_safe(&p, end, decode_redir, bad_put); + else + decode_redir = 1; + } else { + decode_redir = 0; + } + + if (decode_redir) { err = ceph_redirect_decode(&p, end, &redir); if (err) goto bad_put; diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 7d8f581d9f1f..243574c8cf33 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -342,23 +342,32 @@ static struct crush_map *crush_decode(void *pbyval, void *end) c->choose_local_tries = ceph_decode_32(p); c->choose_local_fallback_tries = ceph_decode_32(p); c->choose_total_tries = ceph_decode_32(p); - dout("crush decode tunable choose_local_tries = %d", + dout("crush decode tunable choose_local_tries = %d\n", c->choose_local_tries); - dout("crush decode tunable choose_local_fallback_tries = 
%d", + dout("crush decode tunable choose_local_fallback_tries = %d\n", c->choose_local_fallback_tries); - dout("crush decode tunable choose_total_tries = %d", + dout("crush decode tunable choose_total_tries = %d\n", c->choose_total_tries); ceph_decode_need(p, end, sizeof(u32), done); c->chooseleaf_descend_once = ceph_decode_32(p); - dout("crush decode tunable chooseleaf_descend_once = %d", + dout("crush decode tunable chooseleaf_descend_once = %d\n", c->chooseleaf_descend_once); ceph_decode_need(p, end, sizeof(u8), done); c->chooseleaf_vary_r = ceph_decode_8(p); - dout("crush decode tunable chooseleaf_vary_r = %d", + dout("crush decode tunable chooseleaf_vary_r = %d\n", c->chooseleaf_vary_r); + /* skip straw_calc_version, allowed_bucket_algs */ + ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done); + *p += sizeof(u8) + sizeof(u32); + + ceph_decode_need(p, end, sizeof(u8), done); + c->chooseleaf_stable = ceph_decode_8(p); + dout("crush decode tunable chooseleaf_stable = %d\n", + c->chooseleaf_stable); + done: dout("crush_decode success\n"); return c; diff --git a/net/core/dev.c b/net/core/dev.c index cc9e3652cf93..8cba3d852f25 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4351,6 +4351,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb) diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; diffs |= p->vlan_tci ^ skb->vlan_tci; + diffs |= skb_metadata_dst_cmp(p, skb); if (maclen == ETH_HLEN) diffs |= compare_ether_header(skb_mac_header(p), skb_mac_header(skb)); @@ -4548,10 +4549,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb) break; case GRO_MERGED_FREE: - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) { + skb_dst_drop(skb); kmem_cache_free(skbuff_head_cache, skb); - else + } else { __kfree_skb(skb); + } break; case GRO_HELD: diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index d79699c9d1b9..eab81bc80e5c 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -208,7 +208,6 @@ ip: case htons(ETH_P_IPV6): { const struct ipv6hdr *iph; struct ipv6hdr _iph; - __be32 flow_label; ipv6: iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph); @@ -230,8 +229,12 @@ ipv6: key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS; } - flow_label = ip6_flowlabel(iph); - if (flow_label) { + if ((dissector_uses_key(flow_dissector, + FLOW_DISSECTOR_KEY_FLOW_LABEL) || + (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) && + ip6_flowlabel(iph)) { + __be32 flow_label = ip6_flowlabel(iph); + if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_FLOW_LABEL)) { key_tags = skb_flow_dissector_target(flow_dissector, diff --git a/net/core/scm.c b/net/core/scm.c index 14596fb37172..2696aefdc148 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) *fplp = fpl; fpl->count = 0; fpl->max = SCM_MAX_FD; + fpl->user = NULL; } fpp = &fpl->fp[fpl->count]; @@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) *fpp++ = file; fpl->count++; } + + if (!fpl->user) + fpl->user = get_uid(current_user()); + return num; } @@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm) scm->fp = NULL; for (i=fpl->count-1; i>=0; i--) fput(fpl->fp[i]); + free_uid(fpl->user); kfree(fpl); } } @@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl) for (i = 0; i < fpl->count; i++) get_file(fpl->fp[i]); 
new_fpl->max = new_fpl->count; + new_fpl->user = get_uid(fpl->user); } return new_fpl; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b2df375ec9c2..5bf88f58bee7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -79,6 +79,8 @@ struct kmem_cache *skbuff_head_cache __read_mostly; static struct kmem_cache *skbuff_fclone_cache __read_mostly; +int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; +EXPORT_SYMBOL(sysctl_max_skb_frags); /** * skb_panic - private function for out-of-line support diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 1df98c557440..e92b759d906c 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -93,10 +93,17 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse) * @sk2: Socket belonging to the existing reuseport group. * May return ENOMEM and not add socket to group under memory pressure. */ -int reuseport_add_sock(struct sock *sk, const struct sock *sk2) +int reuseport_add_sock(struct sock *sk, struct sock *sk2) { struct sock_reuseport *reuse; + if (!rcu_access_pointer(sk2->sk_reuseport_cb)) { + int err = reuseport_alloc(sk2); + + if (err) + return err; + } + spin_lock_bh(&reuseport_lock); reuse = rcu_dereference_protected(sk2->sk_reuseport_cb, lockdep_is_held(&reuseport_lock)), diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 95b6139d710c..a6beb7b6ae55 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -26,6 +26,7 @@ static int zero = 0; static int one = 1; static int min_sndbuf = SOCK_MIN_SNDBUF; static int min_rcvbuf = SOCK_MIN_RCVBUF; +static int max_skb_frags = MAX_SKB_FRAGS; static int net_msg_warn; /* Unused, but still a sysctl */ @@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, + { + .procname = "max_skb_frags", + .data = &sysctl_max_skb_frags, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + .extra2 = &max_skb_frags, + }, { } }; diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index c22920525e5d..775824720b6b 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig @@ -353,6 +353,7 @@ config INET_ESP select CRYPTO_CBC select CRYPTO_SHA1 select CRYPTO_DES + select CRYPTO_ECHAINIV ---help--- Support for IPsec ESP. 
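[Editorial aside, not part of the patch] The "max_skb_frags" entry registered in net_core_table above is exposed as /proc/sys/net/core/max_skb_frags; proc_dointvec_minmax with extra1/extra2 rejects writes outside [1, MAX_SKB_FRAGS], and the sysctl_max_skb_frags checks added in net/ipv4/tcp.c consult the value when deciding whether to start a new segment instead of attaching another page fragment. A minimal userspace sketch for inspecting and lowering the knob follows; the example value 8 is arbitrary.

/* Sketch: read and optionally lower net.core.max_skb_frags.
 * Illustration only -- not part of the patch above. */
#include <stdio.h>

int main(void)
{
	const char *path = "/proc/sys/net/core/max_skb_frags";
	FILE *f = fopen(path, "r");
	int cur;

	if (!f || fscanf(f, "%d", &cur) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);
	printf("current max_skb_frags: %d\n", cur);

	/* Lower the limit; values outside [1, MAX_SKB_FRAGS] are rejected
	 * by the proc handler, so out-of-range writes fail with EINVAL. */
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "8\n");	/* example value only */
		fclose(f);
	}
	return 0;
}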
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 7aea0ccb6be6..d07fc076bea0 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1394,9 +1394,10 @@ found: struct fib_info *fi = fa->fa_info; int nhsel, err; - if ((index >= (1ul << fa->fa_slen)) && - ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH))) - continue; + if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) { + if (index >= (1ul << fa->fa_slen)) + continue; + } if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) continue; if (fi->fib_dead) diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8bb8e7ad8548..6029157a19ed 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -361,13 +361,20 @@ struct sock *inet_diag_find_one_icsk(struct net *net, req->id.idiag_dport, req->id.idiag_src[0], req->id.idiag_sport, req->id.idiag_if); #if IS_ENABLED(CONFIG_IPV6) - else if (req->sdiag_family == AF_INET6) - sk = inet6_lookup(net, hashinfo, - (struct in6_addr *)req->id.idiag_dst, - req->id.idiag_dport, - (struct in6_addr *)req->id.idiag_src, - req->id.idiag_sport, - req->id.idiag_if); + else if (req->sdiag_family == AF_INET6) { + if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) && + ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src)) + sk = inet_lookup(net, hashinfo, req->id.idiag_dst[3], + req->id.idiag_dport, req->id.idiag_src[3], + req->id.idiag_sport, req->id.idiag_if); + else + sk = inet6_lookup(net, hashinfo, + (struct in6_addr *)req->id.idiag_dst, + req->id.idiag_dport, + (struct in6_addr *)req->id.idiag_src, + req->id.idiag_sport, + req->id.idiag_if); + } #endif else return ERR_PTR(-EINVAL); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 3f00810b7288..187c6fcc3027 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user) struct ipq *qp; IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); + skb_orphan(skb); /* Lookup (or create) queue header */ qp = ip_find(net, ip_hdr(skb), user, vif); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7c51c4e1661f..56fdf4e0dce4 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1240,6 +1240,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name, err = ipgre_newlink(net, dev, tb, NULL); if (err < 0) goto out; + + /* openvswitch users expect packet sizes to be unrestricted, + * so set the largest MTU we can. 
+ */ + err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false); + if (err) + goto out; + return dev; out: free_netdev(dev); diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index b1209b63381f..d77eb0c3b684 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -316,7 +316,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; - if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) { + if (sysctl_ip_early_demux && + !skb_dst(skb) && + !skb->sk && + !ip_is_fragment(iph)) { const struct net_protocol *ipprot; int protocol = iph->protocol; diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index c7bd72e9b544..89e8861e05fc 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -943,17 +943,31 @@ done: } EXPORT_SYMBOL_GPL(ip_tunnel_ioctl); -int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu) +int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict) { struct ip_tunnel *tunnel = netdev_priv(dev); int t_hlen = tunnel->hlen + sizeof(struct iphdr); + int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen; - if (new_mtu < 68 || - new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen) + if (new_mtu < 68) return -EINVAL; + + if (new_mtu > max_mtu) { + if (strict) + return -EINVAL; + + new_mtu = max_mtu; + } + dev->mtu = new_mtu; return 0; } +EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu); + +int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu) +{ + return __ip_tunnel_change_mtu(dev, new_mtu, true); +} EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu); static void ip_tunnel_dev_free(struct net_device *dev) diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 67f7c9de0b16..2ed9dd2b5f2f 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -143,7 +143,11 @@ static char dhcp_client_identifier[253] __initdata; /* Persistent data: */ +#ifdef IPCONFIG_DYNAMIC static int ic_proto_used; /* Protocol used, if any */ +#else +#define ic_proto_used 0 +#endif static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */ static u8 ic_domain[64]; /* DNS (not NIS) domain name */ diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c index 6fb869f646bf..a04dee536b8e 100644 --- a/net/ipv4/netfilter/nf_defrag_ipv4.c +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c @@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb, { int err; - skb_orphan(skb); - local_bh_disable(); err = ip_defrag(net, skb, user); local_bh_enable(); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index fd17eec93525..0c36ef4a3f86 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -279,6 +279,7 @@ #include <asm/uaccess.h> #include <asm/ioctls.h> +#include <asm/unaligned.h> #include <net/busy_poll.h> int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; @@ -939,7 +940,7 @@ new_segment: i = skb_shinfo(skb)->nr_frags; can_coalesce = skb_can_coalesce(skb, i, page, offset); - if (!can_coalesce && i >= MAX_SKB_FRAGS) { + if (!can_coalesce && i >= sysctl_max_skb_frags) { tcp_mark_push(tp, skb); goto new_segment; } @@ -1212,7 +1213,7 @@ new_segment: if (!skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) { - if (i == MAX_SKB_FRAGS || !sg) { + if (i == sysctl_max_skb_frags || !sg) { tcp_mark_push(tp, skb); goto new_segment; } @@ -2638,6 +2639,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) const struct inet_connection_sock *icsk = inet_csk(sk); u32 now = tcp_time_stamp; unsigned int start; + u64 rate64; u32 rate; 
memset(info, 0, sizeof(*info)); @@ -2703,15 +2705,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_total_retrans = tp->total_retrans; rate = READ_ONCE(sk->sk_pacing_rate); - info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL; + rate64 = rate != ~0U ? rate : ~0ULL; + put_unaligned(rate64, &info->tcpi_pacing_rate); rate = READ_ONCE(sk->sk_max_pacing_rate); - info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL; + rate64 = rate != ~0U ? rate : ~0ULL; + put_unaligned(rate64, &info->tcpi_max_pacing_rate); do { start = u64_stats_fetch_begin_irq(&tp->syncp); - info->tcpi_bytes_acked = tp->bytes_acked; - info->tcpi_bytes_received = tp->bytes_received; + put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked); + put_unaligned(tp->bytes_received, &info->tcpi_bytes_received); } while (u64_stats_fetch_retry_irq(&tp->syncp, start)); info->tcpi_segs_out = tp->segs_out; info->tcpi_segs_in = tp->segs_in; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0003d409fec5..1c2a73406261 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2164,8 +2164,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; - int cnt, oldcnt; - int err; + int cnt, oldcnt, lost; unsigned int mss; /* Use SACK to deduce losses of new sequences sent during recovery */ const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; @@ -2205,9 +2204,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) break; mss = tcp_skb_mss(skb); - err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, - mss, GFP_ATOMIC); - if (err < 0) + /* If needed, chop off the prefix to mark as lost. */ + lost = (packets - oldcnt) * mss; + if (lost < skb->len && + tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0) break; cnt = packets; } @@ -2366,8 +2366,6 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) tp->snd_ssthresh = tp->prior_ssthresh; tcp_ecn_withdraw_cwr(tp); } - } else { - tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh); } tp->snd_cwnd_stamp = tcp_time_stamp; tp->undo_marker = 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5ced3e4013e3..7f6ff037adaf 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -311,7 +311,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk) /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */ -void tcp_req_err(struct sock *sk, u32 seq) +void tcp_req_err(struct sock *sk, u32 seq, bool abort) { struct request_sock *req = inet_reqsk(sk); struct net *net = sock_net(sk); @@ -323,7 +323,7 @@ void tcp_req_err(struct sock *sk, u32 seq) if (seq != tcp_rsk(req)->snt_isn) { NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS); - } else { + } else if (abort) { /* * Still in SYN_RECV, just remove it silently. * There is no good way to pass the error to the newly @@ -383,7 +383,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) } seq = ntohl(th->seq); if (sk->sk_state == TCP_NEW_SYN_RECV) - return tcp_req_err(sk, seq); + return tcp_req_err(sk, seq, + type == ICMP_PARAMETERPROB || + type == ICMP_TIME_EXCEEDED || + (type == ICMP_DEST_UNREACH && + (code == ICMP_NET_UNREACH || + code == ICMP_HOST_UNREACH))); bh_lock_sock(sk); /* If too many ICMPs get dropped on busy @@ -707,7 +712,8 @@ release_sk1: outside socket context is ugly, certainly. What can I do? 
*/ -static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, +static void tcp_v4_send_ack(struct net *net, + struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, int reply_flags, u8 tos) @@ -722,7 +728,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, ]; } rep; struct ip_reply_arg arg; - struct net *net = dev_net(skb_dst(skb)->dev); memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&arg, 0, sizeof(arg)); @@ -784,7 +789,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); - tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, + tcp_v4_send_ack(sock_net(sk), skb, + tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_time_stamp + tcptw->tw_ts_offset, tcptw->tw_ts_recent, @@ -803,8 +809,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ - tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ? - tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, + u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : + tcp_sk(sk)->snd_nxt; + + tcp_v4_send_ack(sock_net(sk), skb, seq, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd, tcp_time_stamp, req->ts_recent, diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index dc45b538e237..be0b21852b13 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -499,6 +499,7 @@ static struct sock *udp4_lib_lookup2(struct net *net, struct sock *sk, *result; struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; + bool select_ok = true; u32 hash = 0; begin: @@ -512,14 +513,18 @@ begin: badness = score; reuseport = sk->sk_reuseport; if (reuseport) { - struct sock *sk2; hash = udp_ehashfn(net, daddr, hnum, saddr, sport); - sk2 = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (sk2) { - result = sk2; - goto found; + if (select_ok) { + struct sock *sk2; + + sk2 = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (sk2) { + result = sk2; + select_ok = false; + goto found; + } } matches = 1; } @@ -563,6 +568,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr, unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; + bool select_ok = true; u32 hash = 0; rcu_read_lock(); @@ -601,14 +607,18 @@ begin: badness = score; reuseport = sk->sk_reuseport; if (reuseport) { - struct sock *sk2; hash = udp_ehashfn(net, daddr, hnum, saddr, sport); - sk2 = reuseport_select_sock(sk, hash, skb, + if (select_ok) { + struct sock *sk2; + + sk2 = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); - if (sk2) { - result = sk2; - goto found; + if (sk2) { + result = sk2; + select_ok = false; + goto found; + } } matches = 1; } diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index bb7dabe2ebbf..40c897515ddc 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -69,6 +69,7 @@ config INET6_ESP select CRYPTO_CBC select CRYPTO_SHA1 select CRYPTO_DES + select CRYPTO_ECHAINIV ---help--- Support for IPsec ESP. 
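[Editorial aside, not part of the patch] The reuseport_add_sock() hunk above (net/core/sock_reuseport.c) now allocates the reuseport group from sk2 on demand, and the udp4/udp6 lookup hunks adjust how reuseport_select_sock() is consulted during lookup. For context, a minimal userspace sketch of the socket-group setup those paths dispatch into is given below; the port 9000 is an arbitrary example and the program only demonstrates that two binds to the same UDP port succeed inside one SO_REUSEPORT group.

/* Sketch: two UDP sockets joining one SO_REUSEPORT group.
 * Illustration only -- not part of the patch above. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int make_reuseport_socket(uint16_t port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	/* SO_REUSEPORT must be set before bind() for the group to form. */
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)) < 0) {
		close(fd);
		return -1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = make_reuseport_socket(9000);
	int b = make_reuseport_socket(9000);	/* second bind succeeds only with SO_REUSEPORT */

	if (a < 0 || b < 0) {
		perror("reuseport");
		return 1;
	}
	printf("two sockets share UDP port 9000 in one reuseport group\n");
	close(a);
	close(b);
	return 0;
}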
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 38eeddedfc21..9efd9ffdc34c 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -3538,6 +3538,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) { struct inet6_dev *idev = ifp->idev; struct net_device *dev = idev->dev; + bool notify = false; addrconf_join_solict(dev, &ifp->addr); @@ -3583,7 +3584,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) /* Because optimistic nodes can use this address, * notify listeners. If DAD fails, RTM_DELADDR is sent. */ - ipv6_ifa_notify(RTM_NEWADDR, ifp); + notify = true; } } @@ -3591,6 +3592,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) out: spin_unlock(&ifp->lock); read_unlock_bh(&idev->lock); + if (notify) + ipv6_ifa_notify(RTM_NEWADDR, ifp); } static void addrconf_dad_start(struct inet6_ifaddr *ifp) diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 517c55b01ba8..428162155280 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -162,6 +162,9 @@ ipv4_connected: fl6.fl6_dport = inet->inet_dport; fl6.fl6_sport = inet->inet_sport; + if (!fl6.flowi6_oif) + fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; + if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST)) fl6.flowi6_oif = np->mcast_oif; diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 1f9ebe3cbb4a..dc2db4f7b182 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen) } spin_lock_bh(&ip6_sk_fl_lock); for (sflp = &np->ipv6_fl_list; - (sfl = rcu_dereference(*sflp)) != NULL; + (sfl = rcu_dereference_protected(*sflp, + lockdep_is_held(&ip6_sk_fl_lock))) != NULL; sflp = &sfl->next) { if (sfl->fl->label == freq.flr_label) { if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK)) np->flow_label &= ~IPV6_FLOWLABEL_MASK; - *sflp = rcu_dereference(sfl->next); + *sflp = sfl->next; spin_unlock_bh(&ip6_sk_fl_lock); fl_release(sfl->fl); kfree_rcu(sfl, rcu); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 23de98f976d5..a163102f1803 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, struct rt6_info *rt; #endif int err; + int flags = 0; /* The correct way to handle this would be to do * ip6_route_get_saddr, and then ip6_route_output; however, @@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, dst_release(*dst); *dst = NULL; } + + if (fl6->flowi6_oif) + flags |= RT6_LOOKUP_F_IFACE; } if (!*dst) - *dst = ip6_route_output(net, sk, fl6); + *dst = ip6_route_output_flags(net, sk, fl6, flags); err = (*dst)->error; if (err) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3c8834bc822d..ed446639219c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1183,11 +1183,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags); } -struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, - struct flowi6 *fl6) +struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, + struct flowi6 *fl6, int flags) { struct dst_entry *dst; - int flags = 0; bool any_src; dst = l3mdev_rt6_dst_by_oif(net, fl6); @@ -1208,7 +1207,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk, return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output); } 
-EXPORT_SYMBOL(ip6_route_output); +EXPORT_SYMBOL_GPL(ip6_route_output_flags); struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig) { diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index e794ef66a401..2066d1c25a11 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -201,14 +201,14 @@ static int ipip6_tunnel_create(struct net_device *dev) if ((__force u16)t->parms.i_flags & SIT_ISATAP) dev->priv_flags |= IFF_ISATAP; + dev->rtnl_link_ops = &sit_link_ops; + err = register_netdevice(dev); if (err < 0) goto out; ipip6_tunnel_clone_6rd(dev, sitn); - dev->rtnl_link_ops = &sit_link_ops; - dev_hold(dev); ipip6_tunnel_link(sitn, t); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 006396e31cb0..1a5a70fb8551 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -327,6 +327,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, struct tcp_sock *tp; __u32 seq, snd_una; struct sock *sk; + bool fatal; int err; sk = __inet6_lookup_established(net, &tcp_hashinfo, @@ -345,8 +346,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, return; } seq = ntohl(th->seq); + fatal = icmpv6_err_convert(type, code, &err); if (sk->sk_state == TCP_NEW_SYN_RECV) - return tcp_req_err(sk, seq); + return tcp_req_err(sk, seq, fatal); bh_lock_sock(sk); if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG) @@ -400,7 +402,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, goto out; } - icmpv6_err_convert(type, code, &err); /* Might be for an request_sock */ switch (sk->sk_state) { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 5d2c2afffe7b..22e28a44e3c8 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -257,6 +257,7 @@ static struct sock *udp6_lib_lookup2(struct net *net, struct sock *sk, *result; struct hlist_nulls_node *node; int score, badness, matches = 0, reuseport = 0; + bool select_ok = true; u32 hash = 0; begin: @@ -270,14 +271,18 @@ begin: badness = score; reuseport = sk->sk_reuseport; if (reuseport) { - struct sock *sk2; hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); - sk2 = reuseport_select_sock(sk, hash, skb, - sizeof(struct udphdr)); - if (sk2) { - result = sk2; - goto found; + if (select_ok) { + struct sock *sk2; + + sk2 = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); + if (sk2) { + result = sk2; + select_ok = false; + goto found; + } } matches = 1; } @@ -321,6 +326,7 @@ struct sock *__udp6_lib_lookup(struct net *net, unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness, matches = 0, reuseport = 0; + bool select_ok = true; u32 hash = 0; rcu_read_lock(); @@ -358,14 +364,18 @@ begin: badness = score; reuseport = sk->sk_reuseport; if (reuseport) { - struct sock *sk2; hash = udp6_ehashfn(net, daddr, hnum, saddr, sport); - sk2 = reuseport_select_sock(sk, hash, skb, + if (select_ok) { + struct sock *sk2; + + sk2 = reuseport_select_sock(sk, hash, skb, sizeof(struct udphdr)); - if (sk2) { - result = sk2; - goto found; + if (sk2) { + result = sk2; + select_ok = false; + goto found; + } } matches = 1; } diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c index 3c4caa60c926..5728e76ca6d5 100644 --- a/net/irda/ircomm/ircomm_param.c +++ b/net/irda/ircomm/ircomm_param.c @@ -134,11 +134,10 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush) return -1; } skb_put(skb, count); + pr_debug("%s(), skb->len=%d\n", __func__, skb->len); 
spin_unlock_irqrestore(&self->spinlock, flags); - pr_debug("%s(), skb->len=%d\n", __func__ , skb->len); - if (flush) { /* ircomm_tty_do_softint will take care of the rest */ schedule_work(&self->tqueue); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index ef50a94d3eb7..fc3598a922b0 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr, if (!addr || addr->sa_family != AF_IUCV) return -EINVAL; + if (addr_len < sizeof(struct sockaddr_iucv)) + return -EINVAL; + lock_sock(sk); if (sk->sk_state != IUCV_OPEN) { err = -EBADFD; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index f7fc0e00497f..978d3bc31df7 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -1733,7 +1733,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) if (sdata->vif.type != NL80211_IFTYPE_ADHOC) continue; sdata->u.ibss.last_scan_completed = jiffies; - ieee80211_queue_work(&local->hw, &sdata->work); } mutex_unlock(&local->iflist_mtx); } diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 6bcf0faa4a89..8190bf27ebff 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -248,6 +248,7 @@ static void ieee80211_restart_work(struct work_struct *work) /* wait for scan work complete */ flush_workqueue(local->workqueue); + flush_work(&local->sched_scan_stopped_work); WARN(test_bit(SCAN_HW_SCANNING, &local->scanning), "%s called with hardware scan in progress\n", __func__); @@ -256,6 +257,11 @@ static void ieee80211_restart_work(struct work_struct *work) list_for_each_entry(sdata, &local->interfaces, list) flush_delayed_work(&sdata->dec_tailroom_needed_wk); ieee80211_scan_cancel(local); + + /* make sure any new ROC will consider local->in_reconfig */ + flush_delayed_work(&local->roc_work); + flush_work(&local->hw_roc_done); + ieee80211_reconfig(local); rtnl_unlock(); } diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index fa28500f28fd..6f85b6ab8e51 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1370,17 +1370,6 @@ out: sdata_unlock(sdata); } -void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) -{ - struct ieee80211_sub_if_data *sdata; - - rcu_read_lock(); - list_for_each_entry_rcu(sdata, &local->interfaces, list) - if (ieee80211_vif_is_mesh(&sdata->vif) && - ieee80211_sdata_running(sdata)) - ieee80211_queue_work(&local->hw, &sdata->work); - rcu_read_unlock(); -} void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) { diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index a1596344c3ba..4a8019f79fb2 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h @@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; } -void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); - void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata); void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata); void ieee80211s_stop(void); #else -static inline void -ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {} static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) { return false; } static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 1c342e2592c4..bfbb1acafdd1 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4005,8 +4005,6 @@ static void 
ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR)) ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.monitor_work); - /* and do all the other regular work too */ - ieee80211_queue_work(&sdata->local->hw, &sdata->work); } } diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 8b2f4eaac2ba..55a9c5b94ce1 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -252,14 +252,11 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local, static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, unsigned long start_time) { - struct ieee80211_local *local = roc->sdata->local; - if (WARN_ON(roc->notified)) return; roc->start_time = start_time; roc->started = true; - roc->hw_begun = true; if (roc->mgmt_tx_cookie) { if (!WARN_ON(!roc->frame)) { @@ -274,9 +271,6 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc, } roc->notified = true; - - if (!local->ops->remain_on_channel) - ieee80211_recalc_sw_work(local, start_time); } static void ieee80211_hw_roc_start(struct work_struct *work) @@ -291,6 +285,7 @@ static void ieee80211_hw_roc_start(struct work_struct *work) if (!roc->started) break; + roc->hw_begun = true; ieee80211_handle_roc_started(roc, local->hw_roc_start_time); } @@ -413,6 +408,10 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) return; } + /* defer roc if driver is not started (i.e. during reconfig) */ + if (local->in_reconfig) + return; + roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work, list); @@ -534,8 +533,10 @@ ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local, * begin, otherwise they'll both be marked properly by the work * struct that runs once the driver notifies us of the beginning */ - if (cur_roc->hw_begun) + if (cur_roc->hw_begun) { + new_roc->hw_begun = true; ieee80211_handle_roc_started(new_roc, now); + } return true; } @@ -658,6 +659,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local, queued = true; roc->on_channel = tmp->on_channel; ieee80211_handle_roc_started(roc, now); + ieee80211_recalc_sw_work(local, now); break; } diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c index a413e52f7691..ae980ce8daff 100644 --- a/net/mac80211/scan.c +++ b/net/mac80211/scan.c @@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) bool was_scanning = local->scanning; struct cfg80211_scan_request *scan_req; struct ieee80211_sub_if_data *scan_sdata; + struct ieee80211_sub_if_data *sdata; lockdep_assert_held(&local->mtx); @@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) ieee80211_mlme_notify_scan_completed(local); ieee80211_ibss_notify_scan_completed(local); - ieee80211_mesh_notify_scan_completed(local); + + /* Requeue all the work that might have been ignored while + * the scan was in progress; if there was none this will + * just be a no-op for the particular interface. + */ + list_for_each_entry_rcu(sdata, &local->interfaces, list) { + if (ieee80211_sdata_running(sdata)) + ieee80211_queue_work(&sdata->local->hw, &sdata->work); + } + if (was_scanning) ieee80211_start_next_roc(local); } @@ -1213,6 +1223,14 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw) trace_api_sched_scan_stopped(local); + /* + * this shouldn't really happen, so for simplicity + * simply ignore it, and let mac80211 reconfigure + * the sched scan later on. 
+ */ + if (local->in_reconfig) + return; + schedule_work(&local->sched_scan_stopped_work); } EXPORT_SYMBOL(ieee80211_sched_scan_stopped); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 4402ad5b27d1..a4a4f89d3ba0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1453,7 +1453,7 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta, more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids); - if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) + if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL) driver_release_tids = BIT(find_highest_prio_tid(driver_release_tids)); diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 5bad05e9af90..6101deb805a8 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c @@ -51,6 +51,11 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, struct ieee80211_hdr *hdr = (void *)skb->data; int ac; + if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) { + ieee80211_free_txskb(&local->hw, skb); + return; + } + /* * This skb 'survived' a round-trip through the driver, and * hopefully the driver didn't mangle it too badly. However, diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 3943d4bf289c..58f58bd5202f 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -2043,16 +2043,26 @@ int ieee80211_reconfig(struct ieee80211_local *local) */ if (sched_scan_req->n_scan_plans > 1 || __ieee80211_request_sched_scan_start(sched_scan_sdata, - sched_scan_req)) + sched_scan_req)) { + RCU_INIT_POINTER(local->sched_scan_sdata, NULL); + RCU_INIT_POINTER(local->sched_scan_req, NULL); sched_scan_stopped = true; + } mutex_unlock(&local->mtx); if (sched_scan_stopped) cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy); wake_up: - local->in_reconfig = false; - barrier(); + if (local->in_reconfig) { + local->in_reconfig = false; + barrier(); + + /* Restart deferred ROCs */ + mutex_lock(&local->mtx); + ieee80211_start_next_roc(local); + mutex_unlock(&local->mtx); + } if (local->monitors == local->open_count && local->monitors > 0) ieee80211_add_virtual_monitor(local); diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 43d8c9896fa3..f0f688db6213 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -164,8 +164,6 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); - if (e.cidr == 0) - return -EINVAL; if (adt == IPSET_TEST) e.cidr = HOST_MASK; @@ -377,8 +375,6 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); - if (e.cidr == 0) - return -EINVAL; if (adt == IPSET_TEST) e.cidr = HOST_MASK; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3cb3cb831591..58882de06bd7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -66,6 +66,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks); __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); +static __read_mostly spinlock_t nf_conntrack_locks_all_lock; +static __read_mostly bool nf_conntrack_locks_all; + +void nf_conntrack_lock(spinlock_t *lock) __acquires(lock) +{ + spin_lock(lock); + while (unlikely(nf_conntrack_locks_all)) { + spin_unlock(lock); + spin_lock(&nf_conntrack_locks_all_lock); + 
spin_unlock(&nf_conntrack_locks_all_lock); + spin_lock(lock); + } +} +EXPORT_SYMBOL_GPL(nf_conntrack_lock); + static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2) { h1 %= CONNTRACK_LOCKS; @@ -82,12 +97,12 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1, h1 %= CONNTRACK_LOCKS; h2 %= CONNTRACK_LOCKS; if (h1 <= h2) { - spin_lock(&nf_conntrack_locks[h1]); + nf_conntrack_lock(&nf_conntrack_locks[h1]); if (h1 != h2) spin_lock_nested(&nf_conntrack_locks[h2], SINGLE_DEPTH_NESTING); } else { - spin_lock(&nf_conntrack_locks[h2]); + nf_conntrack_lock(&nf_conntrack_locks[h2]); spin_lock_nested(&nf_conntrack_locks[h1], SINGLE_DEPTH_NESTING); } @@ -102,16 +117,19 @@ static void nf_conntrack_all_lock(void) { int i; - for (i = 0; i < CONNTRACK_LOCKS; i++) - spin_lock_nested(&nf_conntrack_locks[i], i); + spin_lock(&nf_conntrack_locks_all_lock); + nf_conntrack_locks_all = true; + + for (i = 0; i < CONNTRACK_LOCKS; i++) { + spin_lock(&nf_conntrack_locks[i]); + spin_unlock(&nf_conntrack_locks[i]); + } } static void nf_conntrack_all_unlock(void) { - int i; - - for (i = 0; i < CONNTRACK_LOCKS; i++) - spin_unlock(&nf_conntrack_locks[i]); + nf_conntrack_locks_all = false; + spin_unlock(&nf_conntrack_locks_all_lock); } unsigned int nf_conntrack_htable_size __read_mostly; @@ -757,7 +775,7 @@ restart: hash = hash_bucket(_hash, net); for (; i < net->ct.htable_size; i++) { lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; - spin_lock(lockp); + nf_conntrack_lock(lockp); if (read_seqcount_retry(&net->ct.generation, sequence)) { spin_unlock(lockp); goto restart; @@ -1382,7 +1400,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), for (; *bucket < net->ct.htable_size; (*bucket)++) { lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; local_bh_disable(); - spin_lock(lockp); + nf_conntrack_lock(lockp); if (*bucket < net->ct.htable_size) { hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index bd9d31537905..3b40ec575cd5 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -425,7 +425,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, } local_bh_disable(); for (i = 0; i < net->ct.htable_size; i++) { - spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); + nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); if (i < net->ct.htable_size) { hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) unhelp(h, me); diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index dbb1bb3edb45..355e8552fd5b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { restart: lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS]; - spin_lock(lockp); + nf_conntrack_lock(lockp); if (cb->args[0] >= net->ct.htable_size) { spin_unlock(lockp); goto out; diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c index b6605e000801..5eefe4a355c6 100644 --- a/net/netfilter/nf_tables_netdev.c +++ b/net/netfilter/nf_tables_netdev.c @@ -224,12 +224,12 @@ static int __init nf_tables_netdev_init(void) nft_register_chain_type(&nft_filter_chain_netdev); ret = 
register_pernet_subsys(&nf_tables_netdev_net_ops); - if (ret < 0) + if (ret < 0) { nft_unregister_chain_type(&nft_filter_chain_netdev); - + return ret; + } register_netdevice_notifier(&nf_tables_netdev_notifier); - - return ret; + return 0; } static void __exit nf_tables_netdev_exit(void) diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 5d010f27ac01..94837d236ab0 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -307,12 +307,12 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout) local_bh_disable(); for (i = 0; i < net->ct.htable_size; i++) { - spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); + nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); if (i < net->ct.htable_size) { hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) untimeout(h, timeout); } - spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); + nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); } local_bh_enable(); } diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c index 383c17138399..b78c28ba465f 100644 --- a/net/netfilter/nft_byteorder.c +++ b/net/netfilter/nft_byteorder.c @@ -46,16 +46,14 @@ static void nft_byteorder_eval(const struct nft_expr *expr, switch (priv->op) { case NFT_BYTEORDER_NTOH: for (i = 0; i < priv->len / 8; i++) { - src64 = get_unaligned_be64(&src[i]); - src64 = be64_to_cpu((__force __be64)src64); + src64 = get_unaligned((u64 *)&src[i]); put_unaligned_be64(src64, &dst[i]); } break; case NFT_BYTEORDER_HTON: for (i = 0; i < priv->len / 8; i++) { src64 = get_unaligned_be64(&src[i]); - src64 = (__force u64)cpu_to_be64(src64); - put_unaligned_be64(src64, &dst[i]); + put_unaligned(src64, (u64 *)&dst[i]); } break; } diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index a0eb2161e3ef..d4a4619fcebc 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -127,6 +127,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, NF_CT_LABELS_MAX_SIZE - size); return; } +#endif case NFT_CT_BYTES: /* fallthrough */ case NFT_CT_PKTS: { const struct nf_conn_acct *acct = nf_conn_acct_find(ct); @@ -138,7 +139,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr, memcpy(dest, &count, sizeof(count)); return; } -#endif default: break; } diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index b7c43def0dc6..e118397254af 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -228,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) { struct ipv6hdr *ipv6h = ipv6_hdr(skb); u8 nexthdr; - __be16 frag_off; + __be16 frag_off, oldlen, newlen; int tcphoff; int ret; @@ -244,7 +244,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) return NF_DROP; if (ret > 0) { ipv6h = ipv6_hdr(skb); - ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret); + oldlen = ipv6h->payload_len; + newlen = htons(ntohs(oldlen) + ret); + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->csum = csum_add(csum_sub(skb->csum, oldlen), + newlen); + ipv6h->payload_len = newlen; } return XT_CONTINUE; } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 81dc1bb6e016..f1ffb34e253f 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -2831,7 +2831,8 @@ static int netlink_dump(struct sock *sk) * reasonable static buffer based on the expected largest dump of a * single netdev. The outcome is MSG_TRUNC error. 
*/ - skb_reserve(skb, skb_tailroom(skb) - alloc_size); + if (!netlink_rx_is_mmaped(sk)) + skb_reserve(skb, skb_tailroom(skb) - alloc_size); netlink_skb_set_owner_r(skb, sk); len = cb->dump(skb, cb); diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c index 1605691d9414..de9cb19efb6a 100644 --- a/net/openvswitch/vport-vxlan.c +++ b/net/openvswitch/vport-vxlan.c @@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms) struct vxlan_config conf = { .no_share = true, .flags = VXLAN_F_COLLECT_METADATA, + /* Don't restrict the packets that can be sent by MTU */ + .mtu = IP_MAX_MTU, }; if (!options) { diff --git a/net/rfkill/core.c b/net/rfkill/core.c index f53bf3b6558b..cf5b69ab1829 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait) return res; } -static bool rfkill_readable(struct rfkill_data *data) -{ - bool r; - - mutex_lock(&data->mtx); - r = !list_empty(&data->events); - mutex_unlock(&data->mtx); - - return r; -} - static ssize_t rfkill_fop_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { @@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf, goto out; } mutex_unlock(&data->mtx); + /* since we re-check and it just compares pointers, + * using !list_empty() without locking isn't a problem + */ ret = wait_event_interruptible(data->read_wait, - rfkill_readable(data)); + !list_empty(&data->events)); mutex_lock(&data->mtx); if (ret) diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index f26bdea875c1..a1cd778240cd 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c @@ -403,6 +403,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) if (len <= cl->deficit) { cl->deficit -= len; skb = qdisc_dequeue_peeked(cl->qdisc); + if (unlikely(skb == NULL)) + goto out; if (cl->qdisc->q.qlen == 0) list_del(&cl->alist); diff --git a/net/sctp/input.c b/net/sctp/input.c index bf61dfb8e09e..49d2cc751386 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c @@ -935,15 +935,22 @@ static struct sctp_association *__sctp_lookup_association( struct sctp_transport **pt) { struct sctp_transport *t; + struct sctp_association *asoc = NULL; + rcu_read_lock(); t = sctp_addrs_lookup_transport(net, local, peer); - if (!t || t->dead) - return NULL; + if (!t || !sctp_transport_hold(t)) + goto out; - sctp_association_hold(t->asoc); + asoc = t->asoc; + sctp_association_hold(asoc); *pt = t; - return t->asoc; + sctp_transport_put(t); + +out: + rcu_read_unlock(); + return asoc; } /* Look up an association. 
protected by RCU read lock */ @@ -955,9 +962,7 @@ struct sctp_association *sctp_lookup_association(struct net *net, { struct sctp_association *asoc; - rcu_read_lock(); asoc = __sctp_lookup_association(net, laddr, paddr, transportp); - rcu_read_unlock(); return asoc; } diff --git a/net/sctp/proc.c b/net/sctp/proc.c index 684c5b31563b..ded7d931a6a5 100644 --- a/net/sctp/proc.c +++ b/net/sctp/proc.c @@ -165,8 +165,6 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list, transports) { addr = &transport->ipaddr; - if (transport->dead) - continue; af = sctp_get_af_specific(addr->sa.sa_family); if (af->cmp_addr(addr, primary)) { @@ -380,6 +378,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) } transport = (struct sctp_transport *)v; + if (!sctp_transport_hold(transport)) + return 0; assoc = transport->asoc; epb = &assoc->base; sk = epb->sk; @@ -412,6 +412,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v) sk->sk_rcvbuf); seq_printf(seq, "\n"); + sctp_transport_put(transport); + return 0; } @@ -489,12 +491,12 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) } tsp = (struct sctp_transport *)v; + if (!sctp_transport_hold(tsp)) + return 0; assoc = tsp->asoc; list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list, transports) { - if (tsp->dead) - continue; /* * The remote address (ADDR) */ @@ -544,6 +546,8 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "\n"); } + sctp_transport_put(tsp); + return 0; } diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 2e21384697c2..b5327bb77458 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -259,12 +259,6 @@ void sctp_generate_t3_rtx_event(unsigned long peer) goto out_unlock; } - /* Is this transport really dead and just waiting around for - * the timer to let go of the reference? - */ - if (transport->dead) - goto out_unlock; - /* Run through the state machine. */ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), @@ -380,12 +374,6 @@ void sctp_generate_heartbeat_event(unsigned long data) goto out_unlock; } - /* Is this structure just waiting around for us to actually - * get destroyed? 
- */ - if (transport->dead) - goto out_unlock; - error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT, SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), asoc->state, asoc->ep, asoc, diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9bb80ec4c08f..e878da0949db 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5538,6 +5538,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, struct sctp_hmac_algo_param *hmacs; __u16 data_len = 0; u32 num_idents; + int i; if (!ep->auth_enable) return -EACCES; @@ -5555,8 +5556,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len, return -EFAULT; if (put_user(num_idents, &p->shmac_num_idents)) return -EFAULT; - if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len)) - return -EFAULT; + for (i = 0; i < num_idents; i++) { + __u16 hmacid = ntohs(hmacs->hmac_ids[i]); + + if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16))) + return -EFAULT; + } return 0; } @@ -6636,6 +6641,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) if (cmsgs->srinfo->sinfo_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | + SCTP_SACK_IMMEDIATELY | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; @@ -6659,6 +6665,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs) if (cmsgs->sinfo->snd_flags & ~(SCTP_UNORDERED | SCTP_ADDR_OVER | + SCTP_SACK_IMMEDIATELY | SCTP_ABORT | SCTP_EOF)) return -EINVAL; break; diff --git a/net/sctp/transport.c b/net/sctp/transport.c index aab9e3f29755..a431c14044a4 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -132,8 +132,6 @@ fail: */ void sctp_transport_free(struct sctp_transport *transport) { - transport->dead = 1; - /* Try to delete the heartbeat timer. */ if (del_timer(&transport->hb_timer)) sctp_transport_put(transport); @@ -169,7 +167,7 @@ static void sctp_transport_destroy_rcu(struct rcu_head *head) */ static void sctp_transport_destroy(struct sctp_transport *transport) { - if (unlikely(!transport->dead)) { + if (unlikely(atomic_read(&transport->refcnt))) { WARN(1, "Attempt to destroy undead transport %p!\n", transport); return; } @@ -296,9 +294,9 @@ void sctp_transport_route(struct sctp_transport *transport, } /* Hold a reference to a transport. 
*/ -void sctp_transport_hold(struct sctp_transport *transport) +int sctp_transport_hold(struct sctp_transport *transport) { - atomic_inc(&transport->refcnt); + return atomic_add_unless(&transport->refcnt, 1, 0); } /* Release a reference to a transport and clean up diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c index ebc661d3b6e3..47f7da58a7f0 100644 --- a/net/switchdev/switchdev.c +++ b/net/switchdev/switchdev.c @@ -20,6 +20,7 @@ #include <linux/list.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> +#include <linux/rtnetlink.h> #include <net/ip_fib.h> #include <net/switchdev.h> @@ -567,7 +568,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj, } EXPORT_SYMBOL_GPL(switchdev_port_obj_dump); -static DEFINE_MUTEX(switchdev_mutex); static RAW_NOTIFIER_HEAD(switchdev_notif_chain); /** @@ -582,9 +582,9 @@ int register_switchdev_notifier(struct notifier_block *nb) { int err; - mutex_lock(&switchdev_mutex); + rtnl_lock(); err = raw_notifier_chain_register(&switchdev_notif_chain, nb); - mutex_unlock(&switchdev_mutex); + rtnl_unlock(); return err; } EXPORT_SYMBOL_GPL(register_switchdev_notifier); @@ -600,9 +600,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb) { int err; - mutex_lock(&switchdev_mutex); + rtnl_lock(); err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb); - mutex_unlock(&switchdev_mutex); + rtnl_unlock(); return err; } EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); @@ -616,16 +616,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier); * Call all network notifier blocks. This should be called by driver * when it needs to propagate hardware event. * Return values are same as for atomic_notifier_call_chain(). + * rtnl_lock must be held. */ int call_switchdev_notifiers(unsigned long val, struct net_device *dev, struct switchdev_notifier_info *info) { int err; + ASSERT_RTNL(); + info->dev = dev; - mutex_lock(&switchdev_mutex); err = raw_notifier_call_chain(&switchdev_notif_chain, val, info); - mutex_unlock(&switchdev_mutex); return err; } EXPORT_SYMBOL_GPL(call_switchdev_notifiers); diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 350cca33ee0a..69ee2eeef968 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid, struct sockaddr_tipc *addr, void *usr_data, void *buf, size_t len) { - struct tipc_subscriber *subscriber = usr_data; + struct tipc_subscriber *subscrb = usr_data; struct tipc_subscription *sub = NULL; struct tipc_net *tn = net_generic(net, tipc_net_id); - tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub); - if (sub) - tipc_nametbl_subscribe(sub); - else - tipc_conn_terminate(tn->topsrv, subscriber->conid); + if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub)) + return tipc_conn_terminate(tn->topsrv, subscrb->conid); + + tipc_nametbl_subscribe(sub); } /* Handle one request to establish a new subscriber */ diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index c5bf5ef2bf89..29be035f9c65 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb) UNIXCB(skb).fp = NULL; for (i = scm->fp->count-1; i >= 0; i--) - unix_notinflight(scm->fp->fp[i]); + unix_notinflight(scm->fp->user, scm->fp->fp[i]); } static void unix_destruct_scm(struct sk_buff *skb) @@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) return -ENOMEM; for (i = 
scm->fp->count - 1; i >= 0; i--) - unix_inflight(scm->fp->fp[i]); + unix_inflight(scm->fp->user, scm->fp->fp[i]); return max_level; } @@ -2339,6 +2339,7 @@ again: if (signal_pending(current)) { err = sock_intr_errno(timeo); + scm_destroy(&scm); goto out; } diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 8fcdc2283af5..6a0d48525fcf 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp) * descriptor if it is for an AF_UNIX socket. */ -void unix_inflight(struct file *fp) +void unix_inflight(struct user_struct *user, struct file *fp) { struct sock *s = unix_get_socket(fp); @@ -133,11 +133,11 @@ void unix_inflight(struct file *fp) } unix_tot_inflight++; } - fp->f_cred->user->unix_inflight++; + user->unix_inflight++; spin_unlock(&unix_gc_lock); } -void unix_notinflight(struct file *fp) +void unix_notinflight(struct user_struct *user, struct file *fp) { struct sock *s = unix_get_socket(fp); @@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp) list_del_init(&u->link); unix_tot_inflight--; } - fp->f_cred->user->unix_inflight--; + user->unix_inflight--; spin_unlock(&unix_gc_lock); } diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 3b0ce1c484a3..547ceecc0523 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -231,20 +231,22 @@ static const struct ieee80211_regdomain world_regdom = { /* IEEE 802.11b/g, channels 1..11 */ REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), /* IEEE 802.11b/g, channels 12..13. */ - REG_RULE(2467-10, 2472+10, 40, 6, 20, - NL80211_RRF_NO_IR), + REG_RULE(2467-10, 2472+10, 20, 6, 20, + NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW), /* IEEE 802.11 channel 14 - Only JP enables * this and for 802.11b only */ REG_RULE(2484-10, 2484+10, 20, 6, 20, NL80211_RRF_NO_IR | NL80211_RRF_NO_OFDM), /* IEEE 802.11a, channel 36..48 */ - REG_RULE(5180-10, 5240+10, 160, 6, 20, - NL80211_RRF_NO_IR), + REG_RULE(5180-10, 5240+10, 80, 6, 20, + NL80211_RRF_NO_IR | + NL80211_RRF_AUTO_BW), /* IEEE 802.11a, channel 52..64 - DFS required */ - REG_RULE(5260-10, 5320+10, 160, 6, 20, + REG_RULE(5260-10, 5320+10, 80, 6, 20, NL80211_RRF_NO_IR | + NL80211_RRF_AUTO_BW | NL80211_RRF_DFS), /* IEEE 802.11a, channel 100..144 - DFS required */ @@ -2745,7 +2747,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) const struct ieee80211_power_rule *power_rule = NULL; char bw[32], cac_time[32]; - pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n"); + pr_debug(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n"); for (i = 0; i < rd->n_reg_rules; i++) { reg_rule = &rd->reg_rules[i]; @@ -2772,7 +2774,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) * in certain regions */ if (power_rule->max_antenna_gain) - pr_info(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n", + pr_debug(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, bw, @@ -2780,7 +2782,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) power_rule->max_eirp, cac_time); else - pr_info(" (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n", + pr_debug(" (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n", freq_range->start_freq_khz, freq_range->end_freq_khz, bw, @@ -2813,35 +2815,35 @@ static void print_regdomain(const struct ieee80211_regdomain *rd) struct cfg80211_registered_device *rdev; rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx); if (rdev) { - pr_info("Current regulatory domain updated 
by AP to: %c%c\n", + pr_debug("Current regulatory domain updated by AP to: %c%c\n", rdev->country_ie_alpha2[0], rdev->country_ie_alpha2[1]); } else - pr_info("Current regulatory domain intersected:\n"); + pr_debug("Current regulatory domain intersected:\n"); } else - pr_info("Current regulatory domain intersected:\n"); + pr_debug("Current regulatory domain intersected:\n"); } else if (is_world_regdom(rd->alpha2)) { - pr_info("World regulatory domain updated:\n"); + pr_debug("World regulatory domain updated:\n"); } else { if (is_unknown_alpha2(rd->alpha2)) - pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n"); + pr_debug("Regulatory domain changed to driver built-in settings (unknown country)\n"); else { if (reg_request_cell_base(lr)) - pr_info("Regulatory domain changed to country: %c%c by Cell Station\n", + pr_debug("Regulatory domain changed to country: %c%c by Cell Station\n", rd->alpha2[0], rd->alpha2[1]); else - pr_info("Regulatory domain changed to country: %c%c\n", + pr_debug("Regulatory domain changed to country: %c%c\n", rd->alpha2[0], rd->alpha2[1]); } } - pr_info(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region)); + pr_debug(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region)); print_rd_rules(rd); } static void print_regdomain_info(const struct ieee80211_regdomain *rd) { - pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]); + pr_debug("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]); print_rd_rules(rd); } @@ -2862,7 +2864,8 @@ static int reg_set_rd_user(const struct ieee80211_regdomain *rd, return -EALREADY; if (!is_valid_rd(rd)) { - pr_err("Invalid regulatory domain detected:\n"); + pr_err("Invalid regulatory domain detected: %c%c\n", + rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } @@ -2898,7 +2901,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd, return -EALREADY; if (!is_valid_rd(rd)) { - pr_err("Invalid regulatory domain detected:\n"); + pr_err("Invalid regulatory domain detected: %c%c\n", + rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } @@ -2956,7 +2960,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd, */ if (!is_valid_rd(rd)) { - pr_err("Invalid regulatory domain detected:\n"); + pr_err("Invalid regulatory domain detected: %c%c\n", + rd->alpha2[0], rd->alpha2[1]); print_regdomain_info(rd); return -EINVAL; } diff --git a/scripts/prune-kernel b/scripts/prune-kernel new file mode 100755 index 000000000000..ab5034e1d081 --- /dev/null +++ b/scripts/prune-kernel @@ -0,0 +1,20 @@ +#!/bin/bash + +# because I use CONFIG_LOCALVERSION_AUTO, not the same version again and +# again, /boot and /lib/modules/ eventually fill up. 
+# Dumb script to purge that stuff: + +for f in "$@" +do + if rpm -qf "/lib/modules/$f" >/dev/null; then + echo "keeping $f (installed from rpm)" + elif [ $(uname -r) = "$f" ]; then + echo "keeping $f (running kernel) " + else + echo "removing $f" + rm -f "/boot/initramfs-$f.img" "/boot/System.map-$f" + rm -f "/boot/vmlinuz-$f" "/boot/config-$f" + rm -rf "/lib/modules/$f" + new-kernel-pkg --remove $f + fi +done diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index f7160253f17f..e6ea9d4b1de9 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -23,6 +23,7 @@ #include <linux/integrity.h> #include <linux/evm.h> #include <crypto/hash.h> +#include <crypto/algapi.h> #include "evm.h" int evm_initialized; @@ -148,7 +149,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, xattr_value_len, calc.digest); if (rc) break; - rc = memcmp(xattr_data->digest, calc.digest, + rc = crypto_memneq(xattr_data->digest, calc.digest, sizeof(calc.digest)); if (rc) rc = -EINVAL; diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 2bbb41822d8e..8495b9368190 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -83,6 +83,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] = { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, + { SOCK_DESTROY, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE }, }; static struct nlmsg_perm nlmsg_xfrm_perms[] = diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index 0e73d03b30e3..ebc9fdfe64df 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c @@ -835,7 +835,8 @@ static int choose_rate(struct snd_pcm_substream *substream, return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL); } -static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream) +static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream, + bool trylock) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_pcm_hw_params *params, *sparams; @@ -849,7 +850,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream) struct snd_mask sformat_mask; struct snd_mask mask; - if (mutex_lock_interruptible(&runtime->oss.params_lock)) + if (trylock) { + if (!(mutex_trylock(&runtime->oss.params_lock))) + return -EAGAIN; + } else if (mutex_lock_interruptible(&runtime->oss.params_lock)) return -EINTR; sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL); params = kmalloc(sizeof(*params), GFP_KERNEL); @@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil if (asubstream == NULL) asubstream = substream; if (substream->runtime->oss.params) { - err = snd_pcm_oss_change_params(substream); + err = snd_pcm_oss_change_params(substream, false); if (err < 0) return err; } @@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream) return 0; runtime = substream->runtime; if (runtime->oss.params) { - err = snd_pcm_oss_change_params(substream); + err = snd_pcm_oss_change_params(substream, false); if (err < 0) return err; } @@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre runtime = substream->runtime; if (runtime->oss.params && - (err = snd_pcm_oss_change_params(substream)) < 0) + (err = snd_pcm_oss_change_params(substream, false)) < 0) return err; 
info.fragsize = runtime->oss.period_bytes; @@ -2804,7 +2808,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area) return -EIO; if (runtime->oss.params) { - if ((err = snd_pcm_oss_change_params(substream)) < 0) + /* use mutex_trylock() for params_lock for avoiding a deadlock + * between mmap_sem and params_lock taken by + * copy_from/to_user() in snd_pcm_oss_write/read() + */ + err = snd_pcm_oss_change_params(substream, true); + if (err < 0) return err; } #ifdef CONFIG_SND_PCM_OSS_PLUGINS diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c index a7759846fbaa..795437b10082 100644 --- a/sound/core/rawmidi.c +++ b/sound/core/rawmidi.c @@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream, unsigned long flags; long result = 0, count1; struct snd_rawmidi_runtime *runtime = substream->runtime; + unsigned long appl_ptr; + spin_lock_irqsave(&runtime->lock, flags); while (count > 0 && runtime->avail) { count1 = runtime->buffer_size - runtime->appl_ptr; if (count1 > count) count1 = count; - spin_lock_irqsave(&runtime->lock, flags); if (count1 > (int)runtime->avail) count1 = runtime->avail; + + /* update runtime->appl_ptr before unlocking for userbuf */ + appl_ptr = runtime->appl_ptr; + runtime->appl_ptr += count1; + runtime->appl_ptr %= runtime->buffer_size; + runtime->avail -= count1; + if (kernelbuf) - memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1); + memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1); if (userbuf) { spin_unlock_irqrestore(&runtime->lock, flags); if (copy_to_user(userbuf + result, - runtime->buffer + runtime->appl_ptr, count1)) { + runtime->buffer + appl_ptr, count1)) { return result > 0 ? result : -EFAULT; } spin_lock_irqsave(&runtime->lock, flags); } - runtime->appl_ptr += count1; - runtime->appl_ptr %= runtime->buffer_size; - runtime->avail -= count1; - spin_unlock_irqrestore(&runtime->lock, flags); result += count1; count -= count1; } + spin_unlock_irqrestore(&runtime->lock, flags); return result; } @@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream) EXPORT_SYMBOL(snd_rawmidi_transmit_empty); /** - * snd_rawmidi_transmit_peek - copy data from the internal buffer + * __snd_rawmidi_transmit_peek - copy data from the internal buffer * @substream: the rawmidi substream * @buffer: the buffer pointer * @count: data size to transfer * - * Copies data from the internal output buffer to the given buffer. - * - * Call this in the interrupt handler when the midi output is ready, - * and call snd_rawmidi_transmit_ack() after the transmission is - * finished. - * - * Return: The size of copied data, or a negative error code on failure. + * This is a variant of snd_rawmidi_transmit_peek() without spinlock. 
*/ -int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, +int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, unsigned char *buffer, int count) { - unsigned long flags; int result, count1; struct snd_rawmidi_runtime *runtime = substream->runtime; @@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, return -EINVAL; } result = 0; - spin_lock_irqsave(&runtime->lock, flags); if (runtime->avail >= runtime->buffer_size) { /* warning: lowlevel layer MUST trigger down the hardware */ goto __skip; @@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, } } __skip: + return result; +} +EXPORT_SYMBOL(__snd_rawmidi_transmit_peek); + +/** + * snd_rawmidi_transmit_peek - copy data from the internal buffer + * @substream: the rawmidi substream + * @buffer: the buffer pointer + * @count: data size to transfer + * + * Copies data from the internal output buffer to the given buffer. + * + * Call this in the interrupt handler when the midi output is ready, + * and call snd_rawmidi_transmit_ack() after the transmission is + * finished. + * + * Return: The size of copied data, or a negative error code on failure. + */ +int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream, + unsigned char *buffer, int count) +{ + struct snd_rawmidi_runtime *runtime = substream->runtime; + int result; + unsigned long flags; + + spin_lock_irqsave(&runtime->lock, flags); + result = __snd_rawmidi_transmit_peek(substream, buffer, count); spin_unlock_irqrestore(&runtime->lock, flags); return result; } EXPORT_SYMBOL(snd_rawmidi_transmit_peek); /** - * snd_rawmidi_transmit_ack - acknowledge the transmission + * __snd_rawmidi_transmit_ack - acknowledge the transmission * @substream: the rawmidi substream * @count: the transferred count * - * Advances the hardware pointer for the internal output buffer with - * the given size and updates the condition. - * Call after the transmission is finished. - * - * Return: The advanced size if successful, or a negative error code on failure. + * This is a variant of __snd_rawmidi_transmit_ack() without spinlock. */ -int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count) +int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count) { - unsigned long flags; struct snd_rawmidi_runtime *runtime = substream->runtime; if (runtime->buffer == NULL) { @@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count) "snd_rawmidi_transmit_ack: output is not active!!!\n"); return -EINVAL; } - spin_lock_irqsave(&runtime->lock, flags); snd_BUG_ON(runtime->avail + count > runtime->buffer_size); runtime->hw_ptr += count; runtime->hw_ptr %= runtime->buffer_size; @@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count) if (runtime->drain || snd_rawmidi_ready(substream)) wake_up(&runtime->sleep); } - spin_unlock_irqrestore(&runtime->lock, flags); return count; } +EXPORT_SYMBOL(__snd_rawmidi_transmit_ack); + +/** + * snd_rawmidi_transmit_ack - acknowledge the transmission + * @substream: the rawmidi substream + * @count: the transferred count + * + * Advances the hardware pointer for the internal output buffer with + * the given size and updates the condition. + * Call after the transmission is finished. + * + * Return: The advanced size if successful, or a negative error code on failure. 
+ */ +int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count) +{ + struct snd_rawmidi_runtime *runtime = substream->runtime; + int result; + unsigned long flags; + + spin_lock_irqsave(&runtime->lock, flags); + result = __snd_rawmidi_transmit_ack(substream, count); + spin_unlock_irqrestore(&runtime->lock, flags); + return result; +} EXPORT_SYMBOL(snd_rawmidi_transmit_ack); /** @@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack); int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream, unsigned char *buffer, int count) { + struct snd_rawmidi_runtime *runtime = substream->runtime; + int result; + unsigned long flags; + + spin_lock_irqsave(&runtime->lock, flags); if (!substream->opened) - return -EBADFD; - count = snd_rawmidi_transmit_peek(substream, buffer, count); - if (count < 0) - return count; - return snd_rawmidi_transmit_ack(substream, count); + result = -EBADFD; + else { + count = __snd_rawmidi_transmit_peek(substream, buffer, count); + if (count <= 0) + result = count; + else + result = __snd_rawmidi_transmit_ack(substream, count); + } + spin_unlock_irqrestore(&runtime->lock, flags); + return result; } EXPORT_SYMBOL(snd_rawmidi_transmit); @@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, unsigned long flags; long count1, result; struct snd_rawmidi_runtime *runtime = substream->runtime; + unsigned long appl_ptr; - if (snd_BUG_ON(!kernelbuf && !userbuf)) + if (!kernelbuf && !userbuf) return -EINVAL; if (snd_BUG_ON(!runtime->buffer)) return -EINVAL; @@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, count1 = count; if (count1 > (long)runtime->avail) count1 = runtime->avail; + + /* update runtime->appl_ptr before unlocking for userbuf */ + appl_ptr = runtime->appl_ptr; + runtime->appl_ptr += count1; + runtime->appl_ptr %= runtime->buffer_size; + runtime->avail -= count1; + if (kernelbuf) - memcpy(runtime->buffer + runtime->appl_ptr, + memcpy(runtime->buffer + appl_ptr, kernelbuf + result, count1); else if (userbuf) { spin_unlock_irqrestore(&runtime->lock, flags); - if (copy_from_user(runtime->buffer + runtime->appl_ptr, + if (copy_from_user(runtime->buffer + appl_ptr, userbuf + result, count1)) { spin_lock_irqsave(&runtime->lock, flags); result = result > 0 ? result : -EFAULT; @@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream, } spin_lock_irqsave(&runtime->lock, flags); } - runtime->appl_ptr += count1; - runtime->appl_ptr %= runtime->buffer_size; - runtime->avail -= count1; result += count1; count -= count1; } diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 13cfa815732d..58e79e02f217 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c @@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client, else down_read(&grp->list_mutex); list_for_each_entry(subs, &grp->list_head, src_list) { + /* both ports ready? 
*/ + if (atomic_read(&subs->ref_count) != 2) + continue; event->dest = subs->info.dest; if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP) /* convert time according to flag with subscription */ diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c index 55170a20ae72..921fb2bd8fad 100644 --- a/sound/core/seq/seq_ports.c +++ b/sound/core/seq/seq_ports.c @@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, } /* */ -enum group_type { - SRC_LIST, DEST_LIST -}; - static int subscribe_port(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, @@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr, return NULL; } +static void delete_and_unsubscribe_port(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_subscribers *subs, + bool is_src, bool ack); + +static inline struct snd_seq_subscribers * +get_subscriber(struct list_head *p, bool is_src) +{ + if (is_src) + return list_entry(p, struct snd_seq_subscribers, src_list); + else + return list_entry(p, struct snd_seq_subscribers, dest_list); +} + /* * remove all subscribers on the list * this is called from port_delete, for each src and dest list. @@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr, static void clear_subscriber_list(struct snd_seq_client *client, struct snd_seq_client_port *port, struct snd_seq_port_subs_info *grp, - int grptype) + int is_src) { struct list_head *p, *n; @@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client, struct snd_seq_client *c; struct snd_seq_client_port *aport; - if (grptype == SRC_LIST) { - subs = list_entry(p, struct snd_seq_subscribers, src_list); + subs = get_subscriber(p, is_src); + if (is_src) aport = get_client_port(&subs->info.dest, &c); - } else { - subs = list_entry(p, struct snd_seq_subscribers, dest_list); + else aport = get_client_port(&subs->info.sender, &c); - } - list_del(p); - unsubscribe_port(client, port, grp, &subs->info, 0); + delete_and_unsubscribe_port(client, port, subs, is_src, false); + if (!aport) { /* looks like the connected port is being deleted. * we decrease the counter, and when both ports are deleted @@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client, */ if (atomic_dec_and_test(&subs->ref_count)) kfree(subs); - } else { - /* ok we got the connected port */ - struct snd_seq_port_subs_info *agrp; - agrp = (grptype == SRC_LIST) ? 
&aport->c_dest : &aport->c_src; - down_write(&agrp->list_mutex); - if (grptype == SRC_LIST) - list_del(&subs->dest_list); - else - list_del(&subs->src_list); - up_write(&agrp->list_mutex); - unsubscribe_port(c, aport, agrp, &subs->info, 1); - kfree(subs); - snd_seq_port_unlock(aport); - snd_seq_client_unlock(c); + continue; } + + /* ok we got the connected port */ + delete_and_unsubscribe_port(c, aport, subs, !is_src, true); + kfree(subs); + snd_seq_port_unlock(aport); + snd_seq_client_unlock(c); } } @@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client, snd_use_lock_sync(&port->use_lock); /* clear subscribers info */ - clear_subscriber_list(client, port, &port->c_src, SRC_LIST); - clear_subscriber_list(client, port, &port->c_dest, DEST_LIST); + clear_subscriber_list(client, port, &port->c_src, true); + clear_subscriber_list(client, port, &port->c_dest, false); if (port->private_free) port->private_free(port->private_data); @@ -479,85 +480,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r, return 0; } - -/* connect two ports */ -int snd_seq_port_connect(struct snd_seq_client *connector, - struct snd_seq_client *src_client, - struct snd_seq_client_port *src_port, - struct snd_seq_client *dest_client, - struct snd_seq_client_port *dest_port, - struct snd_seq_port_subscribe *info) +static int check_and_subscribe_port(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_subscribers *subs, + bool is_src, bool exclusive, bool ack) { - struct snd_seq_port_subs_info *src = &src_port->c_src; - struct snd_seq_port_subs_info *dest = &dest_port->c_dest; - struct snd_seq_subscribers *subs, *s; - int err, src_called = 0; - unsigned long flags; - int exclusive; + struct snd_seq_port_subs_info *grp; + struct list_head *p; + struct snd_seq_subscribers *s; + int err; - subs = kzalloc(sizeof(*subs), GFP_KERNEL); - if (! subs) - return -ENOMEM; - - subs->info = *info; - atomic_set(&subs->ref_count, 2); - - down_write(&src->list_mutex); - down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING); - - exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0; + grp = is_src ? &port->c_src : &port->c_dest; err = -EBUSY; + down_write(&grp->list_mutex); if (exclusive) { - if (! list_empty(&src->list_head) || ! 
list_empty(&dest->list_head)) + if (!list_empty(&grp->list_head)) goto __error; } else { - if (src->exclusive || dest->exclusive) + if (grp->exclusive) goto __error; /* check whether already exists */ - list_for_each_entry(s, &src->list_head, src_list) { - if (match_subs_info(info, &s->info)) - goto __error; - } - list_for_each_entry(s, &dest->list_head, dest_list) { - if (match_subs_info(info, &s->info)) + list_for_each(p, &grp->list_head) { + s = get_subscriber(p, is_src); + if (match_subs_info(&subs->info, &s->info)) goto __error; } } - if ((err = subscribe_port(src_client, src_port, src, info, - connector->number != src_client->number)) < 0) - goto __error; - src_called = 1; - - if ((err = subscribe_port(dest_client, dest_port, dest, info, - connector->number != dest_client->number)) < 0) + err = subscribe_port(client, port, grp, &subs->info, ack); + if (err < 0) { + grp->exclusive = 0; goto __error; + } /* add to list */ - write_lock_irqsave(&src->list_lock, flags); - // write_lock(&dest->list_lock); // no other lock yet - list_add_tail(&subs->src_list, &src->list_head); - list_add_tail(&subs->dest_list, &dest->list_head); - // write_unlock(&dest->list_lock); // no other lock yet - write_unlock_irqrestore(&src->list_lock, flags); + write_lock_irq(&grp->list_lock); + if (is_src) + list_add_tail(&subs->src_list, &grp->list_head); + else + list_add_tail(&subs->dest_list, &grp->list_head); + grp->exclusive = exclusive; + atomic_inc(&subs->ref_count); + write_unlock_irq(&grp->list_lock); + err = 0; + + __error: + up_write(&grp->list_mutex); + return err; +} - src->exclusive = dest->exclusive = exclusive; +static void delete_and_unsubscribe_port(struct snd_seq_client *client, + struct snd_seq_client_port *port, + struct snd_seq_subscribers *subs, + bool is_src, bool ack) +{ + struct snd_seq_port_subs_info *grp; + + grp = is_src ? 
&port->c_src : &port->c_dest; + down_write(&grp->list_mutex); + write_lock_irq(&grp->list_lock); + if (is_src) + list_del(&subs->src_list); + else + list_del(&subs->dest_list); + grp->exclusive = 0; + write_unlock_irq(&grp->list_lock); + up_write(&grp->list_mutex); + + unsubscribe_port(client, port, grp, &subs->info, ack); +} + +/* connect two ports */ +int snd_seq_port_connect(struct snd_seq_client *connector, + struct snd_seq_client *src_client, + struct snd_seq_client_port *src_port, + struct snd_seq_client *dest_client, + struct snd_seq_client_port *dest_port, + struct snd_seq_port_subscribe *info) +{ + struct snd_seq_subscribers *subs; + bool exclusive; + int err; + + subs = kzalloc(sizeof(*subs), GFP_KERNEL); + if (!subs) + return -ENOMEM; + + subs->info = *info; + atomic_set(&subs->ref_count, 0); + INIT_LIST_HEAD(&subs->src_list); + INIT_LIST_HEAD(&subs->dest_list); + + exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE); + + err = check_and_subscribe_port(src_client, src_port, subs, true, + exclusive, + connector->number != src_client->number); + if (err < 0) + goto error; + err = check_and_subscribe_port(dest_client, dest_port, subs, false, + exclusive, + connector->number != dest_client->number); + if (err < 0) + goto error_dest; - up_write(&dest->list_mutex); - up_write(&src->list_mutex); return 0; - __error: - if (src_called) - unsubscribe_port(src_client, src_port, src, info, - connector->number != src_client->number); + error_dest: + delete_and_unsubscribe_port(src_client, src_port, subs, true, + connector->number != src_client->number); + error: kfree(subs); - up_write(&dest->list_mutex); - up_write(&src->list_mutex); return err; } - /* remove the connection */ int snd_seq_port_disconnect(struct snd_seq_client *connector, struct snd_seq_client *src_client, @@ -567,37 +603,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector, struct snd_seq_port_subscribe *info) { struct snd_seq_port_subs_info *src = &src_port->c_src; - struct snd_seq_port_subs_info *dest = &dest_port->c_dest; struct snd_seq_subscribers *subs; int err = -ENOENT; - unsigned long flags; down_write(&src->list_mutex); - down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING); - /* look for the connection */ list_for_each_entry(subs, &src->list_head, src_list) { if (match_subs_info(info, &subs->info)) { - write_lock_irqsave(&src->list_lock, flags); - // write_lock(&dest->list_lock); // no lock yet - list_del(&subs->src_list); - list_del(&subs->dest_list); - // write_unlock(&dest->list_lock); - write_unlock_irqrestore(&src->list_lock, flags); - src->exclusive = dest->exclusive = 0; - unsubscribe_port(src_client, src_port, src, info, - connector->number != src_client->number); - unsubscribe_port(dest_client, dest_port, dest, info, - connector->number != dest_client->number); - kfree(subs); + atomic_dec(&subs->ref_count); /* mark as not ready */ err = 0; break; } } - - up_write(&dest->list_mutex); up_write(&src->list_mutex); - return err; + if (err < 0) + return err; + + delete_and_unsubscribe_port(src_client, src_port, subs, true, + connector->number != src_client->number); + delete_and_unsubscribe_port(dest_client, dest_port, subs, false, + connector->number != dest_client->number); + kfree(subs); + return 0; } diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c index 82b220c769c1..293104926098 100644 --- a/sound/core/seq/seq_timer.c +++ b/sound/core/seq/seq_timer.c @@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr) void snd_seq_timer_defaults(struct 
snd_seq_timer * tmr) { + unsigned long flags; + + spin_lock_irqsave(&tmr->lock, flags); /* setup defaults */ tmr->ppq = 96; /* 96 PPQ */ tmr->tempo = 500000; /* 120 BPM */ @@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr) tmr->preferred_resolution = seq_default_timer_resolution; tmr->skew = tmr->skew_base = SKEW_BASE; + spin_unlock_irqrestore(&tmr->lock, flags); } -void snd_seq_timer_reset(struct snd_seq_timer * tmr) +static void seq_timer_reset(struct snd_seq_timer *tmr) { - unsigned long flags; - - spin_lock_irqsave(&tmr->lock, flags); - /* reset time & songposition */ tmr->cur_time.tv_sec = 0; tmr->cur_time.tv_nsec = 0; tmr->tick.cur_tick = 0; tmr->tick.fraction = 0; +} + +void snd_seq_timer_reset(struct snd_seq_timer *tmr) +{ + unsigned long flags; + spin_lock_irqsave(&tmr->lock, flags); + seq_timer_reset(tmr); spin_unlock_irqrestore(&tmr->lock, flags); } @@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri, tmr = q->timer; if (tmr == NULL) return; - if (!tmr->running) + spin_lock_irqsave(&tmr->lock, flags); + if (!tmr->running) { + spin_unlock_irqrestore(&tmr->lock, flags); return; + } resolution *= ticks; if (tmr->skew != tmr->skew_base) { @@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri, (((resolution & 0xffff) * tmr->skew) >> 16); } - spin_lock_irqsave(&tmr->lock, flags); - /* update timer */ snd_seq_inc_time_nsec(&tmr->cur_time, resolution); @@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q) t->callback = snd_seq_timer_interrupt; t->callback_data = q; t->flags |= SNDRV_TIMER_IFLG_AUTO; + spin_lock_irq(&tmr->lock); tmr->timeri = t; + spin_unlock_irq(&tmr->lock); return 0; } int snd_seq_timer_close(struct snd_seq_queue *q) { struct snd_seq_timer *tmr; + struct snd_timer_instance *t; tmr = q->timer; if (snd_BUG_ON(!tmr)) return -EINVAL; - if (tmr->timeri) { - snd_timer_stop(tmr->timeri); - snd_timer_close(tmr->timeri); - tmr->timeri = NULL; - } + spin_lock_irq(&tmr->lock); + t = tmr->timeri; + tmr->timeri = NULL; + spin_unlock_irq(&tmr->lock); + if (t) + snd_timer_close(t); return 0; } -int snd_seq_timer_stop(struct snd_seq_timer * tmr) +static int seq_timer_stop(struct snd_seq_timer *tmr) { if (! tmr->timeri) return -EINVAL; @@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr) return 0; } +int snd_seq_timer_stop(struct snd_seq_timer *tmr) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&tmr->lock, flags); + err = seq_timer_stop(tmr); + spin_unlock_irqrestore(&tmr->lock, flags); + return err; +} + static int initialize_timer(struct snd_seq_timer *tmr) { struct snd_timer *t; @@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr) return 0; } -int snd_seq_timer_start(struct snd_seq_timer * tmr) +static int seq_timer_start(struct snd_seq_timer *tmr) { if (! 
tmr->timeri) return -EINVAL; if (tmr->running) - snd_seq_timer_stop(tmr); - snd_seq_timer_reset(tmr); + seq_timer_stop(tmr); + seq_timer_reset(tmr); if (initialize_timer(tmr) < 0) return -EINVAL; snd_timer_start(tmr->timeri, tmr->ticks); @@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr) return 0; } -int snd_seq_timer_continue(struct snd_seq_timer * tmr) +int snd_seq_timer_start(struct snd_seq_timer *tmr) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&tmr->lock, flags); + err = seq_timer_start(tmr); + spin_unlock_irqrestore(&tmr->lock, flags); + return err; +} + +static int seq_timer_continue(struct snd_seq_timer *tmr) { if (! tmr->timeri) return -EINVAL; if (tmr->running) return -EBUSY; if (! tmr->initialized) { - snd_seq_timer_reset(tmr); + seq_timer_reset(tmr); if (initialize_timer(tmr) < 0) return -EINVAL; } @@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr) return 0; } +int snd_seq_timer_continue(struct snd_seq_timer *tmr) +{ + unsigned long flags; + int err; + + spin_lock_irqsave(&tmr->lock, flags); + err = seq_timer_continue(tmr); + spin_unlock_irqrestore(&tmr->lock, flags); + return err; +} + /* return current 'real' time. use timeofday() to get better granularity. */ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr) { snd_seq_real_time_t cur_time; + unsigned long flags; + spin_lock_irqsave(&tmr->lock, flags); cur_time = tmr->cur_time; if (tmr->running) { struct timeval tm; @@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr) } snd_seq_sanity_real_time(&cur_time); } - + spin_unlock_irqrestore(&tmr->lock, flags); return cur_time; } diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c index 3da2d48610b3..c82ed3e70506 100644 --- a/sound/core/seq/seq_virmidi.c +++ b/sound/core/seq/seq_virmidi.c @@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, struct snd_virmidi *vmidi = substream->runtime->private_data; int count, res; unsigned char buf[32], *pbuf; + unsigned long flags; if (up) { vmidi->trigger = 1; if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH && !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) { - snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail); - return; /* ignored */ + while (snd_rawmidi_transmit(substream, buf, + sizeof(buf)) > 0) { + /* ignored */ + } + return; } if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) { if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0) return; vmidi->event.type = SNDRV_SEQ_EVENT_NONE; } + spin_lock_irqsave(&substream->runtime->lock, flags); while (1) { - count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf)); + count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf)); if (count <= 0) break; pbuf = buf; @@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream, snd_midi_event_reset_encode(vmidi->parser); continue; } - snd_rawmidi_transmit_ack(substream, res); + __snd_rawmidi_transmit_ack(substream, res); pbuf += res; count -= res; if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) { if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0) - return; + goto out; vmidi->event.type = SNDRV_SEQ_EVENT_NONE; } } } + out: + spin_unlock_irqrestore(&substream->runtime->lock, flags); } else { vmidi->trigger = 0; } @@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream 
*substream) */ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream) { + struct snd_virmidi_dev *rdev = substream->rmidi->private_data; struct snd_virmidi *vmidi = substream->runtime->private_data; - snd_midi_event_free(vmidi->parser); + + write_lock_irq(&rdev->filelist_lock); list_del(&vmidi->list); + write_unlock_irq(&rdev->filelist_lock); + snd_midi_event_free(vmidi->parser); substream->runtime->private_data = NULL; kfree(vmidi); return 0; diff --git a/sound/core/timer.c b/sound/core/timer.c index af1f68f7e315..dca817fc7894 100644 --- a/sound/core/timer.c +++ b/sound/core/timer.c @@ -422,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) spin_lock_irqsave(&timer->lock, flags); list_for_each_entry(ts, &ti->slave_active_head, active_list) if (ts->ccallback) - ts->ccallback(ti, event + 100, &tstamp, resolution); + ts->ccallback(ts, event + 100, &tstamp, resolution); spin_unlock_irqrestore(&timer->lock, flags); } @@ -451,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri) unsigned long flags; spin_lock_irqsave(&slave_active_lock, flags); + if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) { + spin_unlock_irqrestore(&slave_active_lock, flags); + return -EBUSY; + } timeri->flags |= SNDRV_TIMER_IFLG_RUNNING; if (timeri->master && timeri->timer) { spin_lock(&timeri->timer->lock); @@ -475,7 +479,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) return -EINVAL; if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { result = snd_timer_start_slave(timeri); - snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); + if (result >= 0) + snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } timer = timeri->timer; @@ -484,11 +489,18 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks) if (timer->card && timer->card->shutdown) return -ENODEV; spin_lock_irqsave(&timer->lock, flags); + if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | + SNDRV_TIMER_IFLG_START)) { + result = -EBUSY; + goto unlock; + } timeri->ticks = timeri->cticks = ticks; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, ticks); + unlock: spin_unlock_irqrestore(&timer->lock, flags); - snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); + if (result >= 0) + snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START); return result; } @@ -502,9 +514,17 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event) if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) { spin_lock_irqsave(&slave_active_lock, flags); + if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) { + spin_unlock_irqrestore(&slave_active_lock, flags); + return -EBUSY; + } + if (timeri->timer) + spin_lock(&timeri->timer->lock); timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING; list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); + if (timeri->timer) + spin_unlock(&timeri->timer->lock); spin_unlock_irqrestore(&slave_active_lock, flags); goto __end; } @@ -512,6 +532,11 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event) if (!timer) return -EINVAL; spin_lock_irqsave(&timer->lock, flags); + if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING | + SNDRV_TIMER_IFLG_START))) { + spin_unlock_irqrestore(&timer->lock, flags); + return -EBUSY; + } list_del_init(&timeri->ack_list); list_del_init(&timeri->active_list); if (timer->card && timer->card->shutdown) { @@ -581,10 +606,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri) if (timer->card && timer->card->shutdown) return -ENODEV; 
spin_lock_irqsave(&timer->lock, flags); + if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) { + result = -EBUSY; + goto unlock; + } if (!timeri->cticks) timeri->cticks = 1; timeri->pticks = 0; result = snd_timer_start1(timer, timeri, timer->sticks); + unlock: spin_unlock_irqrestore(&timer->lock, flags); snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE); return result; @@ -718,8 +748,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left) ti->cticks = ti->ticks; } else { ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING; - if (--timer->running) - list_del_init(&ti->active_list); + --timer->running; + list_del_init(&ti->active_list); } if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) || (ti->flags & SNDRV_TIMER_IFLG_FAST)) @@ -1032,11 +1062,21 @@ static int snd_timer_s_stop(struct snd_timer * timer) return 0; } +static int snd_timer_s_close(struct snd_timer *timer) +{ + struct snd_timer_system_private *priv; + + priv = (struct snd_timer_system_private *)timer->private_data; + del_timer_sync(&priv->tlist); + return 0; +} + static struct snd_timer_hardware snd_timer_system = { .flags = SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET, .resolution = 1000000000L / HZ, .ticks = 10000000L, + .close = snd_timer_s_close, .start = snd_timer_s_start, .stop = snd_timer_s_stop }; @@ -1893,6 +1933,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, { struct snd_timer_user *tu; long result = 0, unit; + int qhead; int err = 0; tu = file->private_data; @@ -1904,7 +1945,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) { err = -EAGAIN; - break; + goto _error; } set_current_state(TASK_INTERRUPTIBLE); @@ -1919,42 +1960,37 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer, if (tu->disconnected) { err = -ENODEV; - break; + goto _error; } if (signal_pending(current)) { err = -ERESTARTSYS; - break; + goto _error; } } + qhead = tu->qhead++; + tu->qhead %= tu->queue_size; spin_unlock_irq(&tu->qlock); - if (err < 0) - goto _error; if (tu->tread) { - if (copy_to_user(buffer, &tu->tqueue[tu->qhead++], - sizeof(struct snd_timer_tread))) { + if (copy_to_user(buffer, &tu->tqueue[qhead], + sizeof(struct snd_timer_tread))) err = -EFAULT; - goto _error; - } } else { - if (copy_to_user(buffer, &tu->queue[tu->qhead++], - sizeof(struct snd_timer_read))) { + if (copy_to_user(buffer, &tu->queue[qhead], + sizeof(struct snd_timer_read))) err = -EFAULT; - goto _error; - } } - tu->qhead %= tu->queue_size; - - result += unit; - buffer += unit; - spin_lock_irq(&tu->qlock); tu->qused--; + if (err < 0) + goto _error; + result += unit; + buffer += unit; } - spin_unlock_irq(&tu->qlock); _error: + spin_unlock_irq(&tu->qlock); return result > 0 ? 
result : err; } diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c index bde33308f0d6..c0f8f613f1f1 100644 --- a/sound/drivers/dummy.c +++ b/sound/drivers/dummy.c @@ -87,7 +87,7 @@ MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver."); module_param(fake_buffer, bool, 0444); MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations."); #ifdef CONFIG_HIGH_RES_TIMERS -module_param(hrtimer, bool, 0444); +module_param(hrtimer, bool, 0644); MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source."); #endif @@ -109,6 +109,9 @@ struct dummy_timer_ops { snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *); }; +#define get_dummy_ops(substream) \ + (*(const struct dummy_timer_ops **)(substream)->runtime->private_data) + struct dummy_model { const char *name; int (*playback_constraints)(struct snd_pcm_runtime *runtime); @@ -137,7 +140,6 @@ struct snd_dummy { int iobox; struct snd_kcontrol *cd_volume_ctl; struct snd_kcontrol *cd_switch_ctl; - const struct dummy_timer_ops *timer_ops; }; /* @@ -231,6 +233,8 @@ static struct dummy_model *dummy_models[] = { */ struct dummy_systimer_pcm { + /* ops must be the first item */ + const struct dummy_timer_ops *timer_ops; spinlock_t lock; struct timer_list timer; unsigned long base_time; @@ -366,6 +370,8 @@ static const struct dummy_timer_ops dummy_systimer_ops = { */ struct dummy_hrtimer_pcm { + /* ops must be the first item */ + const struct dummy_timer_ops *timer_ops; ktime_t base_time; ktime_t period_time; atomic_t running; @@ -492,31 +498,25 @@ static const struct dummy_timer_ops dummy_hrtimer_ops = { static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { - struct snd_dummy *dummy = snd_pcm_substream_chip(substream); - switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: - return dummy->timer_ops->start(substream); + return get_dummy_ops(substream)->start(substream); case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: - return dummy->timer_ops->stop(substream); + return get_dummy_ops(substream)->stop(substream); } return -EINVAL; } static int dummy_pcm_prepare(struct snd_pcm_substream *substream) { - struct snd_dummy *dummy = snd_pcm_substream_chip(substream); - - return dummy->timer_ops->prepare(substream); + return get_dummy_ops(substream)->prepare(substream); } static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream) { - struct snd_dummy *dummy = snd_pcm_substream_chip(substream); - - return dummy->timer_ops->pointer(substream); + return get_dummy_ops(substream)->pointer(substream); } static struct snd_pcm_hardware dummy_pcm_hardware = { @@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream) struct snd_dummy *dummy = snd_pcm_substream_chip(substream); struct dummy_model *model = dummy->model; struct snd_pcm_runtime *runtime = substream->runtime; + const struct dummy_timer_ops *ops; int err; - dummy->timer_ops = &dummy_systimer_ops; + ops = &dummy_systimer_ops; #ifdef CONFIG_HIGH_RES_TIMERS if (hrtimer) - dummy->timer_ops = &dummy_hrtimer_ops; + ops = &dummy_hrtimer_ops; #endif - err = dummy->timer_ops->create(substream); + err = ops->create(substream); if (err < 0) return err; + get_dummy_ops(substream) = ops; runtime->hw = dummy->pcm_hw; if (substream->pcm->device & 1) { @@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream) err = model->capture_constraints(substream->runtime); } if (err < 0) { - dummy->timer_ops->free(substream); + get_dummy_ops(substream)->free(substream); return err; 
} return 0; @@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream) static int dummy_pcm_close(struct snd_pcm_substream *substream) { - struct snd_dummy *dummy = snd_pcm_substream_chip(substream); - dummy->timer_ops->free(substream); + get_dummy_ops(substream)->free(substream); return 0; } diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c index b02a5e8cad44..0ac92aba5bc1 100644 --- a/sound/firewire/digi00x/amdtp-dot.c +++ b/sound/firewire/digi00x/amdtp-dot.c @@ -63,7 +63,7 @@ struct amdtp_dot { #define BYTE_PER_SAMPLE (4) #define MAGIC_DOT_BYTE (2) #define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE) -static const u8 dot_scrt(const u8 idx, const unsigned int off) +static u8 dot_scrt(const u8 idx, const unsigned int off) { /* * the length of the added pattern only depends on the lower nibble diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c index 904ce0329fa1..040a96d1ba8e 100644 --- a/sound/firewire/tascam/tascam-transaction.c +++ b/sound/firewire/tascam/tascam-transaction.c @@ -230,6 +230,7 @@ int snd_tscm_transaction_register(struct snd_tscm *tscm) return err; error: fw_core_remove_address_handler(&tscm->async_handler); + tscm->async_handler.callback_data = NULL; return err; } @@ -276,6 +277,9 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm) __be32 reg; unsigned int i; + if (tscm->async_handler.callback_data == NULL) + return; + /* Turn off FireWire LED. */ reg = cpu_to_be32(0x0000008e); snd_fw_transaction(tscm->unit, TCODE_WRITE_QUADLET_REQUEST, @@ -297,6 +301,8 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm) ®, sizeof(reg), 0); fw_core_remove_address_handler(&tscm->async_handler); + tscm->async_handler.callback_data = NULL; + for (i = 0; i < TSCM_MIDI_OUT_PORT_MAX; i++) snd_fw_async_midi_port_destroy(&tscm->out_ports[i]); } diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c index ee0bc1839508..e281c338e562 100644 --- a/sound/firewire/tascam/tascam.c +++ b/sound/firewire/tascam/tascam.c @@ -21,7 +21,6 @@ static struct snd_tscm_spec model_specs[] = { .pcm_playback_analog_channels = 8, .midi_capture_ports = 4, .midi_playback_ports = 4, - .is_controller = true, }, { .name = "FW-1082", @@ -31,9 +30,16 @@ static struct snd_tscm_spec model_specs[] = { .pcm_playback_analog_channels = 2, .midi_capture_ports = 2, .midi_playback_ports = 2, - .is_controller = true, }, - /* FW-1804 may be supported. */ + { + .name = "FW-1804", + .has_adat = true, + .has_spdif = true, + .pcm_capture_analog_channels = 8, + .pcm_playback_analog_channels = 2, + .midi_capture_ports = 2, + .midi_playback_ports = 4, + }, }; static int identify_model(struct snd_tscm *tscm) diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h index 2d028d2bd3bd..30ab77e924f7 100644 --- a/sound/firewire/tascam/tascam.h +++ b/sound/firewire/tascam/tascam.h @@ -39,7 +39,6 @@ struct snd_tscm_spec { unsigned int pcm_playback_analog_channels; unsigned int midi_capture_ports; unsigned int midi_playback_ports; - bool is_controller; }; #define TSCM_MIDI_IN_PORT_MAX 4 @@ -72,9 +71,6 @@ struct snd_tscm { struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX]; u8 running_status[TSCM_MIDI_OUT_PORT_MAX]; bool on_sysex[TSCM_MIDI_OUT_PORT_MAX]; - - /* For control messages. 
*/ - struct snd_firewire_tascam_status *status; }; #define TSCM_ADDR_BASE 0xffff00000000ull diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index 28e2f8b42f5e..891453451543 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -1141,6 +1141,14 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu) emu->emu1010.firmware_thread = kthread_create(emu1010_firmware_thread, emu, "emu1010_firmware"); + if (IS_ERR(emu->emu1010.firmware_thread)) { + err = PTR_ERR(emu->emu1010.firmware_thread); + emu->emu1010.firmware_thread = NULL; + dev_info(emu->card->dev, + "emu1010: Creating thread failed\n"); + return err; + } + wake_up_process(emu->emu1010.firmware_thread); } diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 30c8efe0f80a..7ca5b89f088a 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -4028,9 +4028,9 @@ static void pin_power_callback(struct hda_codec *codec, struct hda_jack_callback *jack, bool on) { - if (jack && jack->tbl->nid) + if (jack && jack->nid) sync_power_state_change(codec, - set_pin_power_jack(codec, jack->tbl->nid, on)); + set_pin_power_jack(codec, jack->nid, on)); } /* callback only doing power up -- called at first */ diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c index c945e257d368..a33234e04d4f 100644 --- a/sound/pci/hda/hda_jack.c +++ b/sound/pci/hda/hda_jack.c @@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid, if (!callback) return ERR_PTR(-ENOMEM); callback->func = func; - callback->tbl = jack; + callback->nid = jack->nid; callback->next = jack->callback; jack->callback = callback; } diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h index 858708a044f5..e9814c0168ea 100644 --- a/sound/pci/hda/hda_jack.h +++ b/sound/pci/hda/hda_jack.h @@ -21,7 +21,7 @@ struct hda_jack_callback; typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *); struct hda_jack_callback { - struct hda_jack_tbl *tbl; + hda_nid_t nid; hda_jack_callback_fn func; unsigned int private_data; /* arbitrary data */ struct hda_jack_callback *next; diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 4ef2259f88ca..9ceb2bc36e68 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -4427,13 +4427,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec, static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb) { struct ca0132_spec *spec = codec->spec; + struct hda_jack_tbl *tbl; /* Delay enabling the HP amp, to let the mic-detection * state machine run. 
*/ cancel_delayed_work_sync(&spec->unsol_hp_work); schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500)); - cb->tbl->block_report = 1; + tbl = snd_hda_jack_tbl_get(codec, cb->nid); + if (tbl) + tbl->block_report = 1; } static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb) diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index a12ae8ac0914..c1c855a6c0af 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c @@ -614,6 +614,7 @@ enum { CS4208_MAC_AUTO, CS4208_MBA6, CS4208_MBP11, + CS4208_MACMINI, CS4208_GPIO0, }; @@ -621,6 +622,7 @@ static const struct hda_model_fixup cs4208_models[] = { { .id = CS4208_GPIO0, .name = "gpio0" }, { .id = CS4208_MBA6, .name = "mba6" }, { .id = CS4208_MBP11, .name = "mbp11" }, + { .id = CS4208_MACMINI, .name = "macmini" }, {} }; @@ -632,6 +634,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = { /* codec SSID matching */ static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), + SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI), SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11), @@ -666,6 +669,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec, snd_hda_apply_fixup(codec, action); } +/* MacMini 7,1 has the inverted jack detection */ +static void cs4208_fixup_macmini(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + static const struct hda_pintbl pincfgs[] = { + { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */ + { 0x21, 0x004be140 }, /* SPDIF: disable detect */ + { } + }; + + if (action == HDA_FIXUP_ACT_PRE_PROBE) { + /* HP pin (0x10) has an inverted detection */ + codec->inv_jack_detect = 1; + /* disable the bogus Mic and SPDIF jack detections */ + snd_hda_apply_pincfgs(codec, pincfgs); + } +} + static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { @@ -709,6 +730,12 @@ static const struct hda_fixup cs4208_fixups[] = { .chained = true, .chain_id = CS4208_GPIO0, }, + [CS4208_MACMINI] = { + .type = HDA_FIXUP_FUNC, + .v.func = cs4208_fixup_macmini, + .chained = true, + .chain_id = CS4208_GPIO0, + }, [CS4208_GPIO0] = { .type = HDA_FIXUP_FUNC, .v.func = cs4208_fixup_gpio0, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 1f52b55d77c9..8ee78dbd4c60 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -448,7 +448,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol, eld = &per_pin->sink_eld; mutex_lock(&per_pin->lock); - if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) { + if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) || + eld->eld_size > ELD_MAX_SIZE) { mutex_unlock(&per_pin->lock); snd_BUG(); return -EINVAL; @@ -1193,7 +1194,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid) static void jack_callback(struct hda_codec *codec, struct hda_jack_callback *jack) { - check_presence_and_report(codec, jack->tbl->nid); + check_presence_and_report(codec, jack->nid); } static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 33753244f48f..efd4980cffb8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -282,7 +282,7 @@ static void 
alc_update_knob_master(struct hda_codec *codec, uctl = kzalloc(sizeof(*uctl), GFP_KERNEL); if (!uctl) return; - val = snd_hda_codec_read(codec, jack->tbl->nid, 0, + val = snd_hda_codec_read(codec, jack->nid, 0, AC_VERB_GET_VOLUME_KNOB_CONTROL, 0); val &= HDA_AMP_VOLMASK; uctl->value.integer.value[0] = val; @@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0292: alc_update_coef_idx(codec, 0x4, 1<<15, 0); break; + case 0x10ec0225: case 0x10ec0233: case 0x10ec0255: case 0x10ec0256: @@ -900,6 +901,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = { { 0x10ec0899, 0x1028, 0, "ALC3861" }, { 0x10ec0298, 0x1028, 0, "ALC3266" }, { 0x10ec0256, 0x1028, 0, "ALC3246" }, + { 0x10ec0225, 0x1028, 0, "ALC3253" }, { 0x10ec0670, 0x1025, 0, "ALC669X" }, { 0x10ec0676, 0x1025, 0, "ALC679X" }, { 0x10ec0282, 0x1043, 0, "ALC3229" }, @@ -1785,7 +1787,6 @@ enum { ALC882_FIXUP_NO_PRIMARY_HP, ALC887_FIXUP_ASUS_BASS, ALC887_FIXUP_BASS_CHMAP, - ALC882_FIXUP_DISABLE_AAMIX, }; static void alc889_fixup_coef(struct hda_codec *codec, @@ -1947,8 +1948,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec, static void alc_fixup_bass_chmap(struct hda_codec *codec, const struct hda_fixup *fix, int action); -static void alc_fixup_disable_aamix(struct hda_codec *codec, - const struct hda_fixup *fix, int action); static const struct hda_fixup alc882_fixups[] = { [ALC882_FIXUP_ABIT_AW9D_MAX] = { @@ -2186,10 +2185,6 @@ static const struct hda_fixup alc882_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_bass_chmap, }, - [ALC882_FIXUP_DISABLE_AAMIX] = { - .type = HDA_FIXUP_FUNC, - .v.func = alc_fixup_disable_aamix, - }, }; static const struct snd_pci_quirk alc882_fixup_tbl[] = { @@ -2228,6 +2223,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP), /* All Apple entries are in codec SSIDs */ SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF), @@ -2257,7 +2253,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), - SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), @@ -2651,6 +2646,7 @@ enum { ALC269_TYPE_ALC298, ALC269_TYPE_ALC255, ALC269_TYPE_ALC256, + ALC269_TYPE_ALC225, }; /* @@ -2680,6 +2676,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec) case ALC269_TYPE_ALC298: case ALC269_TYPE_ALC255: case ALC269_TYPE_ALC256: + case ALC269_TYPE_ALC225: ssids = alc269_ssids; break; default: @@ -3658,6 +3655,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) WRITE_COEF(0xb7, 0x802b), {} }; + static struct coef_fw coef0225[] = { + UPDATE_COEF(0x4a, 1<<8, 0), + UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), + UPDATE_COEF(0x63, 3<<14, 3<<14), + UPDATE_COEF(0x4a, 3<<4, 2<<4), + UPDATE_COEF(0x4a, 3<<10, 3<<10), + UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10), + UPDATE_COEF(0x4a, 
3<<10, 0), + {} + }; switch (codec->core.vendor_id) { case 0x10ec0255: @@ -3682,6 +3689,9 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) case 0x10ec0668: alc_process_coef_fw(codec, coef0668); break; + case 0x10ec0225: + alc_process_coef_fw(codec, coef0225); + break; } codec_dbg(codec, "Headset jack set to unplugged mode.\n"); } @@ -3727,6 +3737,13 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, UPDATE_COEF(0xc3, 0, 1<<12), {} }; + static struct coef_fw coef0225[] = { + UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), + UPDATE_COEF(0x4a, 3<<4, 2<<4), + UPDATE_COEF(0x63, 3<<14, 0), + {} + }; + switch (codec->core.vendor_id) { case 0x10ec0255: @@ -3772,6 +3789,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin, alc_process_coef_fw(codec, coef0688); snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); break; + case 0x10ec0225: + alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10); + snd_hda_set_pin_ctl_cache(codec, hp_pin, 0); + alc_process_coef_fw(codec, coef0225); + snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50); + break; } codec_dbg(codec, "Headset jack set to mic-in mode.\n"); } @@ -3884,6 +3907,13 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) WRITE_COEF(0xc3, 0x0000), {} }; + static struct coef_fw coef0225[] = { + UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10), + UPDATE_COEF(0x49, 1<<8, 1<<8), + UPDATE_COEF(0x4a, 7<<6, 7<<6), + UPDATE_COEF(0x4a, 3<<4, 3<<4), + {} + }; switch (codec->core.vendor_id) { case 0x10ec0255: @@ -3912,6 +3942,9 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) case 0x10ec0668: alc_process_coef_fw(codec, coef0688); break; + case 0x10ec0225: + alc_process_coef_fw(codec, coef0225); + break; } codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n"); } @@ -3955,6 +3988,13 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) WRITE_COEF(0xc3, 0x0000), {} }; + static struct coef_fw coef0225[] = { + UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10), + UPDATE_COEF(0x49, 1<<8, 1<<8), + UPDATE_COEF(0x4a, 7<<6, 7<<6), + UPDATE_COEF(0x4a, 3<<4, 3<<4), + {} + }; switch (codec->core.vendor_id) { case 0x10ec0255: @@ -3983,6 +4023,9 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) case 0x10ec0668: alc_process_coef_fw(codec, coef0688); break; + case 0x10ec0225: + alc_process_coef_fw(codec, coef0225); + break; } codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n"); } @@ -4014,6 +4057,11 @@ static void alc_determine_headset_type(struct hda_codec *codec) WRITE_COEF(0xc3, 0x0c00), {} }; + static struct coef_fw coef0225[] = { + UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10), + UPDATE_COEF(0x49, 1<<8, 1<<8), + {} + }; switch (codec->core.vendor_id) { case 0x10ec0255: @@ -4058,6 +4106,12 @@ static void alc_determine_headset_type(struct hda_codec *codec) val = alc_read_coef_idx(codec, 0xbe); is_ctia = (val & 0x1c02) == 0x1c02; break; + case 0x10ec0225: + alc_process_coef_fw(codec, coef0225); + msleep(800); + val = alc_read_coef_idx(codec, 0x46); + is_ctia = (val & 0x00f0) == 0x00f0; + break; } codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n", @@ -5560,6 +5614,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {.id = ALC292_FIXUP_TPT440, .name = "tpt440"}, {} }; +#define ALC225_STANDARD_PINS \ + {0x12, 0xb7a60130}, \ + {0x21, 0x04211020} #define ALC256_STANDARD_PINS \ {0x12, 0x90a60140}, \ @@ -5581,6 +5638,12 @@ static const struct hda_model_fixup alc269_fixup_models[] = { {0x21, 0x03211020} static const struct 
snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC225_STANDARD_PINS, + {0x14, 0x901701a0}), + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + ALC225_STANDARD_PINS, + {0x14, 0x901701b0}), SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE, {0x14, 0x90170110}, {0x21, 0x02211020}), @@ -5906,6 +5969,9 @@ static int patch_alc269(struct hda_codec *codec) spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/ break; + case 0x10ec0225: + spec->codec_variant = ALC269_TYPE_ALC225; + break; } if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { @@ -6796,6 +6862,7 @@ static int patch_alc680(struct hda_codec *codec) */ static const struct hda_device_id snd_hda_id_realtek[] = { HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269), + HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269), HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269), HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269), HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269), diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 2c7c5eb8b1e9..37b70f8e878f 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec, if (!spec->num_pwrs) return; - if (jack && jack->tbl->nid) { - stac_toggle_power_map(codec, jack->tbl->nid, - snd_hda_jack_detect(codec, jack->tbl->nid), + if (jack && jack->nid) { + stac_toggle_power_map(codec, jack->nid, + snd_hda_jack_detect(codec, jack->nid), true); return; } diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c index 3191e0a7d273..d1fb035f44db 100644 --- a/sound/soc/amd/acp-pcm-dma.c +++ b/sound/soc/amd/acp-pcm-dma.c @@ -635,6 +635,7 @@ static int acp_dma_open(struct snd_pcm_substream *substream) SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) { dev_err(prtd->platform->dev, "set integer constraint failed\n"); + kfree(adata); return ret; } diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c index 33143fe1de0b..91785318b283 100644 --- a/sound/soc/codecs/arizona.c +++ b/sound/soc/codecs/arizona.c @@ -1929,6 +1929,25 @@ static struct { { 1000000, 13500000, 0, 1 }, }; +static const unsigned int pseudo_fref_max[ARIZONA_FLL_MAX_FRATIO] = { + 13500000, + 6144000, + 6144000, + 3072000, + 3072000, + 2822400, + 2822400, + 1536000, + 1536000, + 1536000, + 1536000, + 1536000, + 1536000, + 1536000, + 1536000, + 768000, +}; + static struct { unsigned int min; unsigned int max; @@ -2042,16 +2061,32 @@ static int arizona_calc_fratio(struct arizona_fll *fll, /* Adjust FRATIO/refdiv to avoid integer mode if possible */ refdiv = cfg->refdiv; + arizona_fll_dbg(fll, "pseudo: initial ratio=%u fref=%u refdiv=%u\n", + init_ratio, Fref, refdiv); + while (div <= ARIZONA_FLL_MAX_REFDIV) { for (ratio = init_ratio; ratio <= ARIZONA_FLL_MAX_FRATIO; ratio++) { if ((ARIZONA_FLL_VCO_CORNER / 2) / - (fll->vco_mult * ratio) < Fref) + (fll->vco_mult * ratio) < Fref) { + arizona_fll_dbg(fll, "pseudo: hit VCO corner\n"); break; + } + + if (Fref > pseudo_fref_max[ratio - 1]) { + arizona_fll_dbg(fll, + "pseudo: exceeded max fref(%u) for ratio=%u\n", + pseudo_fref_max[ratio - 1], + ratio); + break; + } if (target % (ratio * Fref)) { cfg->refdiv = refdiv; cfg->fratio = ratio - 1; + arizona_fll_dbg(fll, + "pseudo: 
found fref=%u refdiv=%d(%d) ratio=%d\n", + Fref, refdiv, div, ratio); return ratio; } } @@ -2060,6 +2095,9 @@ static int arizona_calc_fratio(struct arizona_fll *fll, if (target % (ratio * Fref)) { cfg->refdiv = refdiv; cfg->fratio = ratio - 1; + arizona_fll_dbg(fll, + "pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n", + Fref, refdiv, div, ratio); return ratio; } } @@ -2068,6 +2106,9 @@ static int arizona_calc_fratio(struct arizona_fll *fll, Fref /= 2; refdiv++; init_ratio = arizona_find_fratio(Fref, NULL); + arizona_fll_dbg(fll, + "pseudo: change fref=%u refdiv=%d(%d) ratio=%u\n", + Fref, refdiv, div, init_ratio); } arizona_fll_warn(fll, "Falling back to integer mode operation\n"); diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c index bc08f0c5a5f6..1bd31644a782 100644 --- a/sound/soc/codecs/rt286.c +++ b/sound/soc/codecs/rt286.c @@ -266,6 +266,8 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic) } else { *mic = false; regmap_write(rt286->regmap, RT286_SET_MIC1, 0x20); + regmap_update_bits(rt286->regmap, + RT286_CBJ_CTRL1, 0x0400, 0x0000); } } else { regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf); @@ -470,24 +472,6 @@ static int rt286_set_dmic1_event(struct snd_soc_dapm_widget *w, return 0; } -static int rt286_vref_event(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - snd_soc_update_bits(codec, - RT286_CBJ_CTRL1, 0x0400, 0x0000); - mdelay(50); - break; - default: - return 0; - } - - return 0; -} - static int rt286_ldo2_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -536,7 +520,7 @@ static const struct snd_soc_dapm_widget rt286_dapm_widgets[] = { SND_SOC_DAPM_SUPPLY_S("HV", 1, RT286_POWER_CTRL1, 12, 1, NULL, 0), SND_SOC_DAPM_SUPPLY("VREF", RT286_POWER_CTRL1, - 0, 1, rt286_vref_event, SND_SOC_DAPM_PRE_PMU), + 0, 1, NULL, 0), SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT286_POWER_CTRL2, 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY_S("LDO2", 2, RT286_POWER_CTRL1, @@ -911,8 +895,6 @@ static int rt286_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_ON: mdelay(10); snd_soc_update_bits(codec, - RT286_CBJ_CTRL1, 0x0400, 0x0400); - snd_soc_update_bits(codec, RT286_DC_GAIN, 0x200, 0x0); break; @@ -920,8 +902,6 @@ static int rt286_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_STANDBY: snd_soc_write(codec, RT286_SET_AUDIO_POWER, AC_PWRST_D3); - snd_soc_update_bits(codec, - RT286_CBJ_CTRL1, 0x0400, 0x0000); break; default: diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c index c61d38b585fb..93e8c9017633 100644 --- a/sound/soc/codecs/rt5645.c +++ b/sound/soc/codecs/rt5645.c @@ -776,7 +776,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = { /* IN1/IN2 Control */ SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1, - RT5645_BST_SFT1, 8, 0, bst_tlv), + RT5645_BST_SFT1, 12, 0, bst_tlv), SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL, RT5645_BST_SFT2, 8, 0, bst_tlv), diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c index 820d8fa62b5e..fb8ea05c0de1 100644 --- a/sound/soc/codecs/rt5659.c +++ b/sound/soc/codecs/rt5659.c @@ -3985,7 +3985,6 @@ static int rt5659_i2c_probe(struct i2c_client *i2c, if (rt5659 == NULL) return -ENOMEM; - rt5659->i2c = i2c; i2c_set_clientdata(i2c, rt5659); if (pdata) @@ -4157,24 +4156,17 @@ static int rt5659_i2c_probe(struct i2c_client *i2c, INIT_DELAYED_WORK(&rt5659->jack_detect_work, 
rt5659_jack_detect_work); - if (rt5659->i2c->irq) { - ret = request_threaded_irq(rt5659->i2c->irq, NULL, rt5659_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING + if (i2c->irq) { + ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL, + rt5659_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "rt5659", rt5659); if (ret) dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret); } - ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659, + return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659, rt5659_dai, ARRAY_SIZE(rt5659_dai)); - - if (ret) { - if (rt5659->i2c->irq) - free_irq(rt5659->i2c->irq, rt5659); - } - - return 0; } static int rt5659_i2c_remove(struct i2c_client *i2c) @@ -4191,24 +4183,29 @@ void rt5659_i2c_shutdown(struct i2c_client *client) regmap_write(rt5659->regmap, RT5659_RESET, 0); } +#ifdef CONFIG_OF static const struct of_device_id rt5659_of_match[] = { { .compatible = "realtek,rt5658", }, { .compatible = "realtek,rt5659", }, - {}, + { }, }; +MODULE_DEVICE_TABLE(of, rt5659_of_match); +#endif +#ifdef CONFIG_ACPI static struct acpi_device_id rt5659_acpi_match[] = { - { "10EC5658", 0}, - { "10EC5659", 0}, - { }, + { "10EC5658", 0, }, + { "10EC5659", 0, }, + { }, }; MODULE_DEVICE_TABLE(acpi, rt5659_acpi_match); +#endif struct i2c_driver rt5659_i2c_driver = { .driver = { .name = "rt5659", .owner = THIS_MODULE, - .of_match_table = rt5659_of_match, + .of_match_table = of_match_ptr(rt5659_of_match), .acpi_match_table = ACPI_PTR(rt5659_acpi_match), }, .probe = rt5659_i2c_probe, diff --git a/sound/soc/codecs/rt5659.h b/sound/soc/codecs/rt5659.h index 8f07ee903eaa..d31c9e5bcec8 100644 --- a/sound/soc/codecs/rt5659.h +++ b/sound/soc/codecs/rt5659.h @@ -1792,7 +1792,6 @@ struct rt5659_priv { struct snd_soc_codec *codec; struct rt5659_platform_data pdata; struct regmap *regmap; - struct i2c_client *i2c; struct gpio_desc *gpiod_ldo1_en; struct gpio_desc *gpiod_reset; struct snd_soc_jack *hs_jack; diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c index 21ca3a5e9f66..d374c18d4db7 100644 --- a/sound/soc/codecs/sigmadsp-i2c.c +++ b/sound/soc/codecs/sigmadsp-i2c.c @@ -31,7 +31,10 @@ static int sigmadsp_write_i2c(void *control_data, kfree(buf); - return ret; + if (ret < 0) + return ret; + + return 0; } static int sigmadsp_read_i2c(void *control_data, diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c index 6088d30962a9..97c0f1e23886 100644 --- a/sound/soc/codecs/wm5110.c +++ b/sound/soc/codecs/wm5110.c @@ -2382,6 +2382,7 @@ error: static int wm5110_remove(struct platform_device *pdev) { + snd_soc_unregister_platform(&pdev->dev); snd_soc_unregister_codec(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index ff237726775a..d7f444f87460 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -240,13 +240,13 @@ SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL, SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL, 7, 1, 1), -SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", +SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume", WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv), -SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume", +SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume", WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv), -SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume", +SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume", WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv), -SOC_SINGLE_TLV("Left 
Input Boost Mixer LINPUT2 Volume", +SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume", WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv), SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume", WM8960_RINPATH, 4, 3, 0, micboost_tlv), @@ -643,29 +643,31 @@ static int wm8960_configure_clocking(struct snd_soc_codec *codec) return -EINVAL; } - /* check if the sysclk frequency is available. */ - for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { - if (sysclk_divs[i] == -1) - continue; - sysclk = freq_out / sysclk_divs[i]; - for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) { - if (sysclk == dac_divs[j] * lrclk) { + if (wm8960->clk_id != WM8960_SYSCLK_PLL) { + /* check if the sysclk frequency is available. */ + for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { + if (sysclk_divs[i] == -1) + continue; + sysclk = freq_out / sysclk_divs[i]; + for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) { + if (sysclk != dac_divs[j] * lrclk) + continue; for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k) if (sysclk == bclk * bclk_divs[k] / 10) break; if (k != ARRAY_SIZE(bclk_divs)) break; } + if (j != ARRAY_SIZE(dac_divs)) + break; } - if (j != ARRAY_SIZE(dac_divs)) - break; - } - if (i != ARRAY_SIZE(sysclk_divs)) { - goto configure_clock; - } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) { - dev_err(codec->dev, "failed to configure clock\n"); - return -EINVAL; + if (i != ARRAY_SIZE(sysclk_divs)) { + goto configure_clock; + } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) { + dev_err(codec->dev, "failed to configure clock\n"); + return -EINVAL; + } } /* get a available pll out frequency and set pll */ for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) { diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c index ce664c239be3..bff258d7bcea 100644 --- a/sound/soc/dwc/designware_i2s.c +++ b/sound/soc/dwc/designware_i2s.c @@ -645,6 +645,8 @@ static int dw_i2s_probe(struct platform_device *pdev) dev->dev = &pdev->dev; + dev->i2s_reg_comp1 = I2S_COMP_PARAM_1; + dev->i2s_reg_comp2 = I2S_COMP_PARAM_2; if (pdata) { dev->capability = pdata->cap; clk_id = NULL; @@ -652,9 +654,6 @@ static int dw_i2s_probe(struct platform_device *pdev) if (dev->quirks & DW_I2S_QUIRK_COMP_REG_OFFSET) { dev->i2s_reg_comp1 = pdata->i2s_reg_comp1; dev->i2s_reg_comp2 = pdata->i2s_reg_comp2; - } else { - dev->i2s_reg_comp1 = I2S_COMP_PARAM_1; - dev->i2s_reg_comp2 = I2S_COMP_PARAM_2; } ret = dw_configure_dai_by_pd(dev, dw_i2s_dai, res, pdata); } else { diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c index 40dfd8a36484..ed8de1035cda 100644 --- a/sound/soc/fsl/fsl_ssi.c +++ b/sound/soc/fsl/fsl_ssi.c @@ -112,20 +112,6 @@ struct fsl_ssi_rxtx_reg_val { struct fsl_ssi_reg_val tx; }; -static const struct reg_default fsl_ssi_reg_defaults[] = { - {CCSR_SSI_SCR, 0x00000000}, - {CCSR_SSI_SIER, 0x00003003}, - {CCSR_SSI_STCR, 0x00000200}, - {CCSR_SSI_SRCR, 0x00000200}, - {CCSR_SSI_STCCR, 0x00040000}, - {CCSR_SSI_SRCCR, 0x00040000}, - {CCSR_SSI_SACNT, 0x00000000}, - {CCSR_SSI_STMSK, 0x00000000}, - {CCSR_SSI_SRMSK, 0x00000000}, - {CCSR_SSI_SACCEN, 0x00000000}, - {CCSR_SSI_SACCDIS, 0x00000000}, -}; - static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -190,8 +176,7 @@ static const struct regmap_config fsl_ssi_regconfig = { .val_bits = 32, .reg_stride = 4, .val_format_endian = REGMAP_ENDIAN_NATIVE, - .reg_defaults = fsl_ssi_reg_defaults, - .num_reg_defaults = ARRAY_SIZE(fsl_ssi_reg_defaults), + .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1, .readable_reg = fsl_ssi_readable_reg, .volatile_reg = 
fsl_ssi_volatile_reg, .precious_reg = fsl_ssi_precious_reg, @@ -201,6 +186,7 @@ static const struct regmap_config fsl_ssi_regconfig = { struct fsl_ssi_soc_data { bool imx; + bool imx21regs; /* imx21-class SSI - no SACC{ST,EN,DIS} regs */ bool offline_config; u32 sisr_write_mask; }; @@ -303,6 +289,7 @@ static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = { static struct fsl_ssi_soc_data fsl_ssi_imx21 = { .imx = true, + .imx21regs = true, .offline_config = true, .sisr_write_mask = 0, }; @@ -586,8 +573,12 @@ static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private) */ regmap_write(regs, CCSR_SSI_SACNT, CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV); - regmap_write(regs, CCSR_SSI_SACCDIS, 0xff); - regmap_write(regs, CCSR_SSI_SACCEN, 0x300); + + /* no SACC{ST,EN,DIS} regs on imx21-class SSI */ + if (!ssi_private->soc->imx21regs) { + regmap_write(regs, CCSR_SSI_SACCDIS, 0xff); + regmap_write(regs, CCSR_SSI_SACCEN, 0x300); + } /* * Enable SSI, Transmit and Receive. AC97 has to communicate with the @@ -1397,6 +1388,7 @@ static int fsl_ssi_probe(struct platform_device *pdev) struct resource *res; void __iomem *iomem; char name[64]; + struct regmap_config regconfig = fsl_ssi_regconfig; of_id = of_match_device(fsl_ssi_ids, &pdev->dev); if (!of_id || !of_id->data) @@ -1444,15 +1436,25 @@ static int fsl_ssi_probe(struct platform_device *pdev) return PTR_ERR(iomem); ssi_private->ssi_phys = res->start; + if (ssi_private->soc->imx21regs) { + /* + * According to datasheet imx21-class SSI + * don't have SACC{ST,EN,DIS} regs. + */ + regconfig.max_register = CCSR_SSI_SRMSK; + regconfig.num_reg_defaults_raw = + CCSR_SSI_SRMSK / sizeof(uint32_t) + 1; + } + ret = of_property_match_string(np, "clock-names", "ipg"); if (ret < 0) { ssi_private->has_ipg_clk_name = false; ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem, - &fsl_ssi_regconfig); + ®config); } else { ssi_private->has_ipg_clk_name = true; ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev, - "ipg", iomem, &fsl_ssi_regconfig); + "ipg", iomem, ®config); } if (IS_ERR(ssi_private->regs)) { dev_err(&pdev->dev, "Failed to init register map\n"); diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c index a407e833c612..fb896b2c9ba3 100644 --- a/sound/soc/fsl/imx-spdif.c +++ b/sound/soc/fsl/imx-spdif.c @@ -72,8 +72,6 @@ static int imx_spdif_audio_probe(struct platform_device *pdev) goto end; } - platform_set_drvdata(pdev, data); - end: of_node_put(spdif_np); diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 1ded8811598e..2389ab47e25f 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -99,7 +99,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream, if (ret && ret != -ENOTSUPP) goto err; } - + return 0; err: return ret; } diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index 803f95e40679..7d7c872c280d 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -30,11 +30,15 @@ config SND_SST_IPC_ACPI config SND_SOC_INTEL_SST tristate select SND_SOC_INTEL_SST_ACPI if ACPI + select SND_SOC_INTEL_SST_MATCH if ACPI depends on (X86 || COMPILE_TEST) config SND_SOC_INTEL_SST_ACPI tristate +config SND_SOC_INTEL_SST_MATCH + tristate + config SND_SOC_INTEL_HASWELL tristate @@ -57,7 +61,7 @@ config SND_SOC_INTEL_HASWELL_MACH config SND_SOC_INTEL_BYT_RT5640_MACH tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec" depends on X86_INTEL_LPSS && I2C - depends on DW_DMAC_CORE=y && (SND_SOC_INTEL_BYTCR_RT5640_MACH = n) 
+ depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n) select SND_SOC_INTEL_SST select SND_SOC_INTEL_BAYTRAIL select SND_SOC_RT5640 @@ -69,7 +73,7 @@ config SND_SOC_INTEL_BYT_RT5640_MACH config SND_SOC_INTEL_BYT_MAX98090_MACH tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec" depends on X86_INTEL_LPSS && I2C - depends on DW_DMAC_CORE=y + depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n) select SND_SOC_INTEL_SST select SND_SOC_INTEL_BAYTRAIL select SND_SOC_MAX98090 @@ -97,6 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH select SND_SOC_RT5640 select SND_SST_MFLD_PLATFORM select SND_SST_IPC_ACPI + select SND_SOC_INTEL_SST_MATCH if ACPI help This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR platforms with RT5640 audio codec. @@ -109,6 +114,7 @@ config SND_SOC_INTEL_BYTCR_RT5651_MACH select SND_SOC_RT5651 select SND_SST_MFLD_PLATFORM select SND_SST_IPC_ACPI + select SND_SOC_INTEL_SST_MATCH if ACPI help This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR platforms with RT5651 audio codec. @@ -121,6 +127,7 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH select SND_SOC_RT5670 select SND_SST_MFLD_PLATFORM select SND_SST_IPC_ACPI + select SND_SOC_INTEL_SST_MATCH if ACPI help This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell platforms with RT5672 audio codec. @@ -133,6 +140,7 @@ config SND_SOC_INTEL_CHT_BSW_RT5645_MACH select SND_SOC_RT5645 select SND_SST_MFLD_PLATFORM select SND_SST_IPC_ACPI + select SND_SOC_INTEL_SST_MATCH if ACPI help This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell platforms with RT5645/5650 audio codec. @@ -145,6 +153,7 @@ config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH select SND_SOC_TS3A227E select SND_SST_MFLD_PLATFORM select SND_SST_IPC_ACPI + select SND_SOC_INTEL_SST_MATCH if ACPI help This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell platforms with MAX98090 audio codec it also can support TI jack chip as aux device. 
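Side note on the fsl_ssi probe change a little earlier in this series: rather than registering the shared const regmap_config directly, the probe now takes a local copy and trims max_register and num_reg_defaults_raw for the imx21-class parts before calling devm_regmap_init_mmio(). A minimal sketch of that pattern follows; the register names, the small_variant flag and all foo_* identifiers are invented for illustration, and only the regmap/err API calls are real.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/regmap.h>

/* hypothetical register map of a small MMIO block */
#define FOO_REG_CTRL	0x00
#define FOO_REG_STAT	0x04
#define FOO_REG_EXT	0x08	/* missing on the cut-down variant */

static const struct regmap_config foo_regconfig = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = FOO_REG_EXT,
};

static int foo_init_regmap(struct device *dev, void __iomem *base,
			   bool small_variant, struct regmap **out)
{
	/* stack copy, so the shared template stays const and untouched */
	struct regmap_config cfg = foo_regconfig;

	if (small_variant)
		cfg.max_register = FOO_REG_STAT;

	*out = devm_regmap_init_mmio(dev, base, &cfg);
	return PTR_ERR_OR_ZERO(*out);
}

The point of the copy is that one template can serve every SoC variant while the probe adapts the register-cache bounds per compatible string, which appears to be what the imx21 handling in fsl_ssi_probe is doing above.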
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c index 55c33dc76ce4..52ed434cbca6 100644 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c @@ -528,6 +528,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = { .ops = &sst_compr_dai_ops, .playback = { .stream_name = "Compress Playback", + .channels_min = 1, }, }, /* BE CPU Dais */ diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c index 7396ddb427d8..2cbcbe412661 100644 --- a/sound/soc/intel/boards/skl_rt286.c +++ b/sound/soc/intel/boards/skl_rt286.c @@ -212,7 +212,10 @@ static int skylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd, { struct snd_interval *channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); - channels->min = channels->max = 4; + if (params_channels(params) == 2) + channels->min = channels->max = 2; + else + channels->min = channels->max = 4; return 0; } diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile index 668fdeee195e..fbbb25c2ceed 100644 --- a/sound/soc/intel/common/Makefile +++ b/sound/soc/intel/common/Makefile @@ -1,13 +1,10 @@ snd-soc-sst-dsp-objs := sst-dsp.o -ifneq ($(CONFIG_SND_SST_IPC_ACPI),) -snd-soc-sst-acpi-objs := sst-match-acpi.o -else -snd-soc-sst-acpi-objs := sst-acpi.o sst-match-acpi.o -endif - +snd-soc-sst-acpi-objs := sst-acpi.o +snd-soc-sst-match-objs := sst-match-acpi.o snd-soc-sst-ipc-objs := sst-ipc.o snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o +obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c index 7a85c576dad3..2c5eda14d510 100644 --- a/sound/soc/intel/common/sst-acpi.c +++ b/sound/soc/intel/common/sst-acpi.c @@ -215,6 +215,7 @@ static struct sst_acpi_desc sst_acpi_broadwell_desc = { .dma_size = SST_LPT_DSP_DMA_SIZE, }; +#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI) static struct sst_acpi_mach baytrail_machines[] = { { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL }, { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL }, @@ -231,11 +232,14 @@ static struct sst_acpi_desc sst_acpi_baytrail_desc = { .sst_id = SST_DEV_ID_BYT, .resindex_dma_base = -1, }; +#endif static const struct acpi_device_id sst_acpi_match[] = { { "INT33C8", (unsigned long)&sst_acpi_haswell_desc }, { "INT3438", (unsigned long)&sst_acpi_broadwell_desc }, +#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI) { "80860F28", (unsigned long)&sst_acpi_baytrail_desc }, +#endif { } }; MODULE_DEVICE_TABLE(acpi, sst_acpi_match); diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c index dd077e116d25..3b4539d21492 100644 --- a/sound/soc/intel/common/sst-match-acpi.c +++ b/sound/soc/intel/common/sst-match-acpi.c @@ -41,3 +41,6 @@ struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines) return NULL; } EXPORT_SYMBOL_GPL(sst_acpi_find_machine); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel Common ACPI Match module"); diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c index de6dac496a0d..4629372d7c8e 100644 --- a/sound/soc/intel/skylake/skl-messages.c +++ b/sound/soc/intel/skylake/skl-messages.c @@ -688,14 +688,14 @@ int skl_unbind_modules(struct skl_sst 
*ctx, /* get src queue index */ src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max); if (src_index < 0) - return -EINVAL; + return 0; msg.src_queue = src_index; /* get dst queue index */ dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max); if (dst_index < 0) - return -EINVAL; + return 0; msg.dst_queue = dst_index; @@ -747,7 +747,7 @@ int skl_bind_modules(struct skl_sst *ctx, skl_dump_bind_info(ctx, src_mcfg, dst_mcfg); - if (src_mcfg->m_state < SKL_MODULE_INIT_DONE && + if (src_mcfg->m_state < SKL_MODULE_INIT_DONE || dst_mcfg->m_state < SKL_MODULE_INIT_DONE) return 0; diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c index f3553258091a..b6e6b61d10ec 100644 --- a/sound/soc/intel/skylake/skl-pcm.c +++ b/sound/soc/intel/skylake/skl-pcm.c @@ -863,6 +863,7 @@ static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus, else delay += hstream->bufsize; } + delay = (hstream->bufsize == delay) ? 0 : delay; if (delay >= hstream->period_bytes) { dev_info(bus->dev, diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c index 4624556f486d..a294fee431f0 100644 --- a/sound/soc/intel/skylake/skl-topology.c +++ b/sound/soc/intel/skylake/skl-topology.c @@ -54,12 +54,9 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w) /* * Each pipelines needs memory to be allocated. Check if we have free memory - * from available pool. Then only add this to pool - * This is freed when pipe is deleted - * Note: DSP does actual memory management we only keep track for complete - * pool + * from available pool. */ -static bool skl_tplg_alloc_pipe_mem(struct skl *skl, +static bool skl_is_pipe_mem_avail(struct skl *skl, struct skl_module_cfg *mconfig) { struct skl_sst *ctx = skl->skl_sst; @@ -74,10 +71,20 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl, "exceeds ppl memory available %d mem %d\n", skl->resource.max_mem, skl->resource.mem); return false; + } else { + return true; } +} +/* + * Add the mem to the mem pool. This is freed when pipe is deleted. + * Note: DSP does actual memory management we only keep track for complete + * pool + */ +static void skl_tplg_alloc_pipe_mem(struct skl *skl, + struct skl_module_cfg *mconfig) +{ skl->resource.mem += mconfig->pipe->memory_pages; - return true; } /* @@ -85,10 +92,10 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl, * quantified in MCPS (Million Clocks Per Second) required for module/pipe * * Each pipelines needs mcps to be allocated. Check if we have mcps for this - * pipe. This adds the mcps to driver counter - * This is removed on pipeline delete + * pipe. 
*/ -static bool skl_tplg_alloc_pipe_mcps(struct skl *skl, + +static bool skl_is_pipe_mcps_avail(struct skl *skl, struct skl_module_cfg *mconfig) { struct skl_sst *ctx = skl->skl_sst; @@ -98,13 +105,18 @@ static bool skl_tplg_alloc_pipe_mcps(struct skl *skl, "%s: module_id %d instance %d\n", __func__, mconfig->id.module_id, mconfig->id.instance_id); dev_err(ctx->dev, - "exceeds ppl memory available %d > mem %d\n", + "exceeds ppl mcps available %d > mem %d\n", skl->resource.max_mcps, skl->resource.mcps); return false; + } else { + return true; } +} +static void skl_tplg_alloc_pipe_mcps(struct skl *skl, + struct skl_module_cfg *mconfig) +{ skl->resource.mcps += mconfig->mcps; - return true; } /* @@ -411,7 +423,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe) mconfig = w->priv; /* check resource available */ - if (!skl_tplg_alloc_pipe_mcps(skl, mconfig)) + if (!skl_is_pipe_mcps_avail(skl, mconfig)) return -ENOMEM; if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) { @@ -435,6 +447,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe) ret = skl_tplg_set_module_params(w, ctx); if (ret < 0) return ret; + skl_tplg_alloc_pipe_mcps(skl, mconfig); } return 0; @@ -477,10 +490,10 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, struct skl_sst *ctx = skl->skl_sst; /* check resource available */ - if (!skl_tplg_alloc_pipe_mcps(skl, mconfig)) + if (!skl_is_pipe_mcps_avail(skl, mconfig)) return -EBUSY; - if (!skl_tplg_alloc_pipe_mem(skl, mconfig)) + if (!skl_is_pipe_mem_avail(skl, mconfig)) return -ENOMEM; /* @@ -526,11 +539,15 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, src_module = dst_module; } + skl_tplg_alloc_pipe_mem(skl, mconfig); + skl_tplg_alloc_pipe_mcps(skl, mconfig); + return 0; } static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, struct skl *skl, + struct snd_soc_dapm_widget *src_w, struct skl_module_cfg *src_mconfig) { struct snd_soc_dapm_path *p; @@ -547,6 +564,10 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name); next_sink = p->sink; + + if (!is_skl_dsp_widget_type(p->sink)) + return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig); + /* * here we will check widgets in sink pipelines, so that * can be any widgets type and we are only interested if @@ -576,7 +597,7 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w, } if (!sink) - return skl_tplg_bind_sinks(next_sink, skl, src_mconfig); + return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig); return 0; } @@ -605,7 +626,7 @@ static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w, * if sink is not started, start sink pipe first, then start * this pipe */ - ret = skl_tplg_bind_sinks(w, skl, src_mconfig); + ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig); if (ret) return ret; @@ -773,10 +794,7 @@ static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, continue; } - ret = skl_unbind_modules(ctx, src_module, dst_module); - if (ret < 0) - return ret; - + skl_unbind_modules(ctx, src_module, dst_module); src_module = dst_module; } @@ -814,9 +832,6 @@ static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w, * This is a connecter and if path is found that means * unbind between source and sink has not happened yet */ - ret = skl_stop_pipe(ctx, sink_mconfig->pipe); - if (ret < 0) - return ret; ret = skl_unbind_modules(ctx, src_mconfig, sink_mconfig); } @@ -842,6 +857,12 
@@ static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w, case SND_SOC_DAPM_PRE_PMU: return skl_tplg_mixer_dapm_pre_pmu_event(w, skl); + case SND_SOC_DAPM_POST_PMU: + return skl_tplg_mixer_dapm_post_pmu_event(w, skl); + + case SND_SOC_DAPM_PRE_PMD: + return skl_tplg_mixer_dapm_pre_pmd_event(w, skl); + case SND_SOC_DAPM_POST_PMD: return skl_tplg_mixer_dapm_post_pmd_event(w, skl); } @@ -916,6 +937,13 @@ static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol, skl_get_module_params(skl->skl_sst, (u32 *)bc->params, bc->max, bc->param_id, mconfig); + /* decrement size for TLV header */ + size -= 2 * sizeof(u32); + + /* check size as we don't want to send kernel data */ + if (size > bc->max) + size = bc->max; + if (bc->params) { if (copy_to_user(data, &bc->param_id, sizeof(u32))) return -EFAULT; @@ -1510,6 +1538,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus) &skl_tplg_ops, fw, 0); if (ret < 0) { dev_err(bus->dev, "tplg component load failed%d\n", ret); + release_firmware(fw); return -EINVAL; } diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c index 443a15de94b5..092705e73db4 100644 --- a/sound/soc/intel/skylake/skl.c +++ b/sound/soc/intel/skylake/skl.c @@ -614,8 +614,6 @@ static int skl_probe(struct pci_dev *pci, goto out_unregister; /*configure PM */ - pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY); - pm_runtime_use_autosuspend(bus->dev); pm_runtime_put_noidle(bus->dev); pm_runtime_allow(bus->dev); diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig index 15c04e2eae34..976967675387 100644 --- a/sound/soc/mediatek/Kconfig +++ b/sound/soc/mediatek/Kconfig @@ -9,7 +9,7 @@ config SND_SOC_MEDIATEK config SND_SOC_MT8173_MAX98090 tristate "ASoC Audio driver for MT8173 with MAX98090 codec" - depends on SND_SOC_MEDIATEK + depends on SND_SOC_MEDIATEK && I2C select SND_SOC_MAX98090 help This adds ASoC driver for Mediatek MT8173 boards @@ -19,7 +19,7 @@ config SND_SOC_MT8173_MAX98090 config SND_SOC_MT8173_RT5650_RT5676 tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs" - depends on SND_SOC_MEDIATEK + depends on SND_SOC_MEDIATEK && I2C select SND_SOC_RT5645 select SND_SOC_RT5677 help diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c index c866ade28ad0..a6c7b8d87cd2 100644 --- a/sound/soc/mxs/mxs-saif.c +++ b/sound/soc/mxs/mxs-saif.c @@ -381,9 +381,19 @@ static int mxs_saif_startup(struct snd_pcm_substream *substream, __raw_writel(BM_SAIF_CTRL_CLKGATE, saif->base + SAIF_CTRL + MXS_CLR_ADDR); + clk_prepare(saif->clk); + return 0; } +static void mxs_saif_shutdown(struct snd_pcm_substream *substream, + struct snd_soc_dai *cpu_dai) +{ + struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); + + clk_unprepare(saif->clk); +} + /* * Should only be called when port is inactive. * although can be called multiple times by upper layers. 
@@ -424,8 +434,6 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, return ret; } - /* prepare clk in hw_param, enable in trigger */ - clk_prepare(saif->clk); if (saif != master_saif) { /* * Set an initial clock rate for the saif internal logic to work @@ -611,6 +619,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd, static const struct snd_soc_dai_ops mxs_saif_dai_ops = { .startup = mxs_saif_startup, + .shutdown = mxs_saif_shutdown, .trigger = mxs_saif_trigger, .prepare = mxs_saif_prepare, .hw_params = mxs_saif_hw_params, diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index 79688aa1941a..4aeb8e1a7160 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c @@ -440,18 +440,18 @@ static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data) } static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream, - struct snd_soc_pcm_runtime *soc_runtime) + struct snd_soc_pcm_runtime *rt) { struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = lpass_platform_pcm_hardware.buffer_bytes_max; buf->dev.type = SNDRV_DMA_TYPE_DEV; - buf->dev.dev = soc_runtime->dev; + buf->dev.dev = rt->platform->dev; buf->private_data = NULL; - buf->area = dma_alloc_coherent(soc_runtime->dev, size, &buf->addr, + buf->area = dma_alloc_coherent(rt->platform->dev, size, &buf->addr, GFP_KERNEL); if (!buf->area) { - dev_err(soc_runtime->dev, "%s: Could not allocate DMA buffer\n", + dev_err(rt->platform->dev, "%s: Could not allocate DMA buffer\n", __func__); return -ENOMEM; } @@ -461,12 +461,12 @@ static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream, } static void lpass_platform_free_buffer(struct snd_pcm_substream *substream, - struct snd_soc_pcm_runtime *soc_runtime) + struct snd_soc_pcm_runtime *rt) { struct snd_dma_buffer *buf = &substream->dma_buffer; if (buf->area) { - dma_free_coherent(soc_runtime->dev, buf->bytes, buf->area, + dma_free_coherent(rt->dev, buf->bytes, buf->area, buf->addr); } buf->area = NULL; @@ -499,9 +499,6 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime) snd_soc_pcm_set_drvdata(soc_runtime, data); - soc_runtime->dev->coherent_dma_mask = DMA_BIT_MASK(32); - soc_runtime->dev->dma_mask = &soc_runtime->dev->coherent_dma_mask; - ret = lpass_platform_alloc_buffer(substream, soc_runtime); if (ret) return ret; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 5a2812fa8946..0d3707987900 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -310,7 +310,7 @@ struct dapm_kcontrol_data { }; static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, - struct snd_kcontrol *kcontrol) + struct snd_kcontrol *kcontrol, const char *ctrl_name) { struct dapm_kcontrol_data *data; struct soc_mixer_control *mc; @@ -333,7 +333,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, if (mc->autodisable) { struct snd_soc_dapm_widget template; - name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name, + name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name, "Autodisable"); if (!name) { ret = -ENOMEM; @@ -371,7 +371,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, if (e->autodisable) { struct snd_soc_dapm_widget template; - name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name, + name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name, "Autodisable"); if (!name) { ret = -ENOMEM; @@ -871,7 +871,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w, 
kcontrol->private_free = dapm_kcontrol_free; - ret = dapm_kcontrol_data_alloc(w, kcontrol); + ret = dapm_kcontrol_data_alloc(w, kcontrol, name); if (ret) { snd_ctl_free_one(kcontrol); goto exit_free; diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index e898b427be7e..1af4f23697a7 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1810,7 +1810,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream) (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) && - (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP)) + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) && + (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND)) continue; dev_dbg(be->dev, "ASoC: hw_free BE %s\n", diff --git a/sound/usb/midi.c b/sound/usb/midi.c index cc39f63299ef..007cf5831121 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -2455,7 +2455,6 @@ int snd_usbmidi_create(struct snd_card *card, else err = snd_usbmidi_create_endpoints(umidi, endpoints); if (err < 0) { - snd_usbmidi_free(umidi); return err; } diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index a75d9ce7d77a..4f6ce1cac8e2 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1121,6 +1121,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) switch (chip->usb_id) { case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */ case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */ + case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */ case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */ case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */ case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ @@ -1281,7 +1282,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */ case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */ case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */ - case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/ + case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */ if (fp->altsetting == 2) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; @@ -1290,6 +1291,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */ case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */ + case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */ if (fp->altsetting == 3) return SNDRV_PCM_FMTBIT_DSD_U32_BE; break; diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c index 7ec7df9e7fc7..0c1a7e65bb81 100644 --- a/tools/testing/nvdimm/test/iomap.c +++ b/tools/testing/nvdimm/test/iomap.c @@ -113,7 +113,7 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res, } EXPORT_SYMBOL(__wrap_devm_memremap_pages); -pfn_t __wrap_phys_to_pfn_t(dma_addr_t addr, unsigned long flags) +pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags) { struct nfit_test_resource *nfit_res = get_nfit_res(addr); |
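One more pattern worth calling out, from the mxs-saif hunks a little earlier: clk_prepare() is moved out of hw_params(), which may run several times per stream, and into the startup()/shutdown() pair so that prepare and unprepare stay balanced across every open/close cycle. Below is a bare-bones sketch of that shape for a hypothetical CPU DAI; the foo_* names and drvdata layout are invented, while the clk and ASoC calls are real API.

#include <linux/clk.h>
#include <sound/pcm.h>
#include <sound/soc.h>

struct foo_saif {
	struct clk *clk;		/* hypothetical per-DAI clock */
};

static int foo_dai_startup(struct snd_pcm_substream *substream,
			   struct snd_soc_dai *cpu_dai)
{
	struct foo_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);

	/* prepare once per stream open; the clock is enabled later,
	 * typically from trigger() */
	return clk_prepare(saif->clk);
}

static void foo_dai_shutdown(struct snd_pcm_substream *substream,
			     struct snd_soc_dai *cpu_dai)
{
	struct foo_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);

	/* mirrors startup(), so repeated hw_params() calls can never
	 * leave an unbalanced prepare count behind */
	clk_unprepare(saif->clk);
}

static const struct snd_soc_dai_ops foo_dai_ops = {
	.startup	= foo_dai_startup,
	.shutdown	= foo_dai_shutdown,
};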